diff --git a/examples/go-chat/main.go b/examples/go-chat/main.go
index 5266f03e..7663fb8f 100644
--- a/examples/go-chat/main.go
+++ b/examples/go-chat/main.go
@@ -35,7 +35,7 @@ func main() {
 	ctx := context.Background()
 
 	req := &api.ChatRequest{
-		Model:    "llama3",
+		Model:    "llama3.1",
 		Messages: messages,
 	}
 
diff --git a/examples/go-generate-streaming/main.go b/examples/go-generate-streaming/main.go
index 49403351..3acfb22a 100644
--- a/examples/go-generate-streaming/main.go
+++ b/examples/go-generate-streaming/main.go
@@ -16,7 +16,7 @@ func main() {
 
 	// By default, GenerateRequest is streaming.
 	req := &api.GenerateRequest{
-		Model:  "gemma",
+		Model:  "gemma2",
 		Prompt: "how many planets are there?",
 	}
 
diff --git a/examples/go-generate/main.go b/examples/go-generate/main.go
index 50fbf64b..2fe28742 100644
--- a/examples/go-generate/main.go
+++ b/examples/go-generate/main.go
@@ -15,7 +15,7 @@ func main() {
 	}
 
 	req := &api.GenerateRequest{
-		Model:  "gemma",
+		Model:  "gemma2",
 		Prompt: "how many planets are there?",
 
 		// set streaming to false
diff --git a/examples/go-http-generate/README.md b/examples/go-http-generate/README.md
deleted file mode 100644
index e69de29b..00000000
diff --git a/examples/langchain-python-rag-document/README.md b/examples/langchain-python-rag-document/README.md
index 20a73a88..e2f3bc02 100644
--- a/examples/langchain-python-rag-document/README.md
+++ b/examples/langchain-python-rag-document/README.md
@@ -4,6 +4,14 @@ This example provides an interface for asking questions to a PDF document.
 
 ## Setup
 
+1. Ensure you have the `llama3.1` model installed:
+
+```
+ollama pull llama3.1
+```
+
+2. Install the Python Requirements.
+
 ```
 pip install -r requirements.txt
 ```
diff --git a/examples/langchain-python-rag-document/main.py b/examples/langchain-python-rag-document/main.py
index 3ed9499f..6f7cec9b 100644
--- a/examples/langchain-python-rag-document/main.py
+++ b/examples/langchain-python-rag-document/main.py
@@ -51,7 +51,7 @@ while True:
         template=template,
     )
 
-    llm = Ollama(model="llama3:8b", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
+    llm = Ollama(model="llama3.1", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
     qa_chain = RetrievalQA.from_chain_type(
         llm,
         retriever=vectorstore.as_retriever(),
diff --git a/examples/langchain-python-rag-websummary/README.md b/examples/langchain-python-rag-websummary/README.md
index 3f3b9873..29c706a3 100644
--- a/examples/langchain-python-rag-websummary/README.md
+++ b/examples/langchain-python-rag-websummary/README.md
@@ -4,10 +4,10 @@ This example summarizes the website, [https://ollama.com/blog/run-llama2-uncenso
 
 ## Running the Example
 
-1. Ensure you have the `llama2` model installed:
+1. Ensure you have the `llama3.1` model installed:
 
    ```bash
-   ollama pull llama2
+   ollama pull llama3.1
   ```
 
 2. Install the Python Requirements.
diff --git a/examples/langchain-python-rag-websummary/main.py b/examples/langchain-python-rag-websummary/main.py
index d1b05ba8..77b09fbb 100644
--- a/examples/langchain-python-rag-websummary/main.py
+++ b/examples/langchain-python-rag-websummary/main.py
@@ -5,8 +5,8 @@ from langchain.chains.summarize import load_summarize_chain
 loader = WebBaseLoader("https://ollama.com/blog/run-llama2-uncensored-locally")
 docs = loader.load()
 
-llm = Ollama(model="llama3")
+llm = Ollama(model="llama3.1")
 chain = load_summarize_chain(llm, chain_type="stuff")
 
-result = chain.invoke(docs) 
+result = chain.invoke(docs)
 print(result)
diff --git a/examples/langchain-python-simple/README.md b/examples/langchain-python-simple/README.md
index d4102dec..60db2c8c 100644
--- a/examples/langchain-python-simple/README.md
+++ b/examples/langchain-python-simple/README.md
@@ -4,10 +4,10 @@ This example is a basic "hello world" of using LangChain with Ollama.
 
 ## Running the Example
 
-1. Ensure you have the `llama3` model installed:
+1. Ensure you have the `llama3.1` model installed:
 
    ```bash
-   ollama pull llama3
+   ollama pull llama3.1
   ```
 
 2. Install the Python Requirements.
diff --git a/examples/langchain-python-simple/main.py b/examples/langchain-python-simple/main.py
index 7cb65286..a7ed81d6 100644
--- a/examples/langchain-python-simple/main.py
+++ b/examples/langchain-python-simple/main.py
@@ -1,6 +1,6 @@
 from langchain.llms import Ollama
 
 input = input("What is your question?")
-llm = Ollama(model="llama3")
+llm = Ollama(model="llama3.1")
 res = llm.predict(input)
 print (res)
diff --git a/examples/modelfile-mario/Modelfile b/examples/modelfile-mario/Modelfile
index 33d5952b..a3747086 100644
--- a/examples/modelfile-mario/Modelfile
+++ b/examples/modelfile-mario/Modelfile
@@ -1,4 +1,4 @@
-FROM llama3
+FROM llama3.1
 PARAMETER temperature 1
 SYSTEM """
 You are Mario from super mario bros, acting as an assistant.
diff --git a/examples/modelfile-mario/readme.md b/examples/modelfile-mario/readme.md
index e4f0d417..c3f34197 100644
--- a/examples/modelfile-mario/readme.md
+++ b/examples/modelfile-mario/readme.md
@@ -2,12 +2,12 @@
 
 # Example character: Mario
 
-This example shows how to create a basic character using Llama3 as the base model.
+This example shows how to create a basic character using Llama3.1 as the base model.
 
 To run this example:
 
 1. Download the Modelfile
-2. `ollama pull llama3` to get the base model used in the model file.
+2. `ollama pull llama3.1` to get the base model used in the model file.
 3. `ollama create NAME -f ./Modelfile`
 4. `ollama run NAME`
 
@@ -18,7 +18,7 @@ Ask it some questions like "Who are you?" or "Is Peach in trouble again?"
 What the model file looks like:
 
 ```
-FROM llama3
+FROM llama3.1
 PARAMETER temperature 1
 SYSTEM """
 You are Mario from Super Mario Bros, acting as an assistant.
diff --git a/examples/python-dockerit/dockerit.py b/examples/python-dockerit/dockerit.py
index b013102f..6a288d90 100644
--- a/examples/python-dockerit/dockerit.py
+++ b/examples/python-dockerit/dockerit.py
@@ -4,7 +4,7 @@ imageName = input("Enter the name of the image: ")
 client = docker.from_env()
 s = requests.Session()
 output=""
-with s.post('http://localhost:11434/api/generate', json={'model': 'dockerit', 'prompt': inputDescription}, stream=True) as r:
+with s.post('http://localhost:11434/api/generate', json={'model': 'mattw/dockerit', 'prompt': inputDescription}, stream=True) as r:
   for line in r.iter_lines():
     if line:
       j = json.loads(line)
diff --git a/examples/python-json-datagenerator/predefinedschema.py b/examples/python-json-datagenerator/predefinedschema.py
index 1fd54892..68090ad7 100644
--- a/examples/python-json-datagenerator/predefinedschema.py
+++ b/examples/python-json-datagenerator/predefinedschema.py
@@ -2,7 +2,7 @@ import requests
 import json
 import random
 
-model = "llama3"
+model = "llama3.1"
 template = {
   "firstName": "",
   "lastName": "",
diff --git a/examples/python-json-datagenerator/randomaddresses.py b/examples/python-json-datagenerator/randomaddresses.py
index 72b1fefb..878c9803 100644
--- a/examples/python-json-datagenerator/randomaddresses.py
+++ b/examples/python-json-datagenerator/randomaddresses.py
@@ -12,7 +12,7 @@ countries = [
     "France",
 ]
 country = random.choice(countries)
-model = "llama3"
+model = "llama3.1"
 
 prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON. Key names should have no backslashes, values should use plain ascii with no special characters."
 
diff --git a/examples/python-json-datagenerator/readme.md b/examples/python-json-datagenerator/readme.md
index 88357044..5b444dff 100644
--- a/examples/python-json-datagenerator/readme.md
+++ b/examples/python-json-datagenerator/readme.md
@@ -6,10 +6,10 @@ There are two python scripts in this example. `randomaddresses.py` generates ran
 
 ## Running the Example
 
-1. Ensure you have the `llama3` model installed:
+1. Ensure you have the `llama3.1` model installed:
 
   ```bash
-  ollama pull llama3
+  ollama pull llama3.1
  ```
 
 2. Install the Python Requirements.
diff --git a/examples/python-simplechat/client.py b/examples/python-simplechat/client.py
index f82a16b3..85043d5f 100644
--- a/examples/python-simplechat/client.py
+++ b/examples/python-simplechat/client.py
@@ -2,7 +2,7 @@ import json
 import requests
 
 # NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`
-model = "llama3"  # TODO: update this for whatever model you wish to use
+model = "llama3.1"  # TODO: update this for whatever model you wish to use
 
 
 def chat(messages):
diff --git a/examples/python-simplechat/readme.md b/examples/python-simplechat/readme.md
index dd2576bc..4c2ded4d 100644
--- a/examples/python-simplechat/readme.md
+++ b/examples/python-simplechat/readme.md
@@ -4,10 +4,10 @@ The **chat** endpoint is one of two ways to generate text from an LLM with Ollam
 
 ## Running the Example
 
-1. Ensure you have the `llama3` model installed:
+1. Ensure you have the `llama3.1` model installed:
 
   ```bash
-  ollama pull llama3
+  ollama pull llama3.1
  ```
 
 2. Install the Python Requirements.
diff --git a/examples/typescript-simplechat/client.ts b/examples/typescript-simplechat/client.ts
index a1e0eea3..8ad113b1 100644
--- a/examples/typescript-simplechat/client.ts
+++ b/examples/typescript-simplechat/client.ts
@@ -1,6 +1,6 @@
 import * as readline from "readline";
 
-const model = "llama3";
+const model = "llama3.1";
 type Message = {
   role: "assistant" | "user" | "system";
   content: string;
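For anyone trying the updated examples, below is a minimal sketch of how the revised `examples/go-chat` pattern is exercised against a local Ollama server with the `llama3.1` model, using the `github.com/ollama/ollama/api` client shown in the first hunk. It assumes the server is running and `ollama pull llama3.1` has completed; the prompt text and variable names beyond what appears in the diff are illustrative only.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	// Connect using OLLAMA_HOST or the default http://127.0.0.1:11434.
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	req := &api.ChatRequest{
		Model: "llama3.1",
		Messages: []api.Message{
			{Role: "user", Content: "how many planets are there?"},
		},
	}

	// Chat streams by default; print each chunk of the reply as it arrives.
	respFunc := func(resp api.ChatResponse) error {
		fmt.Print(resp.Message.Content)
		return nil
	}

	if err := client.Chat(context.Background(), req, respFunc); err != nil {
		log.Fatal(err)
	}
}
```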