From 0e4d653687f81db40622e287a846245b319f1fbe Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sun, 28 Jul 2024 19:56:02 -0700 Subject: [PATCH] update to `llama3.1` elsewhere in repo (#6032) --- app/ollama.iss | 2 +- app/ollama_welcome.ps1 | 2 +- docs/docker.md | 2 +- docs/faq.md | 2 +- docs/tutorials/langchainjs.md | 4 ++-- macapp/src/app.tsx | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/app/ollama.iss b/app/ollama.iss index 6bedb9ff..dc6178f7 100644 --- a/app/ollama.iss +++ b/app/ollama.iss @@ -138,7 +138,7 @@ SetupAppRunningError=Another Ollama installer is running.%n%nPlease cancel or fi ;FinishedHeadingLabel=Run your first model -;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n ollama run llama3 +;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n ollama run llama3.1 ;ClickFinish=%n [Registry] diff --git a/app/ollama_welcome.ps1 b/app/ollama_welcome.ps1 index 9af37a46..46777a3a 100644 --- a/app/ollama_welcome.ps1 +++ b/app/ollama_welcome.ps1 @@ -4,5 +4,5 @@ write-host "Welcome to Ollama!" write-host "" write-host "Run your first model:" write-host "" -write-host "`tollama run llama3" +write-host "`tollama run llama3.1" write-host "" \ No newline at end of file diff --git a/docs/docker.md b/docs/docker.md index 0b58562b..a34c3291 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -63,7 +63,7 @@ docker run -d --device /dev/kfd --device /dev/dri -v ollama:/root/.ollama -p 114 Now you can run a model: ``` -docker exec -it ollama ollama run llama3 +docker exec -it ollama ollama run llama3.1 ``` ### Try different models diff --git a/docs/faq.md b/docs/faq.md index da1848f7..f2f32af4 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -227,7 +227,7 @@ curl http://localhost:11434/api/chat -d '{"model": "mistral"}' To preload a model using the CLI, use the command: ```shell -ollama run llama3 "" +ollama run llama3.1 "" ``` ## How do I keep a model loaded in memory or make it unload immediately? 
diff --git a/docs/tutorials/langchainjs.md b/docs/tutorials/langchainjs.md index 4d60afb6..f925869b 100644 --- a/docs/tutorials/langchainjs.md +++ b/docs/tutorials/langchainjs.md @@ -15,7 +15,7 @@ import { Ollama } from "@langchain/community/llms/ollama"; const ollama = new Ollama({ baseUrl: "http://localhost:11434", - model: "llama3", + model: "llama3.1", }); const answer = await ollama.invoke(`why is the sky blue?`); @@ -23,7 +23,7 @@ const answer = await ollama.invoke(`why is the sky blue?`); console.log(answer); ``` -That will get us the same thing as if we ran `ollama run llama3 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app. +That will get us the same thing as if we ran `ollama run llama3.1 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app. ```bash npm install cheerio diff --git a/macapp/src/app.tsx b/macapp/src/app.tsx index ab17df60..a627e63d 100644 --- a/macapp/src/app.tsx +++ b/macapp/src/app.tsx @@ -19,7 +19,7 @@ export default function () { const [step, setStep] = useState(Step.WELCOME) const [commandCopied, setCommandCopied] = useState(false) - const command = 'ollama run llama3' + const command = 'ollama run llama3.1' return (