diff --git a/examples/jupyter-notebook/README.md b/examples/jupyter-notebook/README.md new file mode 100644 index 00000000..fba6802f --- /dev/null +++ b/examples/jupyter-notebook/README.md @@ -0,0 +1,5 @@ +# Ollama Jupyter Notebook + +This example downloads and installs Ollama in a Jupyter instance such as Google Colab. It will start the Ollama service and expose an endpoint using `ngrok` which can be used to communicate with the Ollama instance remotely. + +For best results, use an instance with a GPU accelerator. diff --git a/examples/jupyter-notebook/ollama.ipynb b/examples/jupyter-notebook/ollama.ipynb index b00f8862..d57e2057 100644 --- a/examples/jupyter-notebook/ollama.ipynb +++ b/examples/jupyter-notebook/ollama.ipynb @@ -1,17 +1,5 @@ { "cells": [ - { - "cell_type": "markdown", - "id": "38d57674-b3d5-40f3-ab83-9109df3a7821", - "metadata": {}, - "source": [ - "# Ollama Jupyter Notebook\n", - "\n", - "Ollama is the easiest way to run large language models (LLMs) locally. You can deploy it to macOS by installing the the macOS application, Linux by running the install script (below), and Docker or Kubernetes by pulling the official Ollama Docker image.\n", - "\n", - "For best results, this notebook should be run on a Linux node with a GPU or an environment like Google Colab." - ] - }, { "cell_type": "code", "execution_count": null, @@ -20,7 +8,8 @@ "outputs": [], "source": [ "# Download and run the Ollama Linux install script\n", - "!curl https://ollama.ai/install.sh | sh" + "!curl https://ollama.ai/install.sh | sh\n", + "!command -v systemctl >/dev/null && sudo systemctl stop ollama" ] }, {