diff --git a/.gitignore b/.gitignore
index ba8852d0..713499f0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,5 @@
 .vscode
 .env
 .venv
-*.spec
 dist
 ollama
diff --git a/Dockerfile b/Dockerfile
index edb8590d..cb7a63bb 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,8 +1,6 @@
 FROM golang:1.20
-RUN apt-get update && apt-get install -y cmake
 WORKDIR /go/src/github.com/jmorganca/ollama
 COPY . .
-RUN cmake -S llama -B llama/build && cmake --build llama/build
 RUN CGO_ENABLED=1 go build -ldflags '-linkmode external -extldflags "-static"' .
 
 FROM alpine
diff --git a/README.md b/README.md
index f9c3c62e..1fda2ef1 100644
--- a/README.md
+++ b/README.md
@@ -18,7 +18,6 @@ Run large language models with `llama.cpp`.
 
 - [Download](https://ollama.ai/download) for macOS
 - Download for Windows (coming soon)
-- Docker: `docker run -p 11434:11434 ollama/ollama`
 
 You can also build the [binary from source](#building).
 
diff --git a/llama/ggml-metal.m b/llama/ggml-metal.m
index bce68893..74401ae5 100644
--- a/llama/ggml-metal.m
+++ b/llama/ggml-metal.m
@@ -1,3 +1,5 @@
+// +build darwin
+
 /**
  * llama.cpp - git 5bf2a2771886ee86137e01dbc7492f78fb392066
  *