From 92189a58553ac88edf19ea5a8834478bd5fd6d86 Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Thu, 12 Oct 2023 09:34:16 -0700
Subject: [PATCH 1/5] fix memory check

---
 api/client.go   |  3 ++-
 format/bytes.go | 19 +++++++++++++------
 llm/llama.go    |  2 +-
 llm/llm.go      | 21 +++++++++++----------
 4 files changed, 27 insertions(+), 18 deletions(-)

diff --git a/api/client.go b/api/client.go
index f308b233..14ea353d 100644
--- a/api/client.go
+++ b/api/client.go
@@ -14,6 +14,7 @@ import (
 	"runtime"
 	"strings"
 
+	"github.com/jmorganca/ollama/format"
 	"github.com/jmorganca/ollama/version"
 )
 
@@ -127,7 +128,7 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
 	return nil
 }
 
-const maxBufferSize = 512 * 1000 // 512KB
+const maxBufferSize = 512 * format.KiloByte
 
 func (c *Client) stream(ctx context.Context, method, path string, data any, fn func([]byte) error) error {
 	var buf *bytes.Buffer
diff --git a/format/bytes.go b/format/bytes.go
index 63cc7b00..ca5ac640 100644
--- a/format/bytes.go
+++ b/format/bytes.go
@@ -2,14 +2,21 @@ package format
 
 import "fmt"
 
+const (
+	Byte     = 1
+	KiloByte = Byte * 1000
+	MegaByte = KiloByte * 1000
+	GigaByte = MegaByte * 1000
+)
+
 func HumanBytes(b int64) string {
 	switch {
-	case b > 1000*1000*1000:
-		return fmt.Sprintf("%d GB", b/1000/1000/1000)
-	case b > 1000*1000:
-		return fmt.Sprintf("%d MB", b/1000/1000)
-	case b > 1000:
-		return fmt.Sprintf("%d KB", b/1000)
+	case b > GigaByte:
+		return fmt.Sprintf("%d GB", b/GigaByte)
+	case b > MegaByte:
+		return fmt.Sprintf("%d MB", b/MegaByte)
+	case b > KiloByte:
+		return fmt.Sprintf("%d KB", b/KiloByte)
 	default:
 		return fmt.Sprintf("%d B", b)
 	}
diff --git a/llm/llama.go b/llm/llama.go
index 0b460e9a..db51429c 100644
--- a/llm/llama.go
+++ b/llm/llama.go
@@ -509,7 +509,7 @@ type PredictRequest struct {
 	Stop             []string `json:"stop,omitempty"`
 }
 
-const maxBufferSize = 512 * 1000 // 512KB
+const maxBufferSize = 512 * format.KiloByte
 
 func (llm *llama) Predict(ctx context.Context, prevContext []int, prompt string, fn func(api.GenerateResponse)) error {
 	prevConvo, err := llm.Decode(ctx, prevContext)
diff --git a/llm/llm.go b/llm/llm.go
index 6df2a47c..4ae5dd2e 100644
--- a/llm/llm.go
+++ b/llm/llm.go
@@ -10,6 +10,7 @@ import (
 	"github.com/pbnjay/memory"
 
 	"github.com/jmorganca/ollama/api"
+	"github.com/jmorganca/ollama/format"
 )
 
 type LLM interface {
@@ -60,33 +61,33 @@ func New(workDir, model string, adapters []string, opts api.Options) (LLM, error
 	totalResidentMemory := memory.TotalMemory()
 	switch ggml.ModelType() {
 	case "3B", "7B":
-		if ggml.FileType() == "F16" && totalResidentMemory < 16*1000*1000 {
+		if ggml.FileType() == "F16" && totalResidentMemory < 16*format.GigaByte {
 			return nil, fmt.Errorf("F16 model requires at least 16 GB of memory")
-		} else if totalResidentMemory < 8*1000*1000 {
+		} else if totalResidentMemory < 8*format.GigaByte {
 			return nil, fmt.Errorf("model requires at least 8 GB of memory")
 		}
 	case "13B":
-		if ggml.FileType() == "F16" && totalResidentMemory < 32*1000*1000 {
+		if ggml.FileType() == "F16" && totalResidentMemory < 32*format.GigaByte {
 			return nil, fmt.Errorf("F16 model requires at least 32 GB of memory")
-		} else if totalResidentMemory < 16*1000*1000 {
+		} else if totalResidentMemory < 16*format.GigaByte {
 			return nil, fmt.Errorf("model requires at least 16 GB of memory")
 		}
 	case "30B", "34B", "40B":
-		if ggml.FileType() == "F16" && totalResidentMemory < 64*1000*1000 {
+		if ggml.FileType() == "F16" && totalResidentMemory < 64*format.GigaByte {
 			return nil, fmt.Errorf("F16 model requires at least 64 GB of memory")
-		} else if totalResidentMemory < 32*1000*1000 {
+		} else if totalResidentMemory < 32*format.GigaByte {
 			return nil, fmt.Errorf("model requires at least 32 GB of memory")
 		}
 	case "65B", "70B":
-		if ggml.FileType() == "F16" && totalResidentMemory < 128*1000*1000 {
+		if ggml.FileType() == "F16" && totalResidentMemory < 128*format.GigaByte {
 			return nil, fmt.Errorf("F16 model requires at least 128 GB of memory")
-		} else if totalResidentMemory < 64*1000*1000 {
+		} else if totalResidentMemory < 64*format.GigaByte {
 			return nil, fmt.Errorf("model requires at least 64 GB of memory")
 		}
 	case "180B":
-		if ggml.FileType() == "F16" && totalResidentMemory < 512*1000*1000 {
+		if ggml.FileType() == "F16" && totalResidentMemory < 512*format.GigaByte {
 			return nil, fmt.Errorf("F16 model requires at least 512GB of memory")
-		} else if totalResidentMemory < 128*1000*1000 {
+		} else if totalResidentMemory < 128*format.GigaByte {
 			return nil, fmt.Errorf("model requires at least 128GB of memory")
 		}
 	}
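
The bug this patch fixes is the pre-patch comparisons above: 16*1000*1000 is 16 million bytes (16 MB), not 16 GB, so the thresholds were off by a factor of 1000 and the named decimal constants keep the units visible. A standalone sketch of the SI convention they encode, for reference only; it is not part of the patch and assumes the jmorganca/ollama module is importable:

package main

import (
	"fmt"

	"github.com/jmorganca/ollama/format"
)

func main() {
	// The constants are 1000-based, matching the divisions HumanBytes
	// already performed: 1 KB = 1e3, 1 MB = 1e6, 1 GB = 1e9 bytes.
	fmt.Println(format.GigaByte)                          // 1000000000
	fmt.Println(format.HumanBytes(512 * format.KiloByte)) // 512 KB
	fmt.Println(format.HumanBytes(16 * format.GigaByte))  // 16 GB
}
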
From bd6e38fb1afed8c570d6a5f7eb87082f5221426a Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Thu, 12 Oct 2023 09:47:17 -0700
Subject: [PATCH 2/5] refactor memory check

---
 llm/llm.go | 41 +++++++++++++++--------------------------
 1 file changed, 15 insertions(+), 26 deletions(-)

diff --git a/llm/llm.go b/llm/llm.go
index 4ae5dd2e..a2619382 100644
--- a/llm/llm.go
+++ b/llm/llm.go
@@ -58,38 +58,27 @@ func New(workDir, model string, adapters []string, opts api.Options) (LLM, error
 		}
 	}
 
-	totalResidentMemory := memory.TotalMemory()
+	var requiredMemory int64
+	var f16Multiplier int64 = 2
+	totalResidentMemory := int64(memory.TotalMemory())
 	switch ggml.ModelType() {
 	case "3B", "7B":
-		if ggml.FileType() == "F16" && totalResidentMemory < 16*format.GigaByte {
-			return nil, fmt.Errorf("F16 model requires at least 16 GB of memory")
-		} else if totalResidentMemory < 8*format.GigaByte {
-			return nil, fmt.Errorf("model requires at least 8 GB of memory")
-		}
+		requiredMemory = 8 * format.GigaByte
 	case "13B":
-		if ggml.FileType() == "F16" && totalResidentMemory < 32*format.GigaByte {
-			return nil, fmt.Errorf("F16 model requires at least 32 GB of memory")
-		} else if totalResidentMemory < 16*format.GigaByte {
-			return nil, fmt.Errorf("model requires at least 16 GB of memory")
-		}
+		requiredMemory = 16 * format.GigaByte
 	case "30B", "34B", "40B":
-		if ggml.FileType() == "F16" && totalResidentMemory < 64*format.GigaByte {
-			return nil, fmt.Errorf("F16 model requires at least 64 GB of memory")
-		} else if totalResidentMemory < 32*format.GigaByte {
-			return nil, fmt.Errorf("model requires at least 32 GB of memory")
-		}
+		requiredMemory = 32 * format.GigaByte
 	case "65B", "70B":
-		if ggml.FileType() == "F16" && totalResidentMemory < 128*format.GigaByte {
-			return nil, fmt.Errorf("F16 model requires at least 128 GB of memory")
-		} else if totalResidentMemory < 64*format.GigaByte {
-			return nil, fmt.Errorf("model requires at least 64 GB of memory")
-		}
+		requiredMemory = 64 * format.GigaByte
 	case "180B":
-		if ggml.FileType() == "F16" && totalResidentMemory < 512*format.GigaByte {
-			return nil, fmt.Errorf("F16 model requires at least 512GB of memory")
-		} else if totalResidentMemory < 128*format.GigaByte {
-			return nil, fmt.Errorf("model requires at least 128GB of memory")
-		}
+		requiredMemory = 128 * format.GigaByte
+		f16Multiplier = 4
+	}
+
+	if ggml.FileType() == "F16" && requiredMemory*f16Multiplier > totalResidentMemory {
+		return nil, fmt.Errorf("F16 model requires at least %s of memory", format.HumanBytes(requiredMemory*f16Multiplier))
+	} else if requiredMemory > totalResidentMemory {
+		return nil, fmt.Errorf("model requires at least %s of memory", format.HumanBytes(requiredMemory))
 	}
 
 	switch ggml.Name() {
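
Condensed, the refactor turns five copies of the same if/else into a table plus one comparison: a base requirement per model size, doubled for F16 weights (quadrupled for 180B, so that 128 GB x 4 reproduces the old 512 GB floor). A sketch of that arithmetic as a pure function; requiredFor is a hypothetical helper for illustration, not code from the patch:

package main

import (
	"fmt"

	"github.com/jmorganca/ollama/format"
)

// requiredFor returns the minimum memory in bytes for a model size and
// file type, mirroring the table the refactor introduces.
func requiredFor(modelType, fileType string) int64 {
	var required int64
	var f16Multiplier int64 = 2
	switch modelType {
	case "3B", "7B":
		required = 8 * format.GigaByte
	case "13B":
		required = 16 * format.GigaByte
	case "30B", "34B", "40B":
		required = 32 * format.GigaByte
	case "65B", "70B":
		required = 64 * format.GigaByte
	case "180B":
		required = 128 * format.GigaByte
		f16Multiplier = 4
	}
	if fileType == "F16" {
		required *= f16Multiplier
	}
	return required
}

func main() {
	// Spot-check against the thresholds patch 1 hard-coded.
	fmt.Println(format.HumanBytes(requiredFor("7B", "F16")))   // 16 GB
	fmt.Println(format.HumanBytes(requiredFor("180B", "F16"))) // 512 GB
}
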
From 4a8931f634405ec3450c4e61a2619516b280dfc3 Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Thu, 12 Oct 2023 10:36:23 -0700
Subject: [PATCH 3/5] check total (system + video) memory

---
 llm/llm.go | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/llm/llm.go b/llm/llm.go
index a2619382..49405250 100644
--- a/llm/llm.go
+++ b/llm/llm.go
@@ -60,7 +60,7 @@ func New(workDir, model string, adapters []string, opts api.Options) (LLM, error
 
 	var requiredMemory int64
 	var f16Multiplier int64 = 2
-	totalResidentMemory := int64(memory.TotalMemory())
+
 	switch ggml.ModelType() {
 	case "3B", "7B":
 		requiredMemory = 8 * format.GigaByte
@@ -75,10 +75,19 @@ func New(workDir, model string, adapters []string, opts api.Options) (LLM, error
 		f16Multiplier = 4
 	}
 
-	if ggml.FileType() == "F16" && requiredMemory*f16Multiplier > totalResidentMemory {
-		return nil, fmt.Errorf("F16 model requires at least %s of memory", format.HumanBytes(requiredMemory*f16Multiplier))
-	} else if requiredMemory > totalResidentMemory {
-		return nil, fmt.Errorf("model requires at least %s of memory", format.HumanBytes(requiredMemory))
+	systemMemory := int64(memory.TotalMemory())
+
+	videoMemory, err := CheckVRAM()
+	if err != nil {
+		videoMemory = 0
+	}
+
+	totalMemory := systemMemory + videoMemory
+
+	if ggml.FileType() == "F16" && requiredMemory*f16Multiplier > totalMemory {
+		return nil, fmt.Errorf("F16 model requires at least %s of total memory", format.HumanBytes(requiredMemory*f16Multiplier))
+	} else if requiredMemory > totalMemory {
+		return nil, fmt.Errorf("model requires at least %s of total memory", format.HumanBytes(requiredMemory))
 	}
 
 	switch ggml.Name() {

From 36fe2deebf257a7602ab9b3f830ff3b074e04814 Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Fri, 13 Oct 2023 14:41:51 -0700
Subject: [PATCH 4/5] only check system memory on macos

---
 llm/llm.go | 49 +++++++++++++++++++++----------------------------
 1 file changed, 21 insertions(+), 28 deletions(-)

diff --git a/llm/llm.go b/llm/llm.go
index 49405250..e25558f0 100644
--- a/llm/llm.go
+++ b/llm/llm.go
@@ -56,38 +56,31 @@ func New(workDir, model string, adapters []string, opts api.Options) (LLM, error
 				opts.NumGPU = 0
 			}
 		}
-	}
 
-	var requiredMemory int64
-	var f16Multiplier int64 = 2
+		var requiredMemory int64
+		var f16Multiplier int64 = 2
 
-	switch ggml.ModelType() {
-	case "3B", "7B":
-		requiredMemory = 8 * format.GigaByte
-	case "13B":
-		requiredMemory = 16 * format.GigaByte
-	case "30B", "34B", "40B":
-		requiredMemory = 32 * format.GigaByte
-	case "65B", "70B":
-		requiredMemory = 64 * format.GigaByte
-	case "180B":
-		requiredMemory = 128 * format.GigaByte
-		f16Multiplier = 4
-	}
+		switch ggml.ModelType() {
+		case "3B", "7B":
+			requiredMemory = 8 * format.GigaByte
+		case "13B":
+			requiredMemory = 16 * format.GigaByte
+		case "30B", "34B", "40B":
+			requiredMemory = 32 * format.GigaByte
+		case "65B", "70B":
+			requiredMemory = 64 * format.GigaByte
+		case "180B":
+			requiredMemory = 128 * format.GigaByte
+			f16Multiplier = 4
+		}
 
-	systemMemory := int64(memory.TotalMemory())
+		systemMemory := int64(memory.TotalMemory())
 
-	videoMemory, err := CheckVRAM()
-	if err != nil {
-		videoMemory = 0
-	}
-
-	totalMemory := systemMemory + videoMemory
-
-	if ggml.FileType() == "F16" && requiredMemory*f16Multiplier > totalMemory {
-		return nil, fmt.Errorf("F16 model requires at least %s of total memory", format.HumanBytes(requiredMemory*f16Multiplier))
-	} else if requiredMemory > totalMemory {
-		return nil, fmt.Errorf("model requires at least %s of total memory", format.HumanBytes(requiredMemory))
+		if ggml.FileType() == "F16" && requiredMemory*f16Multiplier > systemMemory {
+			return nil, fmt.Errorf("F16 model requires at least %s of total memory", format.HumanBytes(requiredMemory*f16Multiplier))
+		} else if requiredMemory > systemMemory {
+			return nil, fmt.Errorf("model requires at least %s of total memory", format.HumanBytes(requiredMemory))
+		}
 	}
 
 	switch ggml.Name() {
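
Patches 3 and 4 move in opposite directions on purpose: patch 3 widens the budget to system plus video memory, then patch 4 confines the hard failure to macOS, where unified memory means the whole model must fit in system RAM; on other platforms the VRAM probe (patch 5, below) decides how many layers to offload rather than refusing to load at all. The check also stays slightly lenient because the thresholds are decimal while RAM is reported in binary units: a nominal 32 GiB machine reports 34,359,738,368 bytes, comfortably above the 32 * format.GigaByte (32,000,000,000) floor a 13B F16 model needs (16 GB x 2). A minimal sketch of the system-memory probe, illustrative only, assuming the pbnjay/memory dependency already used above:

package main

import (
	"fmt"
	"runtime"

	"github.com/pbnjay/memory"
)

func main() {
	// TotalMemory reports physical RAM in bytes; on darwin this is the
	// whole unified-memory budget, which is why it is the only number
	// the macOS-only check needs.
	total := int64(memory.TotalMemory())
	fmt.Printf("GOOS=%s, system memory: %d bytes\n", runtime.GOOS, total)
}
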
From 11d82d7b9b94b971ee2965d9a683a80e4929097a Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Fri, 13 Oct 2023 14:45:50 -0700
Subject: [PATCH 5/5] update checkvram

---
 llm/llama.go | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/llm/llama.go b/llm/llama.go
index db51429c..80463eeb 100644
--- a/llm/llama.go
+++ b/llm/llama.go
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"github.com/jmorganca/ollama/api"
+	"github.com/jmorganca/ollama/format"
 )
 
 //go:embed llama.cpp/*/build/*/bin/*
@@ -197,7 +198,7 @@ type llama struct {
 
 var errNoGPU = errors.New("nvidia-smi command failed")
 
-// CheckVRAM returns the available VRAM in MiB on Linux machines with NVIDIA GPUs
+// CheckVRAM returns the free VRAM in bytes on Linux machines with NVIDIA GPUs
 func CheckVRAM() (int64, error) {
 	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.free", "--format=csv,noheader,nounits")
 	var stdout bytes.Buffer
@@ -207,7 +208,7 @@ func CheckVRAM() (int64, error) {
 		return 0, errNoGPU
 	}
 
-	var free int64
+	var freeMiB int64
 	scanner := bufio.NewScanner(&stdout)
 	for scanner.Scan() {
 		line := scanner.Text()
@@ -216,15 +217,16 @@ func CheckVRAM() (int64, error) {
 			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
 		}
 
-		free += vram
+		freeMiB += vram
 	}
 
-	if free*1024*1024 < 2*1000*1000*1000 {
+	freeBytes := freeMiB * 1024 * 1024
+	if freeBytes < 2*format.GigaByte {
 		log.Printf("less than 2 GB VRAM available, falling back to CPU only")
-		free = 0
+		freeBytes = 0
 	}
 
-	return free, nil
+	return freeBytes, nil
 }
 
 func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
@@ -232,23 +234,21 @@ func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
 		return opts.NumGPU
 	}
 	if runtime.GOOS == "linux" {
-		vramMib, err := CheckVRAM()
+		freeBytes, err := CheckVRAM()
 		if err != nil {
 			if err.Error() != "nvidia-smi command failed" {
 				log.Print(err.Error())
 			}
 			return 0
 		}
 
-		freeVramBytes := int64(vramMib) * 1024 * 1024 // 1 MiB = 1024^2 bytes
-
 		// Calculate bytes per layer
 		// TODO: this is a rough heuristic, better would be to calculate this based on number of layers and context size
 		bytesPerLayer := fileSizeBytes / numLayer
 
 		// max number of layers we can fit in VRAM, subtract 8% to prevent consuming all available VRAM and running out of memory
-		layers := int(freeVramBytes/bytesPerLayer) * 92 / 100
-		log.Printf("%d MiB VRAM available, loading up to %d GPU layers", vramMib, layers)
+		layers := int(freeBytes/bytesPerLayer) * 92 / 100
+		log.Printf("%d MiB VRAM available, loading up to %d GPU layers", freeBytes/(1024*1024), layers)
 
 		return layers
 	}
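
The CheckVRAM rework is mostly unit hygiene: nvidia-smi reports MiB, the callers budget in bytes, and converting once (freeBytes := freeMiB * 1024 * 1024) is what lets NumGPU drop its own freeVramBytes conversion. A worked example of the layer heuristic with made-up numbers; the file size and layer count are hypothetical, not taken from the patches:

package main

import "fmt"

func main() {
	var (
		freeMiB       int64 = 24576         // nvidia-smi: 24576 MiB free, e.g. one 24 GiB GPU
		fileSizeBytes int64 = 7_365_000_000 // hypothetical 13B Q4 model file
		numLayer      int64 = 40            // hypothetical layer count
	)
	freeBytes := freeMiB * 1024 * 1024 // 25,769,803,776 bytes

	// Same arithmetic as NumGPU above: bytes per layer from the file size,
	// then fill 92% of free VRAM to leave ~8% headroom.
	bytesPerLayer := fileSizeBytes / numLayer // 184,125,000 bytes
	layers := int(freeBytes/bytesPerLayer) * 92 / 100
	fmt.Println(layers) // 127
}

Since 127 exceeds this model's 40 layers, every layer would be offloaded to the GPU; the 2 GB floor in CheckVRAM zeroes freeBytes on marginal GPUs so they take the CPU path instead.
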