Merge pull request #65 from jmorganca/bindings

call llama.cpp directly from go

commit 62620914e9
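The change vendors llama.cpp's sources under llama/ and drops the separate cmake-built binding library, so the Go package can compile and call llama.cpp through cgo alone. A minimal sketch of that pattern (illustrative only, not the code added in this PR; it assumes the vendored llama.h from the pinned revision is on the include path):

```go
package llama

/*
#cgo CPPFLAGS: -I${SRCDIR}
#include <stdlib.h>
#include "llama.h"
*/
import "C"

import (
	"errors"
	"unsafe"
)

// loadModel opens a ggml model file and returns an initialized llama context,
// using the llama.h API as it existed at this llama.cpp revision.
func loadModel(path string) (*C.struct_llama_context, error) {
	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))

	params := C.llama_context_default_params()
	ctx := C.llama_init_from_file(cPath, params)
	if ctx == nil {
		return nil, errors.New("failed to load model " + path)
	}
	return ctx, nil
}
```

The real wrapper in the PR also wires up token streaming and the options defined in api/types.go; this only shows the load step.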
.gitignore (vendored): 3 deletions
@@ -3,8 +3,5 @@
 .env
 .venv
 *.spec
-build
 dist
-__pycache__
 ollama
-ggml-metal.metal
Makefile: deleted (19 lines)
@@ -1,19 +0,0 @@
default: ollama

.PHONY: llama
llama:
	cmake -S llama -B llama/build -DLLAMA_METAL=on
	cmake --build llama/build

.PHONY: ollama
ollama: llama
	go build .

.PHONY: app
app: ollama
	npm install --prefix app
	npm run --prefix app make:sign

clean:
	go clean
	rm -rf llama/build
README.md
@@ -75,7 +75,7 @@ ollama run ~/Downloads/vicuna-7b-v1.3.ggmlv3.q4_1.bin
 ## Building
 
 ```
-make
+go build .
 ```
 
 To run it start the server:
api/types.go: 155 changed lines
@@ -1,5 +1,7 @@
 package api
 
+import "runtime"
+
 type PullRequest struct {
 	Model string `json:"model"`
 }
@@ -14,93 +16,76 @@ type GenerateRequest struct {
 	Model  string `json:"model"`
 	Prompt string `json:"prompt"`
 
-	ModelOptions   *ModelOptions   `json:"model_opts,omitempty"`
-	PredictOptions *PredictOptions `json:"predict_opts,omitempty"`
+	Options `json:"options"`
 }
 
-type ModelOptions struct {
-	ContextSize int    `json:"context_size,omitempty"`
-	Seed        int    `json:"seed,omitempty"`
-	NBatch      int    `json:"n_batch,omitempty"`
-	F16Memory   bool   `json:"memory_f16,omitempty"`
-	MLock       bool   `json:"mlock,omitempty"`
-	MMap        bool   `json:"mmap,omitempty"`
-	VocabOnly   bool   `json:"vocab_only,omitempty"`
-	LowVRAM     bool   `json:"low_vram,omitempty"`
-	Embeddings  bool   `json:"embeddings,omitempty"`
-	NUMA        bool   `json:"numa,omitempty"`
-	NGPULayers  int    `json:"gpu_layers,omitempty"`
-	MainGPU     string `json:"main_gpu,omitempty"`
-	TensorSplit string `json:"tensor_split,omitempty"`
-}
-
-type PredictOptions struct {
-	Seed        int     `json:"seed,omitempty"`
-	Threads     int     `json:"threads,omitempty"`
-	Tokens      int     `json:"tokens,omitempty"`
-	TopK        int     `json:"top_k,omitempty"`
-	Repeat      int     `json:"repeat,omitempty"`
-	Batch       int     `json:"batch,omitempty"`
-	NKeep       int     `json:"nkeep,omitempty"`
-	TopP        float64 `json:"top_p,omitempty"`
-	Temperature float64 `json:"temp,omitempty"`
-	Penalty     float64 `json:"penalty,omitempty"`
-	F16KV       bool
-	DebugMode   bool
-	StopPrompts []string
-	IgnoreEOS   bool `json:"ignore_eos,omitempty"`
-
-	TailFreeSamplingZ float64 `json:"tfs_z,omitempty"`
-	TypicalP          float64 `json:"typical_p,omitempty"`
-	FrequencyPenalty  float64 `json:"freq_penalty,omitempty"`
-	PresencePenalty   float64 `json:"pres_penalty,omitempty"`
-	Mirostat          int     `json:"mirostat,omitempty"`
-	MirostatETA       float64 `json:"mirostat_lr,omitempty"`
-	MirostatTAU       float64 `json:"mirostat_ent,omitempty"`
-	PenalizeNL        bool    `json:"penalize_nl,omitempty"`
-	LogitBias         string  `json:"logit_bias,omitempty"`
-
-	PathPromptCache string
-	MLock           bool `json:"mlock,omitempty"`
-	MMap            bool `json:"mmap,omitempty"`
-	PromptCacheAll  bool
-	PromptCacheRO   bool
-	MainGPU         string
-	TensorSplit     string
-}
-
-var DefaultModelOptions ModelOptions = ModelOptions{
-	ContextSize: 512,
-	Seed:        0,
-	F16Memory:   true,
-	MLock:       false,
-	Embeddings:  true,
-	MMap:        true,
-	LowVRAM:     false,
-}
-
-var DefaultPredictOptions PredictOptions = PredictOptions{
-	Seed:              -1,
-	Threads:           -1,
-	Tokens:            512,
-	Penalty:           1.1,
-	Repeat:            64,
-	Batch:             512,
-	NKeep:             64,
-	TopK:              90,
-	TopP:              0.86,
-	TailFreeSamplingZ: 1.0,
-	TypicalP:          1.0,
-	Temperature:       0.8,
-	FrequencyPenalty:  0.0,
-	PresencePenalty:   0.0,
-	Mirostat:          0,
-	MirostatTAU:       5.0,
-	MirostatETA:       0.1,
-	MMap:              true,
-	StopPrompts:       []string{"llama"},
-}
 
 type GenerateResponse struct {
 	Response string `json:"response"`
 }
+
+type Options struct {
+	Seed int `json:"seed,omitempty"`
+
+	// Backend options
+	UseNUMA bool `json:"numa,omitempty"`
+
+	// Model options
+	NumCtx        int  `json:"num_ctx,omitempty"`
+	NumBatch      int  `json:"num_batch,omitempty"`
+	NumGPU        int  `json:"num_gpu,omitempty"`
+	MainGPU       int  `json:"main_gpu,omitempty"`
+	LowVRAM       bool `json:"low_vram,omitempty"`
+	F16KV         bool `json:"f16_kv,omitempty"`
+	LogitsAll     bool `json:"logits_all,omitempty"`
+	VocabOnly     bool `json:"vocab_only,omitempty"`
+	UseMMap       bool `json:"use_mmap,omitempty"`
+	UseMLock      bool `json:"use_mlock,omitempty"`
+	EmbeddingOnly bool `json:"embedding_only,omitempty"`
+
+	// Predict options
+	RepeatLastN      int     `json:"repeat_last_n,omitempty"`
+	RepeatPenalty    float32 `json:"repeat_penalty,omitempty"`
+	FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
+	PresencePenalty  float32 `json:"presence_penalty,omitempty"`
+	Temperature      float32 `json:"temperature,omitempty"`
+	TopK             int     `json:"top_k,omitempty"`
+	TopP             float32 `json:"top_p,omitempty"`
+	TFSZ             float32 `json:"tfs_z,omitempty"`
+	TypicalP         float32 `json:"typical_p,omitempty"`
+	Mirostat         int     `json:"mirostat,omitempty"`
+	MirostatTau      float32 `json:"mirostat_tau,omitempty"`
+	MirostatEta      float32 `json:"mirostat_eta,omitempty"`
+
+	NumThread int `json:"num_thread,omitempty"`
+}
+
+func DefaultOptions() Options {
+	return Options{
+		Seed: -1,
+
+		UseNUMA: false,
+
+		NumCtx:   512,
+		NumBatch: 512,
+		NumGPU:   1,
+		LowVRAM:  false,
+		F16KV:    true,
+		UseMMap:  true,
+		UseMLock: false,
+
+		RepeatLastN:      512,
+		RepeatPenalty:    1.1,
+		FrequencyPenalty: 0.0,
+		PresencePenalty:  0.0,
+		Temperature:      0.8,
+		TopK:             40,
+		TopP:             0.9,
+		TFSZ:             1.0,
+		TypicalP:         1.0,
+		Mirostat:         0,
+		MirostatTau:      5.0,
+		MirostatEta:      0.1,
+
+		NumThread: runtime.NumCPU(),
+	}
+}
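For illustration (not part of the diff), here is how the new embedded Options field behaves when a GenerateRequest is marshalled. The struct and function names come from api/types.go above; the import path is assumed from the repository name, and the model file name is taken from the README example:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmorganca/ollama/api"
)

func main() {
	// Start from the defaults and override a couple of knobs.
	opts := api.DefaultOptions()
	opts.Temperature = 0.2
	opts.NumCtx = 2048

	req := api.GenerateRequest{
		Model:   "vicuna-7b-v1.3.ggmlv3.q4_1.bin",
		Prompt:  "Why is the sky blue?",
		Options: opts,
	}

	// Because the embedded struct carries a `json:"options"` tag, it is
	// serialized as a nested "options" object rather than being inlined.
	b, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(b))
}
```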
go.mod: 1 addition
@@ -39,6 +39,7 @@ require (
 	golang.org/x/arch v0.3.0 // indirect
 	golang.org/x/crypto v0.10.0 // indirect
 	golang.org/x/net v0.10.0 // indirect
+	golang.org/x/sync v0.3.0
 	golang.org/x/sys v0.10.0 // indirect
 	golang.org/x/term v0.10.0
 	golang.org/x/text v0.10.0 // indirect
go.sum: 2 additions
@@ -99,6 +99,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
llama/.gitignore (vendored): deleted (1 line)
@@ -1 +0,0 @@
build
@ -1,23 +0,0 @@
|
||||||
cmake_minimum_required(VERSION 3.12)
|
|
||||||
project(binding)
|
|
||||||
|
|
||||||
include(FetchContent)
|
|
||||||
|
|
||||||
FetchContent_Declare(
|
|
||||||
llama_cpp
|
|
||||||
GIT_REPOSITORY https://github.com/ggerganov/llama.cpp.git
|
|
||||||
GIT_TAG 55dbb91
|
|
||||||
)
|
|
||||||
|
|
||||||
FetchContent_MakeAvailable(llama_cpp)
|
|
||||||
|
|
||||||
add_library(binding ${CMAKE_CURRENT_SOURCE_DIR}/binding/binding.cpp ${llama_cpp_SOURCE_DIR}/examples/common.cpp)
|
|
||||||
target_include_directories(binding PRIVATE ${llama_cpp_SOURCE_DIR}/examples)
|
|
||||||
target_link_libraries(binding llama ggml_static)
|
|
||||||
|
|
||||||
if (LLAMA_METAL)
|
|
||||||
configure_file(${llama_cpp_SOURCE_DIR}/ggml-metal.metal ${CMAKE_CURRENT_BINARY_DIR}/../../ggml-metal.metal COPYONLY)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
add_custom_target(copy_libllama ALL COMMAND ${CMAKE_COMMAND} -E copy_if_different $<TARGET_FILE:llama> ${CMAKE_CURRENT_BINARY_DIR})
|
|
||||||
add_custom_target(copy_libggml_static ALL COMMAND ${CMAKE_COMMAND} -E copy_if_different $<TARGET_FILE:ggml_static> ${CMAKE_CURRENT_BINARY_DIR})
|
|
|
llama/binding/binding.cpp: deleted (705 lines)
@@ -1,705 +0,0 @@
// MIT License

// Copyright (c) 2023 go-skynet authors

// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:

// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

#include "common.h"
#include "llama.h"

#include "binding.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iostream>
#include <regex>
#include <sstream>
#include <string>
#include <vector>
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <signal.h>
#include <windows.h>
#endif

#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || \
    defined(_WIN32)
void sigint_handler(int signo) {
  if (signo == SIGINT) {
    _exit(130);
  }
}
#endif

int get_embeddings(void *params_ptr, void *state_pr, float *res_embeddings) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;
  gpt_params params = *params_p;

  if (params.seed <= 0) {
    params.seed = time(NULL);
  }

  std::mt19937 rng(params.seed);

  llama_init_backend(params.numa);

  int n_past = 0;

  // Add a space in front of the first character to match OG llama tokenizer
  // behavior
  params.prompt.insert(0, 1, ' ');

  // tokenize the prompt
  auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

  // determine newline token
  auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

  if (embd_inp.size() > 0) {
    if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past,
                   params.n_threads)) {
      fprintf(stderr, "%s : failed to eval\n", __func__);
      return 1;
    }
  }

  const int n_embd = llama_n_embd(ctx);

  const auto embeddings = llama_get_embeddings(ctx);

  for (int i = 0; i < n_embd; i++) {
    res_embeddings[i] = embeddings[i];
  }

  return 0;
}

int get_token_embeddings(void *params_ptr, void *state_pr, int *tokens,
                         int tokenSize, float *res_embeddings) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;
  gpt_params params = *params_p;

  for (int i = 0; i < tokenSize; i++) {
    auto token_str = llama_token_to_str(ctx, tokens[i]);
    if (token_str == nullptr) {
      continue;
    }
    std::vector<std::string> my_vector;
    std::string str_token(token_str); // create a new std::string from the char*
    params_p->prompt += str_token;
  }

  return get_embeddings(params_ptr, state_pr, res_embeddings);
}

int eval(void *params_ptr, void *state_pr, char *text) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;

  auto n_past = 0;
  auto last_n_tokens_data =
      std::vector<llama_token>(params_p->repeat_last_n, 0);

  auto tokens = std::vector<llama_token>(params_p->n_ctx);
  auto n_prompt_tokens =
      llama_tokenize(ctx, text, tokens.data(), tokens.size(), true);

  if (n_prompt_tokens < 1) {
    fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
    return 1;
  }

  // evaluate prompt
  return llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past,
                    params_p->n_threads);
}

int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;

  gpt_params params = *params_p;

  const int n_ctx = llama_n_ctx(ctx);

  if (params.seed <= 0) {
    params.seed = time(NULL);
  }

  std::mt19937 rng(params.seed);

  std::string path_session = params.path_prompt_cache;
  std::vector<llama_token> session_tokens;

  if (!path_session.empty()) {
    if (debug) {
      fprintf(stderr, "%s: attempting to load saved session from '%s'\n",
              __func__, path_session.c_str());
    }
    // fopen to check for existing session
    FILE *fp = std::fopen(path_session.c_str(), "rb");
    if (fp != NULL) {
      std::fclose(fp);

      session_tokens.resize(n_ctx);
      size_t n_token_count_out = 0;
      if (!llama_load_session_file(
              ctx, path_session.c_str(), session_tokens.data(),
              session_tokens.capacity(), &n_token_count_out)) {
        fprintf(stderr, "%s: error: failed to load session file '%s'\n",
                __func__, path_session.c_str());
        return 1;
      }
      session_tokens.resize(n_token_count_out);
      llama_set_rng_seed(ctx, params.seed);
      if (debug) {
        fprintf(stderr, "%s: loaded a session with prompt size of %d tokens\n",
                __func__, (int)session_tokens.size());
      }
    } else {
      if (debug) {
        fprintf(stderr, "%s: session file does not exist, will create\n",
                __func__);
      }
    }
  }

  std::vector<llama_token> embd_inp;
  if (!params.prompt.empty() || session_tokens.empty()) {
    // Add a space in front of the first character to match OG llama tokenizer
    // behavior
    params.prompt.insert(0, 1, ' ');

    embd_inp = ::llama_tokenize(ctx, params.prompt, true);
  } else {
    embd_inp = session_tokens;
  }

  // debug message about similarity of saved session, if applicable
  size_t n_matching_session_tokens = 0;
  if (session_tokens.size()) {
    for (llama_token id : session_tokens) {
      if (n_matching_session_tokens >= embd_inp.size() ||
          id != embd_inp[n_matching_session_tokens]) {
        break;
      }
      n_matching_session_tokens++;
    }
    if (debug) {
      if (params.prompt.empty() &&
          n_matching_session_tokens == embd_inp.size()) {
        fprintf(stderr, "%s: using full prompt from session file\n", __func__);
      } else if (n_matching_session_tokens >= embd_inp.size()) {
        fprintf(stderr, "%s: session file has exact match for prompt!\n",
                __func__);
      } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
        fprintf(stderr,
                "%s: warning: session file has low similarity to prompt (%zu / "
                "%zu tokens); will mostly be reevaluated\n",
                __func__, n_matching_session_tokens, embd_inp.size());
      } else {
        fprintf(stderr, "%s: session file matches %zu / %zu tokens of prompt\n",
                __func__, n_matching_session_tokens, embd_inp.size());
      }
    }
  }
  // if we will use the cache for the full prompt without reaching the end of
  // the cache, force reevaluation of the last token token to recalculate the
  // cached logits
  if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() &&
      session_tokens.size() > embd_inp.size()) {
    session_tokens.resize(embd_inp.size() - 1);
  }
  // number of tokens to keep when resetting context
  if (params.n_keep < 0 || params.n_keep > (int)embd_inp.size()) {
    params.n_keep = (int)embd_inp.size();
  }

  // determine newline token
  auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

  // TODO: replace with ring-buffer
  std::vector<llama_token> last_n_tokens(n_ctx);
  std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);

  bool need_to_save_session =
      !path_session.empty() && n_matching_session_tokens < embd_inp.size();
  int n_past = 0;
  int n_remain = params.n_predict;
  int n_consumed = 0;
  int n_session_consumed = 0;

  std::vector<llama_token> embd;
  std::string res = "";

  // do one empty run to warm up the model
  {
    const std::vector<llama_token> tmp = {
        llama_token_bos(),
    };
    llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
    llama_reset_timings(ctx);
  }

  while (n_remain != 0) {
    // predict
    if (embd.size() > 0) {
      // infinite text generation via context swapping
      // if we run out of context:
      // - take the n_keep first tokens from the original prompt (via n_past)
      // - take half of the last (n_ctx - n_keep) tokens and recompute the
      // logits in batches
      if (n_past + (int)embd.size() > n_ctx) {
        const int n_left = n_past - params.n_keep;

        // always keep the first token - BOS
        n_past = std::max(1, params.n_keep);

        // insert n_left/2 tokens at the start of embd from last_n_tokens
        embd.insert(embd.begin(),
                    last_n_tokens.begin() + n_ctx - n_left / 2 - embd.size(),
                    last_n_tokens.end() - embd.size());

        // stop saving session if we run out of context
        path_session.clear();

        // printf("\n---\n");
        // printf("resetting: '");
        // for (int i = 0; i < (int) embd.size(); i++) {
        //   printf("%s", llama_token_to_str(ctx, embd[i]));
        // }
        // printf("'\n");
        // printf("\n---\n");
      }

      // try to reuse a matching prefix from the loaded session instead of
      // re-eval (via n_past)
      if (n_session_consumed < (int)session_tokens.size()) {
        size_t i = 0;
        for (; i < embd.size(); i++) {
          if (embd[i] != session_tokens[n_session_consumed]) {
            session_tokens.resize(n_session_consumed);
            break;
          }

          n_past++;
          n_session_consumed++;

          if (n_session_consumed >= (int)session_tokens.size()) {
            ++i;
            break;
          }
        }
        if (i > 0) {
          embd.erase(embd.begin(), embd.begin() + i);
        }
      }

      // evaluate tokens in batches
      // embd is typically prepared beforehand to fit within a batch, but not
      // always
      for (int i = 0; i < (int)embd.size(); i += params.n_batch) {
        int n_eval = (int)embd.size() - i;
        if (n_eval > params.n_batch) {
          n_eval = params.n_batch;
        }
        if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) {
          fprintf(stderr, "%s : failed to eval\n", __func__);
          return 1;
        }
        n_past += n_eval;
      }

      if (embd.size() > 0 && !path_session.empty()) {
        session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
        n_session_consumed = session_tokens.size();
      }
    }

    embd.clear();

    if ((int)embd_inp.size() <= n_consumed) {
      // out of user input, sample next token
      const float temp = params.temp;
      const int32_t top_k =
          params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
      const float top_p = params.top_p;
      const float tfs_z = params.tfs_z;
      const float typical_p = params.typical_p;
      const int32_t repeat_last_n =
          params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
      const float repeat_penalty = params.repeat_penalty;
      const float alpha_presence = params.presence_penalty;
      const float alpha_frequency = params.frequency_penalty;
      const int mirostat = params.mirostat;
      const float mirostat_tau = params.mirostat_tau;
      const float mirostat_eta = params.mirostat_eta;
      const bool penalize_nl = params.penalize_nl;

      // optionally save the session on first sample (for faster prompt loading
      // next time)
      if (!path_session.empty() && need_to_save_session &&
          !params.prompt_cache_ro) {
        need_to_save_session = false;
        llama_save_session_file(ctx, path_session.c_str(),
                                session_tokens.data(), session_tokens.size());
      }

      llama_token id = 0;

      {
        auto logits = llama_get_logits(ctx);
        auto n_vocab = llama_n_vocab(ctx);

        // Apply params.logit_bias map
        for (auto it = params.logit_bias.begin(); it != params.logit_bias.end();
             it++) {
          logits[it->first] += it->second;
        }

        std::vector<llama_token_data> candidates;
        candidates.reserve(n_vocab);
        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
          candidates.emplace_back(
              llama_token_data{token_id, logits[token_id], 0.0f});
        }

        llama_token_data_array candidates_p = {candidates.data(),
                                               candidates.size(), false};

        // Apply penalties
        float nl_logit = logits[llama_token_nl()];
        auto last_n_repeat =
            std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
        llama_sample_repetition_penalty(
            ctx, &candidates_p,
            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
            last_n_repeat, repeat_penalty);
        llama_sample_frequency_and_presence_penalties(
            ctx, &candidates_p,
            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
            last_n_repeat, alpha_frequency, alpha_presence);
        if (!penalize_nl) {
          logits[llama_token_nl()] = nl_logit;
        }

        if (temp <= 0) {
          // Greedy sampling
          id = llama_sample_token_greedy(ctx, &candidates_p);
        } else {
          if (mirostat == 1) {
            static float mirostat_mu = 2.0f * mirostat_tau;
            const int mirostat_m = 100;
            llama_sample_temperature(ctx, &candidates_p, temp);
            id = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau,
                                             mirostat_eta, mirostat_m,
                                             &mirostat_mu);
          } else if (mirostat == 2) {
            static float mirostat_mu = 2.0f * mirostat_tau;
            llama_sample_temperature(ctx, &candidates_p, temp);
            id = llama_sample_token_mirostat_v2(
                ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
          } else {
            // Temperature sampling
            llama_sample_top_k(ctx, &candidates_p, top_k, 1);
            llama_sample_tail_free(ctx, &candidates_p, tfs_z, 1);
            llama_sample_typical(ctx, &candidates_p, typical_p, 1);
            llama_sample_top_p(ctx, &candidates_p, top_p, 1);
            llama_sample_temperature(ctx, &candidates_p, temp);
            id = llama_sample_token(ctx, &candidates_p);
          }
        }
        // printf("`%d`", candidates_p.size);

        last_n_tokens.erase(last_n_tokens.begin());
        last_n_tokens.push_back(id);
      }

      // add it to the context
      embd.push_back(id);

      // decrement remaining sampling budget
      --n_remain;

      // call the token callback, no need to check if one is actually
      // registered, that will be handled on the Go side.
      auto token_str = llama_token_to_str(ctx, id);
      if (!tokenCallback(state_pr, (char *)token_str)) {
        break;
      }
    } else {
      // some user input remains from prompt or interaction, forward it to
      // processing
      while ((int)embd_inp.size() > n_consumed) {
        embd.push_back(embd_inp[n_consumed]);
        last_n_tokens.erase(last_n_tokens.begin());
        last_n_tokens.push_back(embd_inp[n_consumed]);
        ++n_consumed;
        if ((int)embd.size() >= params.n_batch) {
          break;
        }
      }
    }

    for (auto id : embd) {
      res += llama_token_to_str(ctx, id);
    }

    // check for stop prompt
    if (params.antiprompt.size()) {
      std::string last_output;
      for (auto id : last_n_tokens) {
        last_output += llama_token_to_str(ctx, id);
      }
      // Check if each of the reverse prompts appears at the end of the output.
      for (std::string &antiprompt : params.antiprompt) {
        // size_t extra_padding = params.interactive ? 0 : 2;
        size_t extra_padding = 2;
        size_t search_start_pos =
            last_output.length() >
                    static_cast<size_t>(antiprompt.length() + extra_padding)
                ? last_output.length() -
                      static_cast<size_t>(antiprompt.length() + extra_padding)
                : 0;

        if (last_output.find(antiprompt.c_str(), search_start_pos) !=
            std::string::npos) {
          goto end;
        }
      }
    }

    // end of text token
    if (!embd.empty() && embd.back() == llama_token_eos()) {
      break;
    }
  }

  if (!path_session.empty() && params.prompt_cache_all &&
      !params.prompt_cache_ro) {
    if (debug) {
      fprintf(stderr, "\n%s: saving final output to session file '%s'\n",
              __func__, path_session.c_str());
    }
    llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(),
                            session_tokens.size());
  }

end:
#if defined(_WIN32)
  signal(SIGINT, SIG_DFL);
#endif

  if (debug) {
    llama_print_timings(ctx);
    llama_reset_timings(ctx);
  }

  strcpy(result, res.c_str());
  return 0;
}

void llama_binding_free_model(void *state_ptr) {
  llama_context *ctx = (llama_context *)state_ptr;
  llama_free(ctx);
}

void llama_free_params(void *params_ptr) {
  gpt_params *params = (gpt_params *)params_ptr;
  delete params;
}

int load_state(void *ctx, char *statefile, char *modes) {
  llama_context *state = (llama_context *)ctx;
  const llama_context *constState = static_cast<const llama_context *>(state);
  const size_t state_size = llama_get_state_size(state);
  uint8_t *state_mem = new uint8_t[state_size];

  {
    FILE *fp_read = fopen(statefile, modes);
    if (state_size != llama_get_state_size(constState)) {
      fprintf(stderr, "\n%s : failed to validate state size\n", __func__);
      return 1;
    }

    const size_t ret = fread(state_mem, 1, state_size, fp_read);
    if (ret != state_size) {
      fprintf(stderr, "\n%s : failed to read state\n", __func__);
      return 1;
    }

    llama_set_state_data(
        state, state_mem); // could also read directly from memory mapped file
    fclose(fp_read);
  }

  return 0;
}

void save_state(void *ctx, char *dst, char *modes) {
  llama_context *state = (llama_context *)ctx;

  const size_t state_size = llama_get_state_size(state);
  uint8_t *state_mem = new uint8_t[state_size];

  // Save state (rng, logits, embedding and kv_cache) to file
  {
    FILE *fp_write = fopen(dst, modes);
    llama_copy_state_data(
        state, state_mem); // could also copy directly to memory mapped file
    fwrite(state_mem, 1, state_size, fp_write);
    fclose(fp_write);
  }
}

void *llama_allocate_params(
    const char *prompt, int seed, int threads, int tokens, int top_k,
    float top_p, float temp, float repeat_penalty, int repeat_last_n,
    bool ignore_eos, bool memory_f16, int n_batch, int n_keep,
    const char **antiprompt, int antiprompt_count, float tfs_z, float typical_p,
    float frequency_penalty, float presence_penalty, int mirostat,
    float mirostat_eta, float mirostat_tau, bool penalize_nl,
    const char *logit_bias, bool mlock, bool mmap, const char *maingpu,
    const char *tensorsplit) {
  gpt_params *params = new gpt_params;
  params->seed = seed;
  params->n_threads = threads;
  params->n_predict = tokens;
  params->repeat_last_n = repeat_last_n;
  params->top_k = top_k;
  params->top_p = top_p;
  params->memory_f16 = memory_f16;
  params->temp = temp;
  params->use_mmap = mmap;
  params->use_mlock = mlock;
  params->repeat_penalty = repeat_penalty;
  params->n_batch = n_batch;
  params->n_keep = n_keep;
  if (maingpu[0] != '\0') {
    params->main_gpu = std::stoi(maingpu);
  }

  if (tensorsplit[0] != '\0') {
    std::string arg_next = tensorsplit;
    // split string by , and /
    const std::regex regex{R"([,/]+)"};
    std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
    std::vector<std::string> split_arg{it, {}};
    GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

    for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
      if (i < split_arg.size()) {
        params->tensor_split[i] = std::stof(split_arg[i]);
      } else {
        params->tensor_split[i] = 0.0f;
      }
    }
  }

  if (ignore_eos) {
    params->logit_bias[llama_token_eos()] = -INFINITY;
  }

  for (int i = 0; i < antiprompt_count; i++) {
    params->antiprompt.push_back(antiprompt[i]);
  }

  params->tfs_z = tfs_z;
  params->typical_p = typical_p;
  params->presence_penalty = presence_penalty;
  params->mirostat = mirostat;
  params->mirostat_eta = mirostat_eta;
  params->mirostat_tau = mirostat_tau;
  params->penalize_nl = penalize_nl;
  std::stringstream ss(logit_bias);
  llama_token key;
  char sign;
  std::string value_str;
  if (ss >> key && ss >> sign && std::getline(ss, value_str) &&
      (sign == '+' || sign == '-')) {
    params->logit_bias[key] =
        std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
  }
  params->frequency_penalty = frequency_penalty;
  params->prompt = prompt;

  return params;
}

void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
                 bool mlock, bool embeddings, bool mmap, bool low_vram,
                 bool vocab_only, int n_gpu_layers, int n_batch,
                 const char *maingpu, const char *tensorsplit, bool numa) {
  // load the model
  auto lparams = llama_context_default_params();

  lparams.n_ctx = n_ctx;
  lparams.seed = n_seed;
  lparams.f16_kv = memory_f16;
  lparams.embedding = embeddings;
  lparams.use_mlock = mlock;
  lparams.n_gpu_layers = n_gpu_layers;
  lparams.use_mmap = mmap;
  lparams.low_vram = low_vram;
  lparams.vocab_only = vocab_only;

  if (maingpu[0] != '\0') {
    lparams.main_gpu = std::stoi(maingpu);
  }

  if (tensorsplit[0] != '\0') {
    std::string arg_next = tensorsplit;
    // split string by , and /
    const std::regex regex{R"([,/]+)"};
    std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
    std::vector<std::string> split_arg{it, {}};
    GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

    for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
      if (i < split_arg.size()) {
        lparams.tensor_split[i] = std::stof(split_arg[i]);
      } else {
        lparams.tensor_split[i] = 0.0f;
      }
    }
  }

  lparams.n_batch = n_batch;

  llama_init_backend(numa);
  void *res = nullptr;
  try {
    res = llama_init_from_file(fname, lparams);
  } catch (std::runtime_error &e) {
    fprintf(stderr, "failed %s", e.what());
    return res;
  }

  return res;
}
@ -1,69 +0,0 @@
|
||||||
// MIT License
|
|
||||||
|
|
||||||
// Copyright (c) 2023 go-skynet authors
|
|
||||||
|
|
||||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
// of this software and associated documentation files (the "Software"), to deal
|
|
||||||
// in the Software without restriction, including without limitation the rights
|
|
||||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
// copies of the Software, and to permit persons to whom the Software is
|
|
||||||
// furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
// The above copyright notice and this permission notice shall be included in all
|
|
||||||
// copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
// SOFTWARE.
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
|
|
||||||
extern "C" {
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#include <stdbool.h>
|
|
||||||
|
|
||||||
extern unsigned char tokenCallback(void *, char *);
|
|
||||||
|
|
||||||
int load_state(void *ctx, char *statefile, char *modes);
|
|
||||||
|
|
||||||
int eval(void *params_ptr, void *ctx, char *text);
|
|
||||||
|
|
||||||
void save_state(void *ctx, char *dst, char *modes);
|
|
||||||
|
|
||||||
void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
|
|
||||||
bool mlock, bool embeddings, bool mmap, bool low_vram,
|
|
||||||
bool vocab_only, int n_gpu, int n_batch, const char *maingpu,
|
|
||||||
const char *tensorsplit, bool numa);
|
|
||||||
|
|
||||||
int get_embeddings(void *params_ptr, void *state_pr, float *res_embeddings);
|
|
||||||
|
|
||||||
int get_token_embeddings(void *params_ptr, void *state_pr, int *tokens,
|
|
||||||
int tokenSize, float *res_embeddings);
|
|
||||||
|
|
||||||
void *llama_allocate_params(
|
|
||||||
const char *prompt, int seed, int threads, int tokens, int top_k,
|
|
||||||
float top_p, float temp, float repeat_penalty, int repeat_last_n,
|
|
||||||
bool ignore_eos, bool memory_f16, int n_batch, int n_keep,
|
|
||||||
const char **antiprompt, int antiprompt_count, float tfs_z, float typical_p,
|
|
||||||
float frequency_penalty, float presence_penalty, int mirostat,
|
|
||||||
float mirostat_eta, float mirostat_tau, bool penalize_nl,
|
|
||||||
const char *logit_bias, bool mlock, bool mmap, const char *maingpu,
|
|
||||||
const char *tensorsplit);
|
|
||||||
|
|
||||||
void llama_free_params(void *params_ptr);
|
|
||||||
|
|
||||||
void llama_binding_free_model(void *state);
|
|
||||||
|
|
||||||
int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug);
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif
|
|
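The removed binding.h expected the host program to supply tokenCallback (the extern declaration above), which is how binding.cpp's llama_predict streamed each sampled token back to Go. The usual cgo pattern for providing such a hook looks roughly like this, as a sketch only and not the removed ollama code:

```go
package llama

// #include <stdbool.h>
import "C"

import (
	"fmt"
	"unsafe"
)

// tokenCallback is exported to C so the C prediction loop can invoke it for
// every generated token; returning 0 asks the C side to stop generating early.
//
//export tokenCallback
func tokenCallback(statePtr unsafe.Pointer, token *C.char) C.uchar {
	fmt.Print(C.GoString(token))
	return 1
}
```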
llama/ggml-cuda.cu: new file (3414 lines); diff suppressed because it is too large
llama/ggml-cuda.h: new file (62 lines)
@@ -0,0 +1,62 @@
/**
 * llama.cpp - git 5bf2a2771886ee86137e01dbc7492f78fb392066
 *
 * MIT License
 *
 * Copyright (c) 2023 Georgi Gerganov
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif

#define GGML_CUDA_MAX_DEVICES 16

void ggml_init_cublas(void);
void ggml_cuda_set_tensor_split(const float * tensor_split);

void ggml_cuda_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
size_t ggml_cuda_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
void ggml_cuda_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);

// TODO: export these with GGML_API
void * ggml_cuda_host_malloc(size_t size);
void ggml_cuda_host_free(void * ptr);

void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor);

void ggml_cuda_free_data(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor);
void ggml_cuda_set_main_device(int main_device);
void ggml_cuda_set_scratch_size(size_t scratch_size);
void ggml_cuda_free_scratch(void);
bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);

#ifdef __cplusplus
}
#endif
llama/ggml-metal.h: new file (97 lines)
@@ -0,0 +1,97 @@
/**
 * llama.cpp - git 5bf2a2771886ee86137e01dbc7492f78fb392066
 *
 * MIT License
 *
 * Copyright (c) 2023 Georgi Gerganov
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

// An interface allowing to compute ggml_cgraph with Metal
//
// This is a fully functional interface that extends ggml with GPU support for Apple devices.
// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.)
//
// How it works?
//
// As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this
// interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you
// use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.)
//
// You only need to make sure that all memory buffers that you used during the graph creation
// are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is
// used during the graph evaluation to determine the arguments of the compute kernels.
//
// Synchronization between device and host memory (for example for input and output tensors)
// is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions.
//

#pragma once

#include <stddef.h>
#include <stdbool.h>

// max memory buffers that can be mapped to the device
#define GGML_METAL_MAX_BUFFERS 16

struct ggml_tensor;
struct ggml_cgraph;

#ifdef __cplusplus
extern "C" {
#endif

struct ggml_metal_context;

// number of command buffers to use
struct ggml_metal_context * ggml_metal_init(int n_cb);
void ggml_metal_free(struct ggml_metal_context * ctx);

// set the number of command buffers to use
void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb);

// creates a mapping between a host memory buffer and a device memory buffer
// - make sure to map all buffers used in the graph before calling ggml_metal_graph_compute
// - the mapping is used during computation to determine the arguments of the compute kernels
// - you don't need to keep the host memory buffer allocated as it is never accessed by Metal
// - max_size specifies the maximum size of a tensor and is used to create shared views such
//   that it is guaranteed that the tensor will fit in at least one of the views
//
bool ggml_metal_add_buffer(
        struct ggml_metal_context * ctx,
        const char * name,
        void * data,
        size_t size,
        size_t max_size);

// set data from host memory into the device
void ggml_metal_set_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);

// get data from the device into host memory
void ggml_metal_get_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);

// same as ggml_graph_compute but uses Metal
// creates gf->n_threads command buffers in parallel
void ggml_metal_graph_compute(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);

#ifdef __cplusplus
}
#endif
llama/ggml-metal.m: new file (1014 lines); diff suppressed because it is too large
llama/ggml-metal.metal: new file (1855 lines); diff suppressed because it is too large
llama/ggml.c: new file (18380 lines); diff suppressed because it is too large
llama/ggml.h: new file (1575 lines); diff suppressed because it is too large
llama/k_quants.c: new file (3926 lines); diff suppressed because it is too large
llama/k_quants.h: new file (183 lines)
@@ -0,0 +1,183 @@
/**
 * llama.cpp - git 5bf2a2771886ee86137e01dbc7492f78fb392066
 *
 * MIT License
 *
 * Copyright (c) 2023 Georgi Gerganov
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#include "ggml.h"

#include <stdint.h>
#include <assert.h>
#include <stddef.h>

// Super-block size
#ifdef GGML_QKK_64
#define QK_K 64
#define K_SCALE_SIZE 4
#else
#define QK_K 256
#define K_SCALE_SIZE 12
#endif

//
// Super-block quantization structures
//

// 2-bit quantization
// weight is represented as x = a * q + b
// 16 blocks of 16 elemenets each
// Effectively 2.5625 bits per weight
typedef struct {
    uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
    uint8_t qs[QK_K/4];      // quants
    ggml_fp16_t d;           // super-block scale for quantized scales
    ggml_fp16_t dmin;        // super-block scale for quantized mins
} block_q2_K;
static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");

// 3-bit quantization
// weight is represented as x = a * q
// 16 blocks of 16 elemenets each
// Effectively 3.4375 bits per weight
#ifdef GGML_QKK_64
typedef struct {
    uint8_t hmask[QK_K/8]; // quants - high bit
    uint8_t qs[QK_K/4];    // quants - low 2 bits
    uint8_t scales[2];
    ggml_fp16_t d;         // super-block scale
} block_q3_K;
static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 2, "wrong q3_K block size/padding");
#else
typedef struct {
    uint8_t hmask[QK_K/8]; // quants - high bit
    uint8_t qs[QK_K/4];    // quants - low 2 bits
    uint8_t scales[12];    // scales, quantized with 6 bits
    ggml_fp16_t d;         // super-block scale
} block_q3_K;
static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding");
#endif

// 4-bit quantization
// 16 blocks of 32 elements each
// weight is represented as x = a * q + b
// Effectively 4.5 bits per weight
#ifdef GGML_QKK_64
typedef struct {
    ggml_fp16_t d[2];   // super-block scales/mins
    uint8_t scales[2];  // 4-bit block scales/mins
    uint8_t qs[QK_K/2]; // 4--bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + QK_K/2 + 2, "wrong q4_K block size/padding");
#else
typedef struct {
    ggml_fp16_t d;                // super-block scale for quantized scales
    ggml_fp16_t dmin;             // super-block scale for quantized mins
    uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
    uint8_t qs[QK_K/2];           // 4--bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2, "wrong q4_K block size/padding");
#endif

// 5-bit quantization
// 16 blocks of 32 elements each
// weight is represented as x = a * q + b
// Effectively 5.5 bits per weight
#ifdef GGML_QKK_64
typedef struct {
    ggml_fp16_t d;          // super-block scale
    int8_t scales[QK_K/16]; // 8-bit block scales
    uint8_t qh[QK_K/8];     // quants, high bit
    uint8_t qs[QK_K/2];     // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding");
#else
typedef struct {
    ggml_fp16_t d;                // super-block scale for quantized scales
    ggml_fp16_t dmin;             // super-block scale for quantized mins
    uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
    uint8_t qh[QK_K/8];           // quants, high bit
    uint8_t qs[QK_K/2];           // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");
#endif

// 6-bit quantization
// weight is represented as x = a * q
// 16 blocks of 16 elemenets each
// Effectively 6.5625 bits per weight
typedef struct {
    uint8_t ql[QK_K/2];     // quants, lower 4 bits
    uint8_t qh[QK_K/4];     // quants, upper 2 bits
    int8_t scales[QK_K/16]; // scales, quantized with 8 bits
    ggml_fp16_t d;          // super-block scale
} block_q6_K;
static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + QK_K / 16 + 3*QK_K/4, "wrong q6_K block size/padding");

// This is only used for intermediate quantization and dot products
typedef struct {
    float d;                // delta
    int8_t qs[QK_K];        // quants
    int16_t bsums[QK_K/16]; // sum of quants in groups of 16
} block_q8_K;
static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding");


// Quantization
void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k);
void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k);
void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k);
void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k);
void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k);
void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k);

void quantize_row_q2_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q3_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q4_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q5_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q6_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q8_K(const float * restrict x, void * restrict y, int k);

// Dequantization
void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k);
void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k);
void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k);
void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k);
void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k);
void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k);

// Dot product
void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);

// Quantization with histogram collection
size_t ggml_quantize_q2_K(const float * src, void * dst, int n, int k, int64_t * hist);
size_t ggml_quantize_q3_K(const float * src, void * dst, int n, int k, int64_t * hist);
size_t ggml_quantize_q4_K(const float * src, void * dst, int n, int k, int64_t * hist);
|
||||||
|
size_t ggml_quantize_q5_K(const float * src, void * dst, int n, int k, int64_t * hist);
|
||||||
|
size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist);
|
||||||
|
|
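Editor's note: the "effectively N bits per weight" figures above follow directly from the super-block layouts, i.e. total block bytes × 8 divided by the QK_K weights in a super-block. For example, block_q4_K is 2·2 + 12 + 128 = 144 bytes, which is 1152 bits for 256 weights, or 4.5 bits per weight. Below is a minimal Go sketch (not part of this change) that reproduces the q3_K–q6_K figures; it assumes the default build constants QK_K = 256 and K_SCALE_SIZE = 12 and a 2-byte ggml_fp16_t.

```
package main

import "fmt"

// Assumed constants for the default build (i.e. without GGML_QKK_64).
const (
	qkK        = 256 // weights per super-block
	kScaleSize = 12  // packed 6-bit scales/mins in q4_K and q5_K
	fp16Bytes  = 2   // sizeof(ggml_fp16_t)
)

func main() {
	// Block sizes taken from the static_asserts above.
	blocks := []struct {
		name  string
		bytes int
	}{
		{"q3_K", fp16Bytes + qkK/4 + qkK/8 + 12},           // 110 bytes
		{"q4_K", 2*fp16Bytes + kScaleSize + qkK/2},         // 144 bytes
		{"q5_K", 2*fp16Bytes + kScaleSize + qkK/2 + qkK/8}, // 176 bytes
		{"q6_K", fp16Bytes + qkK/16 + 3*qkK/4},             // 210 bytes
	}
	for _, b := range blocks {
		fmt.Printf("%s: %3d bytes / %d weights = %.4f bits per weight\n",
			b.name, b.bytes, qkK, float64(b.bytes*8)/float64(qkK))
	}
}
```

Running this prints 3.4375, 4.5, 5.5 and 6.5625 bits per weight, matching the comments in the header.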
530  llama/llama-util.h  Normal file
@@ -0,0 +1,530 @@
||||||
|
/**
|
||||||
|
* llama.cpp - git 5bf2a2771886ee86137e01dbc7492f78fb392066
|
||||||
|
*
|
||||||
|
* MIT License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2023 Georgi Gerganov
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
* of this software and associated documentation files (the "Software"), to deal
|
||||||
|
* in the Software without restriction, including without limitation the rights
|
||||||
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
* copies of the Software, and to permit persons to whom the Software is
|
||||||
|
* furnished to do so, subject to the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice shall be included in all
|
||||||
|
* copies or substantial portions of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
* SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Internal header to be included only by llama.cpp.
|
||||||
|
// Contains wrappers around OS interfaces.
|
||||||
|
|
||||||
|
#ifndef LLAMA_UTIL_H
|
||||||
|
#define LLAMA_UTIL_H
|
||||||
|
|
||||||
|
#include <cstdio>
|
||||||
|
#include <cstdint>
|
||||||
|
#include <cerrno>
|
||||||
|
#include <cstring>
|
||||||
|
#include <cstdarg>
|
||||||
|
#include <cstdlib>
|
||||||
|
#include <climits>
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
#include <stdexcept>
|
||||||
|
|
||||||
|
#ifdef __has_include
|
||||||
|
#if __has_include(<unistd.h>)
|
||||||
|
#include <unistd.h>
|
||||||
|
#if defined(_POSIX_MAPPED_FILES)
|
||||||
|
#include <sys/mman.h>
|
||||||
|
#endif
|
||||||
|
#if defined(_POSIX_MEMLOCK_RANGE)
|
||||||
|
#include <sys/resource.h>
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if defined(_WIN32)
|
||||||
|
#define WIN32_LEAN_AND_MEAN
|
||||||
|
#ifndef NOMINMAX
|
||||||
|
#define NOMINMAX
|
||||||
|
#endif
|
||||||
|
#include <windows.h>
|
||||||
|
#include <io.h>
|
||||||
|
#include <stdio.h> // for _fseeki64
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define LLAMA_ASSERT(x) \
|
||||||
|
do { \
|
||||||
|
if (!(x)) { \
|
||||||
|
fprintf(stderr, "LLAMA_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
|
||||||
|
abort(); \
|
||||||
|
} \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#ifdef __GNUC__
|
||||||
|
#ifdef __MINGW32__
|
||||||
|
__attribute__((format(gnu_printf, 1, 2)))
|
||||||
|
#else
|
||||||
|
__attribute__((format(printf, 1, 2)))
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
static std::string format(const char * fmt, ...) {
|
||||||
|
va_list ap, ap2;
|
||||||
|
va_start(ap, fmt);
|
||||||
|
va_copy(ap2, ap);
|
||||||
|
int size = vsnprintf(NULL, 0, fmt, ap);
|
||||||
|
LLAMA_ASSERT(size >= 0 && size < INT_MAX);
|
||||||
|
std::vector<char> buf(size + 1);
|
||||||
|
int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
|
||||||
|
LLAMA_ASSERT(size2 == size);
|
||||||
|
va_end(ap2);
|
||||||
|
va_end(ap);
|
||||||
|
return std::string(buf.data(), size);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct llama_file {
|
||||||
|
// use FILE * so we don't have to re-open the file to mmap
|
||||||
|
FILE * fp;
|
||||||
|
size_t size;
|
||||||
|
|
||||||
|
llama_file(const char * fname, const char * mode) {
|
||||||
|
fp = std::fopen(fname, mode);
|
||||||
|
if (fp == NULL) {
|
||||||
|
throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
|
||||||
|
}
|
||||||
|
seek(0, SEEK_END);
|
||||||
|
size = tell();
|
||||||
|
seek(0, SEEK_SET);
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t tell() const {
|
||||||
|
#ifdef _WIN32
|
||||||
|
__int64 ret = _ftelli64(fp);
|
||||||
|
#else
|
||||||
|
long ret = std::ftell(fp);
|
||||||
|
#endif
|
||||||
|
LLAMA_ASSERT(ret != -1); // this really shouldn't fail
|
||||||
|
return (size_t) ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
void seek(size_t offset, int whence) {
|
||||||
|
#ifdef _WIN32
|
||||||
|
int ret = _fseeki64(fp, (__int64) offset, whence);
|
||||||
|
#else
|
||||||
|
int ret = std::fseek(fp, (long) offset, whence);
|
||||||
|
#endif
|
||||||
|
LLAMA_ASSERT(ret == 0); // same
|
||||||
|
}
|
||||||
|
|
||||||
|
void read_raw(void * ptr, size_t len) const {
|
||||||
|
if (len == 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
errno = 0;
|
||||||
|
std::size_t ret = std::fread(ptr, len, 1, fp);
|
||||||
|
if (ferror(fp)) {
|
||||||
|
throw std::runtime_error(format("read error: %s", strerror(errno)));
|
||||||
|
}
|
||||||
|
if (ret != 1) {
|
||||||
|
throw std::runtime_error(std::string("unexpectedly reached end of file"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
std::uint32_t read_u32() {
|
||||||
|
std::uint32_t ret;
|
||||||
|
read_raw(&ret, sizeof(ret));
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string read_string(std::uint32_t len) {
|
||||||
|
std::vector<char> chars(len);
|
||||||
|
read_raw(chars.data(), len);
|
||||||
|
return std::string(chars.data(), len);
|
||||||
|
}
|
||||||
|
|
||||||
|
void write_raw(const void * ptr, size_t len) const {
|
||||||
|
if (len == 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
errno = 0;
|
||||||
|
size_t ret = std::fwrite(ptr, len, 1, fp);
|
||||||
|
if (ret != 1) {
|
||||||
|
throw std::runtime_error(format("write error: %s", strerror(errno)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void write_u32(std::uint32_t val) {
|
||||||
|
write_raw(&val, sizeof(val));
|
||||||
|
}
|
||||||
|
|
||||||
|
~llama_file() {
|
||||||
|
if (fp) {
|
||||||
|
std::fclose(fp);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
#if defined(_WIN32)
|
||||||
|
static std::string llama_format_win_err(DWORD err) {
|
||||||
|
LPSTR buf;
|
||||||
|
size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
|
||||||
|
NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
|
||||||
|
if (!size) {
|
||||||
|
return "FormatMessageA failed";
|
||||||
|
}
|
||||||
|
std::string ret(buf, size);
|
||||||
|
LocalFree(buf);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
struct llama_mmap {
|
||||||
|
void * addr;
|
||||||
|
size_t size;
|
||||||
|
|
||||||
|
llama_mmap(const llama_mmap &) = delete;
|
||||||
|
|
||||||
|
#ifdef _POSIX_MAPPED_FILES
|
||||||
|
static constexpr bool SUPPORTED = true;
|
||||||
|
|
||||||
|
llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
|
||||||
|
size = file->size;
|
||||||
|
int fd = fileno(file->fp);
|
||||||
|
int flags = MAP_PRIVATE;
|
||||||
|
// prefetch/readahead impairs performance on NUMA systems
|
||||||
|
if (numa) { prefetch = 0; }
|
||||||
|
#ifdef __linux__
|
||||||
|
if (prefetch) { flags |= MAP_POPULATE; }
|
||||||
|
#endif
|
||||||
|
addr = mmap(NULL, file->size, PROT_READ | PROT_WRITE, flags, fd, 0);
|
||||||
|
if (addr == MAP_FAILED) {
|
||||||
|
throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (prefetch > 0) {
|
||||||
|
// Advise the kernel to preload the mapped memory
|
||||||
|
if (madvise(addr, std::min(file->size, prefetch), MADV_WILLNEED)) {
|
||||||
|
fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n",
|
||||||
|
strerror(errno));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (numa) {
|
||||||
|
// advise the kernel not to use readahead
|
||||||
|
// (because the next page might not belong on the same node)
|
||||||
|
if (madvise(addr, file->size, MADV_RANDOM)) {
|
||||||
|
fprintf(stderr, "warning: madvise(.., MADV_RANDOM) failed: %s\n",
|
||||||
|
strerror(errno));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
~llama_mmap() {
|
||||||
|
munmap(addr, size);
|
||||||
|
}
|
||||||
|
#elif defined(_WIN32)
|
||||||
|
static constexpr bool SUPPORTED = true;
|
||||||
|
|
||||||
|
llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) {
|
||||||
|
(void) numa;
|
||||||
|
|
||||||
|
size = file->size;
|
||||||
|
|
||||||
|
HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
|
||||||
|
|
||||||
|
HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
|
||||||
|
DWORD error = GetLastError();
|
||||||
|
|
||||||
|
if (hMapping == NULL) {
|
||||||
|
throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
|
||||||
|
}
|
||||||
|
|
||||||
|
addr = MapViewOfFile(hMapping, FILE_MAP_COPY, 0, 0, 0);
|
||||||
|
error = GetLastError();
|
||||||
|
CloseHandle(hMapping);
|
||||||
|
|
||||||
|
if (addr == NULL) {
|
||||||
|
throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
|
||||||
|
}
|
||||||
|
|
||||||
|
#if _WIN32_WINNT >= _WIN32_WINNT_WIN8
|
||||||
|
if (prefetch) {
|
||||||
|
// Advise the kernel to preload the mapped memory
|
||||||
|
WIN32_MEMORY_RANGE_ENTRY range;
|
||||||
|
range.VirtualAddress = addr;
|
||||||
|
range.NumberOfBytes = (SIZE_T)size;
|
||||||
|
if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
|
||||||
|
fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
|
||||||
|
llama_format_win_err(GetLastError()).c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
#pragma message("warning: You are building for pre-Windows 8; prefetch not supported")
|
||||||
|
#endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8
|
||||||
|
}
|
||||||
|
|
||||||
|
~llama_mmap() {
|
||||||
|
if (!UnmapViewOfFile(addr)) {
|
||||||
|
fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
|
||||||
|
llama_format_win_err(GetLastError()).c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
static constexpr bool SUPPORTED = false;
|
||||||
|
|
||||||
|
llama_mmap(struct llama_file *, bool prefetch = true, bool numa = false) {
|
||||||
|
(void) prefetch;
|
||||||
|
(void) numa;
|
||||||
|
|
||||||
|
throw std::runtime_error(std::string("mmap not supported"));
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
// Represents some region of memory being locked using mlock or VirtualLock;
|
||||||
|
// will automatically unlock on destruction.
|
||||||
|
struct llama_mlock {
|
||||||
|
void * addr = NULL;
|
||||||
|
size_t size = 0;
|
||||||
|
bool failed_already = false;
|
||||||
|
|
||||||
|
llama_mlock() {}
|
||||||
|
llama_mlock(const llama_mlock &) = delete;
|
||||||
|
|
||||||
|
~llama_mlock() {
|
||||||
|
if (size) {
|
||||||
|
raw_unlock(addr, size);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void init(void * ptr) {
|
||||||
|
LLAMA_ASSERT(addr == NULL && size == 0);
|
||||||
|
addr = ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
void grow_to(size_t target_size) {
|
||||||
|
LLAMA_ASSERT(addr);
|
||||||
|
if (failed_already) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
size_t granularity = lock_granularity();
|
||||||
|
target_size = (target_size + granularity - 1) & ~(granularity - 1);
|
||||||
|
if (target_size > size) {
|
||||||
|
if (raw_lock((uint8_t *) addr + size, target_size - size)) {
|
||||||
|
size = target_size;
|
||||||
|
} else {
|
||||||
|
failed_already = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef _POSIX_MEMLOCK_RANGE
|
||||||
|
static constexpr bool SUPPORTED = true;
|
||||||
|
|
||||||
|
size_t lock_granularity() {
|
||||||
|
return (size_t) sysconf(_SC_PAGESIZE);
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef __APPLE__
|
||||||
|
#define MLOCK_SUGGESTION \
|
||||||
|
"Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
|
||||||
|
"decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
|
||||||
|
#else
|
||||||
|
#define MLOCK_SUGGESTION \
|
||||||
|
"Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
bool raw_lock(const void * addr, size_t size) {
|
||||||
|
if (!mlock(addr, size)) {
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
char* errmsg = std::strerror(errno);
|
||||||
|
bool suggest = (errno == ENOMEM);
|
||||||
|
|
||||||
|
// Check if the resource limit is fine after all
|
||||||
|
struct rlimit lock_limit;
|
||||||
|
if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit))
|
||||||
|
suggest = false;
|
||||||
|
if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size))
|
||||||
|
suggest = false;
|
||||||
|
|
||||||
|
fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
|
||||||
|
size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#undef MLOCK_SUGGESTION
|
||||||
|
|
||||||
|
void raw_unlock(void * addr, size_t size) {
|
||||||
|
if (munlock(addr, size)) {
|
||||||
|
fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#elif defined(_WIN32)
|
||||||
|
static constexpr bool SUPPORTED = true;
|
||||||
|
|
||||||
|
size_t lock_granularity() {
|
||||||
|
SYSTEM_INFO si;
|
||||||
|
GetSystemInfo(&si);
|
||||||
|
return (size_t) si.dwPageSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool raw_lock(void * ptr, size_t len) {
|
||||||
|
for (int tries = 1; ; tries++) {
|
||||||
|
if (VirtualLock(ptr, len)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (tries == 2) {
|
||||||
|
fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
|
||||||
|
len, size, llama_format_win_err(GetLastError()).c_str());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// It failed but this was only the first try; increase the working
|
||||||
|
// set size and try again.
|
||||||
|
SIZE_T min_ws_size, max_ws_size;
|
||||||
|
if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
|
||||||
|
fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
|
||||||
|
llama_format_win_err(GetLastError()).c_str());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// Per MSDN: "The maximum number of pages that a process can lock
|
||||||
|
// is equal to the number of pages in its minimum working set minus
|
||||||
|
// a small overhead."
|
||||||
|
// Hopefully a megabyte is enough overhead:
|
||||||
|
size_t increment = len + 1048576;
|
||||||
|
// The minimum must be <= the maximum, so we need to increase both:
|
||||||
|
min_ws_size += increment;
|
||||||
|
max_ws_size += increment;
|
||||||
|
if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
|
||||||
|
fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
|
||||||
|
llama_format_win_err(GetLastError()).c_str());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void raw_unlock(void * ptr, size_t len) {
|
||||||
|
if (!VirtualUnlock(ptr, len)) {
|
||||||
|
fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
|
||||||
|
llama_format_win_err(GetLastError()).c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
static constexpr bool SUPPORTED = false;
|
||||||
|
|
||||||
|
size_t lock_granularity() {
|
||||||
|
return (size_t) 65536;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool raw_lock(const void * addr, size_t len) {
|
||||||
|
fprintf(stderr, "warning: mlock not supported on this system\n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void raw_unlock(const void * addr, size_t len) {}
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
// Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
|
||||||
|
struct llama_buffer {
|
||||||
|
uint8_t * addr = NULL;
|
||||||
|
size_t size = 0;
|
||||||
|
|
||||||
|
llama_buffer() = default;
|
||||||
|
|
||||||
|
void resize(size_t len) {
|
||||||
|
#ifdef GGML_USE_METAL
|
||||||
|
free(addr);
|
||||||
|
int result = posix_memalign((void **) &addr, getpagesize(), len);
|
||||||
|
if (result == 0) {
|
||||||
|
memset(addr, 0, len);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
addr = NULL;
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
delete[] addr;
|
||||||
|
addr = new uint8_t[len];
|
||||||
|
#endif
|
||||||
|
size = len;
|
||||||
|
}
|
||||||
|
|
||||||
|
~llama_buffer() {
|
||||||
|
#ifdef GGML_USE_METAL
|
||||||
|
free(addr);
|
||||||
|
#else
|
||||||
|
delete[] addr;
|
||||||
|
#endif
|
||||||
|
addr = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// disable copy and move
|
||||||
|
llama_buffer(const llama_buffer&) = delete;
|
||||||
|
llama_buffer(llama_buffer&&) = delete;
|
||||||
|
llama_buffer& operator=(const llama_buffer&) = delete;
|
||||||
|
llama_buffer& operator=(llama_buffer&&) = delete;
|
||||||
|
};
|
||||||
|
|
||||||
|
#ifdef GGML_USE_CUBLAS
|
||||||
|
#include "ggml-cuda.h"
|
||||||
|
struct llama_ctx_buffer {
|
||||||
|
uint8_t * addr = NULL;
|
||||||
|
bool is_cuda;
|
||||||
|
size_t size = 0;
|
||||||
|
|
||||||
|
llama_ctx_buffer() = default;
|
||||||
|
|
||||||
|
void resize(size_t size) {
|
||||||
|
free();
|
||||||
|
|
||||||
|
addr = (uint8_t *) ggml_cuda_host_malloc(size);
|
||||||
|
if (addr) {
|
||||||
|
is_cuda = true;
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
// fall back to pageable memory
|
||||||
|
addr = new uint8_t[size];
|
||||||
|
is_cuda = false;
|
||||||
|
}
|
||||||
|
this->size = size;
|
||||||
|
}
|
||||||
|
|
||||||
|
void free() {
|
||||||
|
if (addr) {
|
||||||
|
if (is_cuda) {
|
||||||
|
ggml_cuda_host_free(addr);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
delete[] addr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
addr = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
~llama_ctx_buffer() {
|
||||||
|
free();
|
||||||
|
}
|
||||||
|
|
||||||
|
// disable copy and move
|
||||||
|
llama_ctx_buffer(const llama_ctx_buffer&) = delete;
|
||||||
|
llama_ctx_buffer(llama_ctx_buffer&&) = delete;
|
||||||
|
llama_ctx_buffer& operator=(const llama_ctx_buffer&) = delete;
|
||||||
|
llama_ctx_buffer& operator=(llama_ctx_buffer&&) = delete;
|
||||||
|
};
|
||||||
|
#else
|
||||||
|
typedef llama_buffer llama_ctx_buffer;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
3700  llama/llama.cpp  Normal file
File diff suppressed because it is too large.
367  llama/llama.go
@@ -1,215 +1,234 @@
|
||||||
// MIT License
|
|
||||||
|
|
||||||
// Copyright (c) 2023 go-skynet authors
|
|
||||||
|
|
||||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
// of this software and associated documentation files (the "Software"), to deal
|
|
||||||
// in the Software without restriction, including without limitation the rights
|
|
||||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
// copies of the Software, and to permit persons to whom the Software is
|
|
||||||
// furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
// The above copyright notice and this permission notice shall be included in all
|
|
||||||
// copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
// SOFTWARE.
|
|
||||||
|
|
||||||
package llama
|
package llama
|
||||||
|
|
||||||
// #cgo LDFLAGS: -Lbuild -lbinding -lllama -lm -lggml_static -lstdc++
|
/*
|
||||||
// #cgo CXXFLAGS: -std=c++11
|
#cgo CPPFLAGS: -O3 -DNDEBUG=1
|
||||||
// #cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
|
#cgo CXXFLAGS: -std=c++11
|
||||||
// #include "binding/binding.h"
|
#cgo darwin CPPFLAGS: -DGGML_USE_METAL=1 -DGGML_METAL_NDEBUG=1
|
||||||
// #include <stdlib.h>
|
#cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
|
||||||
import "C"
|
#include <stdlib.h>
|
||||||
|
#include "llama.h"
|
||||||
|
|
||||||
|
struct llama_sample_options
|
||||||
|
{
|
||||||
|
float repeat_penalty;
|
||||||
|
float frequency_penalty;
|
||||||
|
float presence_penalty;
|
||||||
|
float temperature;
|
||||||
|
int32_t top_k;
|
||||||
|
float top_p;
|
||||||
|
float tfs_z;
|
||||||
|
float typical_p;
|
||||||
|
int mirostat;
|
||||||
|
float mirostat_tau;
|
||||||
|
float mirostat_eta;
|
||||||
|
};
|
||||||
|
|
||||||
|
llama_token llama_sample(
|
||||||
|
struct llama_context *ctx,
|
||||||
|
struct llama_token_data *candidates,
|
||||||
|
size_t n_candidates,
|
||||||
|
const llama_token *last_tokens,
|
||||||
|
size_t n_last_tokens,
|
||||||
|
struct llama_sample_options *opts)
|
||||||
|
{
|
||||||
|
llama_token_data_array candidates_p = {
|
||||||
|
candidates,
|
||||||
|
n_candidates,
|
||||||
|
false,
|
||||||
|
};
|
||||||
|
|
||||||
|
llama_sample_repetition_penalty(
|
||||||
|
ctx, &candidates_p,
|
||||||
|
last_tokens, n_last_tokens,
|
||||||
|
opts->repeat_penalty);
|
||||||
|
|
||||||
|
llama_sample_frequency_and_presence_penalties(
|
||||||
|
ctx, &candidates_p,
|
||||||
|
last_tokens, n_last_tokens,
|
||||||
|
opts->frequency_penalty, opts->presence_penalty);
|
||||||
|
|
||||||
|
if (opts->temperature <= 0) {
|
||||||
|
return llama_sample_token_greedy(ctx, &candidates_p);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (opts->mirostat == 1) {
|
||||||
|
int mirostat_m = 100;
|
||||||
|
float mirostat_mu = 2.0f * opts->mirostat_tau;
|
||||||
|
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
|
||||||
|
return llama_sample_token_mirostat(
|
||||||
|
ctx, &candidates_p,
|
||||||
|
opts->mirostat_tau, opts->mirostat_eta,
|
||||||
|
mirostat_m, &mirostat_mu);
|
||||||
|
} else if (opts->mirostat == 2) {
|
||||||
|
float mirostat_mu = 2.0f * opts->mirostat_tau;
|
||||||
|
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
|
||||||
|
return llama_sample_token_mirostat_v2(
|
||||||
|
ctx, &candidates_p,
|
||||||
|
opts->mirostat_tau, opts->mirostat_eta,
|
||||||
|
&mirostat_mu);
|
||||||
|
} else {
|
||||||
|
llama_sample_top_k(ctx, &candidates_p, opts->top_k, 1);
|
||||||
|
llama_sample_tail_free(ctx, &candidates_p, opts->tfs_z, 1);
|
||||||
|
llama_sample_typical(ctx, &candidates_p, opts->typical_p, 1);
|
||||||
|
llama_sample_top_p(ctx, &candidates_p, opts->top_p, 1);
|
||||||
|
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
|
||||||
|
return llama_sample_token(ctx, &candidates_p);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"errors"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/jmorganca/ollama/api"
|
||||||
)
|
)
|
||||||
|
|
||||||
type LLama struct {
|
type llama struct {
|
||||||
ctx unsafe.Pointer
|
params *C.struct_llama_context_params
|
||||||
embeddings bool
|
model *C.struct_llama_model
|
||||||
contextSize int
|
ctx *C.struct_llama_context
|
||||||
|
|
||||||
|
api.Options
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(model string, mo ModelOptions) (*LLama, error) {
|
func New(model string, opts api.Options) (*llama, error) {
|
||||||
modelPath := C.CString(model)
|
if _, err := os.Stat(model); err != nil {
|
||||||
defer C.free(unsafe.Pointer(modelPath))
|
return nil, err
|
||||||
|
|
||||||
ctx := C.load_model(modelPath, C.int(mo.ContextSize), C.int(mo.Seed), C.bool(mo.F16Memory), C.bool(mo.MLock), C.bool(mo.Embeddings), C.bool(mo.MMap), C.bool(mo.LowVRAM), C.bool(mo.VocabOnly), C.int(mo.NGPULayers), C.int(mo.NBatch), C.CString(mo.MainGPU), C.CString(mo.TensorSplit), C.bool(mo.NUMA))
|
|
||||||
if ctx == nil {
|
|
||||||
return nil, fmt.Errorf("failed loading model")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ll := &LLama{ctx: ctx, contextSize: mo.ContextSize, embeddings: mo.Embeddings}
|
llm := llama{Options: opts}
|
||||||
|
|
||||||
return ll, nil
|
C.llama_backend_init(C.bool(llm.UseNUMA))
|
||||||
|
|
||||||
|
params := C.llama_context_default_params()
|
||||||
|
params.seed = C.uint(llm.Seed)
|
||||||
|
params.n_ctx = C.int(llm.NumCtx)
|
||||||
|
params.n_batch = C.int(llm.NumBatch)
|
||||||
|
params.n_gpu_layers = C.int(llm.NumGPU)
|
||||||
|
params.main_gpu = C.int(llm.MainGPU)
|
||||||
|
params.low_vram = C.bool(llm.LowVRAM)
|
||||||
|
params.f16_kv = C.bool(llm.F16KV)
|
||||||
|
params.logits_all = C.bool(llm.LogitsAll)
|
||||||
|
params.vocab_only = C.bool(llm.VocabOnly)
|
||||||
|
params.use_mmap = C.bool(llm.UseMMap)
|
||||||
|
params.use_mlock = C.bool(llm.UseMLock)
|
||||||
|
params.embedding = C.bool(llm.EmbeddingOnly)
|
||||||
|
llm.params = ¶ms
|
||||||
|
|
||||||
|
cModel := C.CString(model)
|
||||||
|
defer C.free(unsafe.Pointer(cModel))
|
||||||
|
|
||||||
|
llm.model = C.llama_load_model_from_file(cModel, params)
|
||||||
|
llm.ctx = C.llama_new_context_with_model(llm.model, params)
|
||||||
|
|
||||||
|
// warm up the model
|
||||||
|
bos := []C.llama_token{C.llama_token_bos()}
|
||||||
|
C.llama_eval(llm.ctx, unsafe.SliceData(bos), C.int(len(bos)), 0, C.int(opts.NumThread))
|
||||||
|
C.llama_reset_timings(llm.ctx)
|
||||||
|
|
||||||
|
return &llm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *LLama) Free() {
|
func (llm *llama) Close() {
|
||||||
C.llama_binding_free_model(l.ctx)
|
defer C.llama_free_model(llm.model)
|
||||||
|
defer C.llama_free(llm.ctx)
|
||||||
|
|
||||||
|
C.llama_print_timings(llm.ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *LLama) Eval(text string, po PredictOptions) error {
|
func (llm *llama) Predict(prompt string, fn func(string)) error {
|
||||||
input := C.CString(text)
|
if tokens := llm.tokenize(prompt); tokens != nil {
|
||||||
if po.Tokens == 0 {
|
return llm.generate(tokens, fn)
|
||||||
po.Tokens = 99999999
|
|
||||||
}
|
|
||||||
defer C.free(unsafe.Pointer(input))
|
|
||||||
|
|
||||||
reverseCount := len(po.StopPrompts)
|
|
||||||
reversePrompt := make([]*C.char, reverseCount)
|
|
||||||
var pass **C.char
|
|
||||||
for i, s := range po.StopPrompts {
|
|
||||||
cs := C.CString(s)
|
|
||||||
reversePrompt[i] = cs
|
|
||||||
pass = &reversePrompt[0]
|
|
||||||
defer C.free(unsafe.Pointer(cs))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
cLogitBias := C.CString(po.LogitBias)
|
return errors.New("llama: tokenize")
|
||||||
defer C.free(unsafe.Pointer(cLogitBias))
|
}
|
||||||
|
|
||||||
cMainGPU := C.CString(po.MainGPU)
|
func (llm *llama) tokenize(prompt string) []C.llama_token {
|
||||||
defer C.free(unsafe.Pointer(cMainGPU))
|
cPrompt := C.CString(prompt)
|
||||||
|
defer C.free(unsafe.Pointer(cPrompt))
|
||||||
|
|
||||||
cTensorSplit := C.CString(po.TensorSplit)
|
tokens := make([]C.llama_token, llm.NumCtx)
|
||||||
defer C.free(unsafe.Pointer(cTensorSplit))
|
if n := C.llama_tokenize(llm.ctx, cPrompt, unsafe.SliceData(tokens), C.int(len(tokens)), true); n > 0 {
|
||||||
|
return tokens[:n]
|
||||||
params := C.llama_allocate_params(input, C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
|
|
||||||
C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
|
|
||||||
C.bool(po.IgnoreEOS), C.bool(po.F16KV),
|
|
||||||
C.int(po.Batch), C.int(po.NKeep), pass, C.int(reverseCount),
|
|
||||||
C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
|
|
||||||
C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), cLogitBias,
|
|
||||||
C.bool(po.MLock), C.bool(po.MMap), cMainGPU, cTensorSplit,
|
|
||||||
)
|
|
||||||
defer C.llama_free_params(params)
|
|
||||||
|
|
||||||
ret := C.eval(params, l.ctx, input)
|
|
||||||
if ret != 0 {
|
|
||||||
return fmt.Errorf("inference failed")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *LLama) Predict(text string, po PredictOptions) (string, error) {
|
func (llm *llama) detokenize(tokens ...C.llama_token) string {
|
||||||
if po.TokenCallback != nil {
|
var sb strings.Builder
|
||||||
setCallback(l.ctx, po.TokenCallback)
|
for _, token := range tokens {
|
||||||
|
sb.WriteString(C.GoString(C.llama_token_to_str(llm.ctx, token)))
|
||||||
}
|
}
|
||||||
|
|
||||||
input := C.CString(text)
|
return sb.String()
|
||||||
if po.Tokens == 0 {
|
|
||||||
po.Tokens = 99999999
|
|
||||||
}
|
|
||||||
defer C.free(unsafe.Pointer(input))
|
|
||||||
|
|
||||||
out := make([]byte, po.Tokens)
|
|
||||||
|
|
||||||
reverseCount := len(po.StopPrompts)
|
|
||||||
reversePrompt := make([]*C.char, reverseCount)
|
|
||||||
var pass **C.char
|
|
||||||
for i, s := range po.StopPrompts {
|
|
||||||
cs := C.CString(s)
|
|
||||||
reversePrompt[i] = cs
|
|
||||||
pass = &reversePrompt[0]
|
|
||||||
defer C.free(unsafe.Pointer(cs))
|
|
||||||
}
|
|
||||||
|
|
||||||
cLogitBias := C.CString(po.LogitBias)
|
|
||||||
defer C.free(unsafe.Pointer(cLogitBias))
|
|
||||||
|
|
||||||
cMainGPU := C.CString(po.MainGPU)
|
|
||||||
defer C.free(unsafe.Pointer(cMainGPU))
|
|
||||||
|
|
||||||
cTensorSplit := C.CString(po.TensorSplit)
|
|
||||||
defer C.free(unsafe.Pointer(cTensorSplit))
|
|
||||||
|
|
||||||
params := C.llama_allocate_params(input, C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
|
|
||||||
C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
|
|
||||||
C.bool(po.IgnoreEOS), C.bool(po.F16KV),
|
|
||||||
C.int(po.Batch), C.int(po.NKeep), pass, C.int(reverseCount),
|
|
||||||
C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
|
|
||||||
C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), cLogitBias,
|
|
||||||
C.bool(po.MLock), C.bool(po.MMap), cMainGPU, cTensorSplit,
|
|
||||||
)
|
|
||||||
defer C.llama_free_params(params)
|
|
||||||
|
|
||||||
ret := C.llama_predict(params, l.ctx, (*C.char)(unsafe.Pointer(&out[0])), C.bool(po.DebugMode))
|
|
||||||
if ret != 0 {
|
|
||||||
return "", fmt.Errorf("inference failed")
|
|
||||||
}
|
|
||||||
res := C.GoString((*C.char)(unsafe.Pointer(&out[0])))
|
|
||||||
|
|
||||||
res = strings.TrimPrefix(res, " ")
|
|
||||||
res = strings.TrimPrefix(res, text)
|
|
||||||
res = strings.TrimPrefix(res, "\n")
|
|
||||||
|
|
||||||
for _, s := range po.StopPrompts {
|
|
||||||
res = strings.TrimRight(res, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
if po.TokenCallback != nil {
|
|
||||||
setCallback(l.ctx, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return res, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CGo only allows us to use static calls from C to Go, we can't just dynamically pass in func's.
|
func (llm *llama) generate(tokens []C.llama_token, fn func(string)) error {
|
||||||
// This is the next best thing, we register the callbacks in this map and call tokenCallback from
|
var opts C.struct_llama_sample_options
|
||||||
// the C code. We also attach a finalizer to LLama, so it will unregister the callback when the
|
opts.repeat_penalty = C.float(llm.RepeatPenalty)
|
||||||
// garbage collection frees it.
|
opts.frequency_penalty = C.float(llm.FrequencyPenalty)
|
||||||
|
opts.presence_penalty = C.float(llm.PresencePenalty)
|
||||||
|
opts.temperature = C.float(llm.Temperature)
|
||||||
|
opts.top_k = C.int(llm.TopK)
|
||||||
|
opts.top_p = C.float(llm.TopP)
|
||||||
|
opts.tfs_z = C.float(llm.TFSZ)
|
||||||
|
opts.typical_p = C.float(llm.TypicalP)
|
||||||
|
opts.mirostat = C.int(llm.Mirostat)
|
||||||
|
opts.mirostat_tau = C.float(llm.MirostatTau)
|
||||||
|
opts.mirostat_eta = C.float(llm.MirostatEta)
|
||||||
|
|
||||||
// SetTokenCallback registers a callback for the individual tokens created when running Predict. It
|
pastTokens := deque[C.llama_token]{capacity: llm.RepeatLastN}
|
||||||
// will be called once for each token. The callback shall return true as long as the model should
|
|
||||||
// continue predicting the next token. When the callback returns false the predictor will return.
|
|
||||||
// The tokens are just converted into Go strings, they are not trimmed or otherwise changed. Also
|
|
||||||
// the tokens may not be valid UTF-8.
|
|
||||||
// Pass in nil to remove a callback.
|
|
||||||
//
|
|
||||||
// It is safe to call this method while a prediction is running.
|
|
||||||
func (l *LLama) SetTokenCallback(callback func(token string) bool) {
|
|
||||||
setCallback(l.ctx, callback)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
for C.llama_get_kv_cache_token_count(llm.ctx) < C.int(llm.NumCtx) {
|
||||||
m sync.Mutex
|
if retval := C.llama_eval(llm.ctx, unsafe.SliceData(tokens), C.int(len(tokens)), C.llama_get_kv_cache_token_count(llm.ctx), C.int(llm.NumThread)); retval != 0 {
|
||||||
callbacks = map[uintptr]func(string) bool{}
|
return errors.New("llama: eval")
|
||||||
)
|
}
|
||||||
|
|
||||||
//export tokenCallback
|
token, err := llm.sample(pastTokens, &opts)
|
||||||
func tokenCallback(statePtr unsafe.Pointer, token *C.char) bool {
|
switch {
|
||||||
m.Lock()
|
case err != nil:
|
||||||
defer m.Unlock()
|
return err
|
||||||
|
case errors.Is(err, io.EOF):
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
if callback, ok := callbacks[uintptr(statePtr)]; ok {
|
fn(llm.detokenize(token))
|
||||||
return callback(C.GoString(token))
|
|
||||||
|
tokens = []C.llama_token{token}
|
||||||
|
|
||||||
|
pastTokens.PushLeft(token)
|
||||||
}
|
}
|
||||||
|
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// setCallback can be used to register a token callback for LLama. Pass in a nil callback to
|
func (llm *llama) sample(pastTokens deque[C.llama_token], opts *C.struct_llama_sample_options) (C.llama_token, error) {
|
||||||
// remove the callback.
|
numVocab := int(C.llama_n_vocab(llm.ctx))
|
||||||
func setCallback(statePtr unsafe.Pointer, callback func(string) bool) {
|
logits := unsafe.Slice(C.llama_get_logits(llm.ctx), numVocab)
|
||||||
m.Lock()
|
|
||||||
defer m.Unlock()
|
|
||||||
|
|
||||||
if callback == nil {
|
candidates := make([]C.struct_llama_token_data, 0, numVocab)
|
||||||
delete(callbacks, uintptr(statePtr))
|
for i := 0; i < numVocab; i++ {
|
||||||
} else {
|
candidates = append(candidates, C.llama_token_data{
|
||||||
callbacks[uintptr(statePtr)] = callback
|
id: C.int(i),
|
||||||
|
logit: logits[i],
|
||||||
|
p: 0,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
token := C.llama_sample(
|
||||||
|
llm.ctx,
|
||||||
|
unsafe.SliceData(candidates), C.ulong(len(candidates)),
|
||||||
|
unsafe.SliceData(pastTokens.Data()), C.ulong(pastTokens.Len()),
|
||||||
|
opts)
|
||||||
|
if token != C.llama_token_eos() {
|
||||||
|
return token, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, io.EOF
|
||||||
}
|
}
|
||||||
|
|
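Editor's note: the new binding above exposes New, Predict and Close on the llama type, with generation parameters carried in the embedded api.Options. A minimal sketch of how it might be driven end to end follows (not part of this diff); the model path and the particular option values are illustrative assumptions, though the field names are the ones llama.go reads.

```
package main

import (
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/llama"
)

func main() {
	// Illustrative option values; the field names come from api.Options as
	// used in llama.go, but these particular numbers are assumptions.
	opts := api.Options{
		NumCtx:        2048,
		NumBatch:      512,
		NumThread:     4,
		TopK:          40,
		TopP:          0.9,
		Temperature:   0.8,
		RepeatLastN:   64,
		RepeatPenalty: 1.1,
	}

	// New loads the model, creates a llama_context and warms it up.
	llm, err := llama.New("/path/to/model.ggmlv3.q4_0.bin", opts)
	if err != nil {
		log.Fatal(err)
	}
	// Close frees the context and model and prints llama.cpp timings.
	defer llm.Close()

	// Predict tokenizes the prompt, then streams each sampled token back
	// through the callback until EOS or the context window is full.
	if err := llm.Predict("Why is the sky blue?", func(token string) {
		fmt.Print(token)
	}); err != nil {
		log.Fatal(err)
	}
}
```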
410  llama/llama.h  Normal file
@@ -0,0 +1,410 @@
||||||
|
/**
|
||||||
|
* llama.cpp - git 5bf2a2771886ee86137e01dbc7492f78fb392066
|
||||||
|
*
|
||||||
|
* MIT License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2023 Georgi Gerganov
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
* of this software and associated documentation files (the "Software"), to deal
|
||||||
|
* in the Software without restriction, including without limitation the rights
|
||||||
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
* copies of the Software, and to permit persons to whom the Software is
|
||||||
|
* furnished to do so, subject to the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice shall be included in all
|
||||||
|
* copies or substantial portions of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
* SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef LLAMA_H
|
||||||
|
#define LLAMA_H
|
||||||
|
|
||||||
|
#include "ggml.h"
|
||||||
|
#ifdef GGML_USE_CUBLAS
|
||||||
|
#include "ggml-cuda.h"
|
||||||
|
#define LLAMA_MAX_DEVICES GGML_CUDA_MAX_DEVICES
|
||||||
|
#else
|
||||||
|
#define LLAMA_MAX_DEVICES 1
|
||||||
|
#endif // GGML_USE_CUBLAS
|
||||||
|
#include <stddef.h>
|
||||||
|
#include <stdint.h>
|
||||||
|
#include <stdbool.h>
|
||||||
|
|
||||||
|
#ifdef LLAMA_SHARED
|
||||||
|
# if defined(_WIN32) && !defined(__MINGW32__)
|
||||||
|
# ifdef LLAMA_BUILD
|
||||||
|
# define LLAMA_API __declspec(dllexport)
|
||||||
|
# else
|
||||||
|
# define LLAMA_API __declspec(dllimport)
|
||||||
|
# endif
|
||||||
|
# else
|
||||||
|
# define LLAMA_API __attribute__ ((visibility ("default")))
|
||||||
|
# endif
|
||||||
|
#else
|
||||||
|
# define LLAMA_API
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef __GNUC__
|
||||||
|
# define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
|
||||||
|
#elif defined(_MSC_VER)
|
||||||
|
# define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
|
||||||
|
#else
|
||||||
|
# define DEPRECATED(func, hint) func
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt'
|
||||||
|
#define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
|
||||||
|
#define LLAMA_FILE_MAGIC_GGMF 0x67676d66u // 'ggmf'
|
||||||
|
#define LLAMA_FILE_MAGIC_GGML 0x67676d6cu // 'ggml'
|
||||||
|
#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
|
||||||
|
|
||||||
|
#define LLAMA_FILE_VERSION 3
|
||||||
|
#define LLAMA_FILE_MAGIC LLAMA_FILE_MAGIC_GGJT
|
||||||
|
#define LLAMA_FILE_MAGIC_UNVERSIONED LLAMA_FILE_MAGIC_GGML
|
||||||
|
#define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
|
||||||
|
#define LLAMA_SESSION_VERSION 1
|
||||||
|
|
||||||
|
#define LLAMA_DEFAULT_SEED 0xFFFFFFFF
|
||||||
|
|
||||||
|
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL)
|
||||||
|
// Defined when llama.cpp is compiled with support for offloading model layers to GPU.
|
||||||
|
#define LLAMA_SUPPORTS_GPU_OFFLOAD
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
//
|
||||||
|
// C interface
|
||||||
|
//
|
||||||
|
// TODO: show sample usage
|
||||||
|
//
|
||||||
|
|
||||||
|
struct llama_model;
|
||||||
|
struct llama_context;
|
||||||
|
|
||||||
|
typedef int llama_token;
|
||||||
|
|
||||||
|
typedef struct llama_token_data {
|
||||||
|
llama_token id; // token id
|
||||||
|
float logit; // log-odds of the token
|
||||||
|
float p; // probability of the token
|
||||||
|
} llama_token_data;
|
||||||
|
|
||||||
|
typedef struct llama_token_data_array {
|
||||||
|
llama_token_data * data;
|
||||||
|
size_t size;
|
||||||
|
bool sorted;
|
||||||
|
} llama_token_data_array;
|
||||||
|
|
||||||
|
typedef void (*llama_progress_callback)(float progress, void *ctx);
|
||||||
|
|
||||||
|
struct llama_context_params {
|
||||||
|
uint32_t seed; // RNG seed, -1 for random
|
||||||
|
int32_t n_ctx; // text context
|
||||||
|
int32_t n_batch; // prompt processing batch size
|
||||||
|
int32_t n_gpu_layers; // number of layers to store in VRAM
|
||||||
|
int32_t main_gpu; // the GPU that is used for scratch and small tensors
|
||||||
|
float tensor_split[LLAMA_MAX_DEVICES]; // how to split layers across multiple GPUs
|
||||||
|
// called with a progress value between 0 and 1, pass NULL to disable
|
||||||
|
llama_progress_callback progress_callback;
|
||||||
|
// context pointer passed to the progress callback
|
||||||
|
void * progress_callback_user_data;
|
||||||
|
|
||||||
|
// Keep the booleans together to avoid misalignment during copy-by-value.
|
||||||
|
bool low_vram; // if true, reduce VRAM usage at the cost of performance
|
||||||
|
bool f16_kv; // use fp16 for KV cache
|
||||||
|
bool logits_all; // the llama_eval() call computes all logits, not just the last one
|
||||||
|
bool vocab_only; // only load the vocabulary, no weights
|
||||||
|
bool use_mmap; // use mmap if possible
|
||||||
|
bool use_mlock; // force system to keep model in RAM
|
||||||
|
bool embedding; // embedding mode only
|
||||||
|
};
|
||||||
|
// model file types
|
||||||
|
enum llama_ftype {
|
||||||
|
LLAMA_FTYPE_ALL_F32 = 0,
|
||||||
|
LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
|
||||||
|
// LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
|
||||||
|
// LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q2_K = 10,// except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,// except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,// except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,// except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,// except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,// except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,// except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,// except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q6_K = 18,// except 1d tensors
|
||||||
|
};
|
||||||
|
|
||||||
|
// model quantization parameters
|
||||||
|
typedef struct llama_model_quantize_params {
|
||||||
|
int nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
|
||||||
|
enum llama_ftype ftype; // quantize to this llama_ftype
|
||||||
|
bool allow_requantize; // allow quantizing non-f32/f16 tensors
|
||||||
|
bool quantize_output_tensor; // quantize output.weight
|
||||||
|
} llama_model_quantize_params;
|
||||||
|
|
||||||
|
// performance timing information
|
||||||
|
struct llama_timings {
|
||||||
|
double t_start_ms;
|
||||||
|
double t_end_ms;
|
||||||
|
double t_load_ms;
|
||||||
|
double t_sample_ms;
|
||||||
|
double t_p_eval_ms;
|
||||||
|
double t_eval_ms;
|
||||||
|
|
||||||
|
int32_t n_sample;
|
||||||
|
int32_t n_p_eval;
|
||||||
|
int32_t n_eval;
|
||||||
|
};
|
||||||
|
|
||||||
|
LLAMA_API struct llama_context_params llama_context_default_params();
|
||||||
|
LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params();
|
||||||
|
|
||||||
|
LLAMA_API bool llama_mmap_supported();
|
||||||
|
LLAMA_API bool llama_mlock_supported();
|
||||||
|
|
||||||
|
// TODO: not great API - very likely to change
|
||||||
|
// Initialize the llama + ggml backend
|
||||||
|
// If numa is true, use NUMA optimizations
|
||||||
|
// Call once at the start of the program
|
||||||
|
LLAMA_API void llama_backend_init(bool numa);
|
||||||
|
// Call once at the end of the program - currently only used for MPI
|
||||||
|
LLAMA_API void llama_backend_free();
|
||||||
|
|
||||||
|
LLAMA_API int64_t llama_time_us();
|
||||||
|
|
||||||
|
LLAMA_API struct llama_model * llama_load_model_from_file(
|
||||||
|
const char * path_model,
|
||||||
|
struct llama_context_params params);
|
||||||
|
|
||||||
|
LLAMA_API void llama_free_model(struct llama_model * model);
|
||||||
|
|
||||||
|
LLAMA_API struct llama_context * llama_new_context_with_model(
|
||||||
|
struct llama_model * model,
|
||||||
|
struct llama_context_params params);
|
||||||
|
|
||||||
|
// Various functions for loading a ggml llama model.
|
||||||
|
// Allocate (almost) all memory needed for the model.
|
||||||
|
// Return NULL on failure
|
||||||
|
LLAMA_API DEPRECATED(struct llama_context * llama_init_from_file(
|
||||||
|
const char * path_model,
|
||||||
|
struct llama_context_params params),
|
||||||
|
"please use llama_load_model_from_file combined with llama_new_context_with_model instead");
|
||||||
|
|
||||||
|
// Frees all allocated memory
|
||||||
|
LLAMA_API void llama_free(struct llama_context * ctx);
|
||||||
|
|
||||||
|
// Returns 0 on success
|
||||||
|
LLAMA_API int llama_model_quantize(
|
||||||
|
const char * fname_inp,
|
||||||
|
const char * fname_out,
|
||||||
|
const llama_model_quantize_params * params);
|
||||||
|
|
||||||
|
// Apply a LoRA adapter to a loaded model
|
||||||
|
// path_base_model is the path to a higher quality model to use as a base for
|
||||||
|
// the layers modified by the adapter. Can be NULL to use the current loaded model.
|
||||||
|
// The model needs to be reloaded before applying a new adapter, otherwise the adapter
|
||||||
|
// will be applied on top of the previous one
|
||||||
|
// Returns 0 on success
|
||||||
|
LLAMA_API DEPRECATED(int llama_apply_lora_from_file(
|
||||||
|
struct llama_context * ctx,
|
||||||
|
const char * path_lora,
|
||||||
|
const char * path_base_model,
|
||||||
|
int n_threads),
|
||||||
|
"please use llama_model_apply_lora_from_file instead");
|
||||||
|
|
||||||
|
LLAMA_API int llama_model_apply_lora_from_file(
|
||||||
|
const struct llama_model * model,
|
||||||
|
const char * path_lora,
|
||||||
|
const char * path_base_model,
|
||||||
|
int n_threads);
|
||||||
|
|
||||||
|
// Returns the number of tokens in the KV cache
|
||||||
|
LLAMA_API int llama_get_kv_cache_token_count(const struct llama_context * ctx);
|
||||||
|
|
||||||
|
// Sets the current rng seed.
|
||||||
|
LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
|
||||||
|
|
||||||
|
// Returns the maximum size in bytes of the state (rng, logits, embedding
|
||||||
|
// and kv_cache) - will often be smaller after compacting tokens
|
||||||
|
LLAMA_API size_t llama_get_state_size(const struct llama_context * ctx);
|
||||||
|
|
||||||
|
// Copies the state to the specified destination address.
|
||||||
|
// Destination needs to have allocated enough memory.
|
||||||
|
// Returns the number of bytes copied
|
||||||
|
LLAMA_API size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst);
|
||||||
|
|
||||||
|
// Set the state reading from the specified address
|
||||||
|
// Returns the number of bytes read
|
||||||
|
LLAMA_API size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src);
|
||||||
|
|
||||||
|
// Save/load session file
|
||||||
|
LLAMA_API bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out);
|
||||||
|
LLAMA_API bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count);
|
||||||
|
|
||||||
|
// Run the llama inference to obtain the logits and probabilities for the next token.
|
||||||
|
// tokens + n_tokens is the provided batch of new tokens to process
|
||||||
|
// n_past is the number of tokens to use from previous eval calls
|
||||||
|
// Returns 0 on success
|
||||||
|
LLAMA_API int llama_eval(
|
||||||
|
struct llama_context * ctx,
|
||||||
|
const llama_token * tokens,
|
||||||
|
int n_tokens,
|
||||||
|
int n_past,
|
||||||
|
int n_threads);
|
||||||
|
|
||||||
|
// Same as llama_eval, but use float matrix input directly.
|
||||||
|
LLAMA_API int llama_eval_embd(
|
||||||
|
struct llama_context * ctx,
|
||||||
|
const float * embd,
|
||||||
|
int n_tokens,
|
||||||
|
int n_past,
|
||||||
|
int n_threads);
|
||||||
|
|
||||||
|
// Export a static computation graph for context of 511 and batch size of 1
|
||||||
|
// NOTE: since this functionality is mostly for debugging and demonstration purposes, we hardcode these
|
||||||
|
// parameters here to keep things simple
|
||||||
|
// IMPORTANT: do not use for anything else other than debugging and testing!
|
||||||
|
LLAMA_API int llama_eval_export(struct llama_context * ctx, const char * fname);
|
||||||
|
|
||||||
|
// Convert the provided text into tokens.
|
||||||
|
// The tokens pointer must be large enough to hold the resulting tokens.
|
||||||
|
// Returns the number of tokens on success, no more than n_max_tokens
|
||||||
|
// Returns a negative number on failure - the number of tokens that would have been returned
|
||||||
|
// TODO: not sure if correct
|
||||||
|
LLAMA_API int llama_tokenize(
|
||||||
|
struct llama_context * ctx,
|
||||||
|
const char * text,
|
||||||
|
llama_token * tokens,
|
||||||
|
int n_max_tokens,
|
||||||
|
bool add_bos);
|
LLAMA_API int llama_n_vocab(const struct llama_context * ctx);
LLAMA_API int llama_n_ctx  (const struct llama_context * ctx);
LLAMA_API int llama_n_embd (const struct llama_context * ctx);

// Get the vocabulary as output parameters.
// Returns number of results.
LLAMA_API int llama_get_vocab(
    const struct llama_context * ctx,
    const char * * strings,
    float * scores,
    int capacity);

// Token logits obtained from the last call to llama_eval()
// The logits for the last token are stored in the last row
// Can be mutated in order to change the probabilities of the next token
// Rows: n_tokens
// Cols: n_vocab
LLAMA_API float * llama_get_logits(struct llama_context * ctx);

// Get the embeddings for the input
// shape: [n_embd] (1-dimensional)
LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);

// Token Id -> String. Uses the vocabulary in the provided context
LLAMA_API const char * llama_token_to_str(const struct llama_context * ctx, llama_token token);

// Special tokens
LLAMA_API llama_token llama_token_bos(); // beginning-of-sentence
LLAMA_API llama_token llama_token_eos(); // end-of-sentence
LLAMA_API llama_token llama_token_nl();  // next-line

// Sampling functions

/// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
LLAMA_API void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty);

/// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
LLAMA_API void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);

/// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, the logits must be directly extracted from the original generation context without being sorted.
/// @params guidance_ctx A separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
/// @params scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
/// @params smooth_factor Smooth factor between guidance logits and original logits. 1.0f means only use guidance logits. 0.0f means only original logits.
LLAMA_API void llama_sample_classifier_free_guidance(
    struct llama_context * ctx,
    llama_token_data_array * candidates,
    struct llama_context * guidance_ctx,
    float scale,
    float smooth_factor);

/// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
LLAMA_API void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates);

/// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
LLAMA_API void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep);

/// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
LLAMA_API void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);

/// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
LLAMA_API void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep);

/// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
LLAMA_API void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);
LLAMA_API void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates, float temp);

/// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
/// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
/// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
/// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
LLAMA_API llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu);

/// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
/// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
/// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
LLAMA_API llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu);

/// @details Selects the token with the highest probability.
LLAMA_API llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates);

/// @details Randomly selects a token from the candidates based on their probabilities.
LLAMA_API llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates);

// Performance information
LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
LLAMA_API void llama_print_timings(struct llama_context * ctx);
LLAMA_API void llama_reset_timings(struct llama_context * ctx);

// Print system information
LLAMA_API const char * llama_print_system_info(void);

#ifdef __cplusplus
}
#endif

// Internal API to be implemented by llama.cpp and used by tests/benchmarks only
#ifdef LLAMA_API_INTERNAL

#include <vector>
#include <string>
struct ggml_tensor;

const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);

#endif

#endif // LLAMA_H
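Aside: the no-argument helpers near the end of this header (`llama_print_system_info`, `llama_token_eos`, and friends) are the simplest place to see how the vendored header is reached from Go through cgo. The following is a minimal sketch, not code from this change: it assumes `llama.h` sits in the package directory and that the compiled llama.cpp objects are linked by the package's other cgo directives.

```go
package llama

/*
#include "llama.h"
*/
import "C"

import "fmt"

// SystemInfo returns the feature report (AVX, BLAS, Metal, ...) that
// llama.cpp compiles in for the current build.
func SystemInfo() string {
	return C.GoString(C.llama_print_system_info())
}

// EOS exposes the end-of-sentence token id declared in llama.h.
func EOS() int {
	return int(C.llama_token_eos())
}

// printSystemInfo is illustrative only.
func printSystemInfo() {
	fmt.Println(SystemInfo(), "eos token:", EOS())
}
```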
@@ -1,9 +0,0 @@
//go:build cublas
// +build cublas

package llama

/*
#cgo LDFLAGS: -lcublas -lcudart -L/usr/local/cuda/lib64/
*/
import "C"
@@ -1,2 +0,0 @@
//go:build metal
package llama
@@ -1,9 +0,0 @@
//go:build openblas
// +build openblas

package llama

/*
#cgo LDFLAGS: -lopenblas
*/
import "C"
@@ -1,98 +0,0 @@
// MIT License

// Copyright (c) 2023 go-skynet authors

// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:

// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

package llama

type ModelOptions struct {
    ContextSize int
    Seed        int
    NBatch      int
    F16Memory   bool
    MLock       bool
    MMap        bool
    VocabOnly   bool
    LowVRAM     bool
    Embeddings  bool
    NUMA        bool
    NGPULayers  int
    MainGPU     string
    TensorSplit string
}

type PredictOptions struct {
    Seed, Threads, Tokens, TopK, Repeat, Batch, NKeep int
    TopP, Temperature, Penalty                        float64
    F16KV                                             bool
    DebugMode                                         bool
    StopPrompts                                       []string
    IgnoreEOS                                         bool

    TailFreeSamplingZ float64
    TypicalP          float64
    FrequencyPenalty  float64
    PresencePenalty   float64
    Mirostat          int
    MirostatETA       float64
    MirostatTAU       float64
    PenalizeNL        bool
    LogitBias         string
    TokenCallback     func(string) bool

    MLock, MMap bool
    MainGPU     string
    TensorSplit string
}

type PredictOption func(p *PredictOptions)

type ModelOption func(p *ModelOptions)

var DefaultModelOptions ModelOptions = ModelOptions{
    ContextSize: 512,
    Seed:        0,
    F16Memory:   false,
    MLock:       false,
    Embeddings:  false,
    MMap:        true,
    LowVRAM:     false,
}

var DefaultOptions PredictOptions = PredictOptions{
    Seed:              -1,
    Threads:           4,
    Tokens:            128,
    Penalty:           1.1,
    Repeat:            64,
    Batch:             512,
    NKeep:             64,
    TopK:              40,
    TopP:              0.95,
    TailFreeSamplingZ: 1.0,
    TypicalP:          1.0,
    Temperature:       0.8,
    FrequencyPenalty:  0.0,
    PresencePenalty:   0.0,
    Mirostat:          0,
    MirostatTAU:       5.0,
    MirostatETA:       0.1,
    MMap:              true,
}
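The deleted file above also declared functional option types (`ModelOption`, `PredictOption`) alongside the default structs. For readers unfamiliar with that pattern, here is a hedged sketch of how such options are typically composed. The `WithTemperature` helper and `newPredictOptions` constructor are illustrative only; they are not part of this codebase.

```go
package llama

// WithTemperature is an illustrative PredictOption showing the pattern:
// a closure that mutates one field of PredictOptions.
func WithTemperature(t float64) PredictOption {
	return func(p *PredictOptions) {
		p.Temperature = t
	}
}

// newPredictOptions starts from the package defaults and applies overrides.
func newPredictOptions(opts ...PredictOption) PredictOptions {
	p := DefaultOptions
	for _, opt := range opts {
		opt(&p)
	}
	return p
}
```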
104 llama/utils.go Normal file

@@ -0,0 +1,104 @@
package llama

type node[T any] struct {
    t    T
    next *node[T]
    prev *node[T]
}

type deque[T any] struct {
    head     *node[T]
    tail     *node[T]
    size     int
    capacity int
}

func (d *deque[T]) Empty() bool {
    return d.size == 0
}

func (d *deque[T]) Len() int {
    return d.size
}

func (d *deque[T]) Cap() int {
    return d.capacity
}

func (d *deque[T]) Push(t T) {
    if d.capacity > 0 && d.size >= d.capacity {
        d.PopLeft()
    }

    n := node[T]{t: t}
    if d.head != nil {
        n.next = d.head
        d.head.prev = &n
        d.head = &n
    } else {
        d.head = &n
        d.tail = &n
    }

    d.size++
}

func (d *deque[T]) PushLeft(t T) {
    if d.capacity > 0 && d.size >= d.capacity {
        d.Pop()
    }

    n := node[T]{t: t}
    if d.tail != nil {
        n.prev = d.tail
        d.tail.next = &n
        d.tail = &n
    } else {
        d.head = &n
        d.tail = &n
    }

    d.size++
}

func (d *deque[T]) Pop() *T {
    if d.Empty() {
        return nil
    }

    head := d.head
    d.head = head.next
    if d.head != nil {
        d.head.prev = nil
    } else {
        d.tail = nil
    }

    d.size--
    return &head.t
}

func (d *deque[T]) PopLeft() *T {
    if d.Empty() {
        return nil
    }

    tail := d.tail
    d.tail = tail.prev
    if d.tail != nil {
        d.tail.next = nil
    } else {
        d.head = nil
    }

    d.size--
    return &tail.t
}

func (d *deque[T]) Data() (data []T) {
    for n := d.head; n != nil; n = n.next {
        data = append(data, n.t)
    }

    return data
}
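The deque above is unexported and undocumented, so a quick orientation: `Push` and `Pop` operate on the head, `PushLeft` and `PopLeft` on the tail, and a non-zero `capacity` turns it into a bounded buffer that evicts from the opposite end on overflow. A small usage sketch (the function below is illustrative and lives in this package only because the type is unexported):

```go
package llama

import "fmt"

// exampleDeque demonstrates the bounded-deque behavior of the type above.
func exampleDeque() {
	d := deque[int]{capacity: 3}

	// Push adds at the head; once size reaches capacity, the oldest
	// element is evicted from the tail via PopLeft.
	for i := 1; i <= 4; i++ {
		d.Push(i)
	}

	fmt.Println(d.Data())     // [4 3 2] (1 was evicted)
	fmt.Println(*d.Pop())     // 4, removed from the head
	fmt.Println(*d.PopLeft()) // 2, removed from the tail
}
```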
137 server/routes.go

@@ -11,12 +11,12 @@ import (
 	"net/http"
 	"os"
 	"path"
-	"runtime"
 	"strings"
 	"text/template"
 
 	"github.com/gin-gonic/gin"
 	"github.com/lithammer/fuzzysearch/fuzzy"
+	"golang.org/x/sync/errgroup"
 
 	"github.com/jmorganca/ollama/api"
 	"github.com/jmorganca/ollama/llama"
@@ -36,14 +36,10 @@ func cacheDir() string {
 }
 
 func generate(c *gin.Context) {
-	var req api.GenerateRequest
-	if req.ModelOptions == nil {
-		req.ModelOptions = &api.DefaultModelOptions
+	req := api.GenerateRequest{
+		Options: api.DefaultOptions(),
 	}
 
-	if req.PredictOptions == nil {
-		req.PredictOptions = &api.DefaultPredictOptions
-	}
 	if err := c.ShouldBindJSON(&req); err != nil {
 		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
 		return
@@ -60,15 +56,12 @@ func generate(c *gin.Context) {
 		req.Model = path.Join(cacheDir(), "models", req.Model+".bin")
 	}
 
-	modelOpts := getModelOpts(req)
-	modelOpts.NGPULayers = 1 // hard-code this for now
-
-	model, err := llama.New(req.Model, modelOpts)
+	llm, err := llama.New(req.Model, req.Options)
 	if err != nil {
-		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
 		return
 	}
-	defer model.Free()
+	defer llm.Close()
 
 	templateNames := make([]string, 0, len(templates.Templates()))
 	for _, template := range templates.Templates() {
@@ -87,43 +80,41 @@ func generate(c *gin.Context) {
 	}
 
 	ch := make(chan string)
-	model.SetTokenCallback(func(token string) bool {
-		ch <- token
-		return true
-	})
-
-	predictOpts := getPredictOpts(req)
-
-	go func() {
+	g, _ := errgroup.WithContext(c.Request.Context())
+	g.Go(func() error {
 		defer close(ch)
-		_, err := model.Predict(req.Prompt, predictOpts)
-		if err != nil {
-			panic(err)
-		}
-	}()
-
-	c.Stream(func(w io.Writer) bool {
-		token, ok := <-ch
-		if !ok {
-			return false
-		}
-
-		resp := api.GenerateResponse{
-			Response: token,
-		}
-
-		bts, err := json.Marshal(resp)
-		if err != nil {
-			return false
-		}
-
-		bts = append(bts, '\n')
-		if _, err := w.Write(bts); err != nil {
-			return false
-		}
-
-		return true
-	})
+		return llm.Predict(req.Prompt, func(s string) {
+			ch <- s
+		})
+	})
+
+	g.Go(func() error {
+		c.Stream(func(w io.Writer) bool {
+			s, ok := <-ch
+			if !ok {
+				return false
+			}
+
+			bts, err := json.Marshal(api.GenerateResponse{Response: s})
+			if err != nil {
+				return false
+			}
+
+			bts = append(bts, '\n')
+			if _, err := w.Write(bts); err != nil {
+				return false
+			}
+
+			return true
+		})
+
+		return nil
+	})
+
+	if err := g.Wait(); err != nil && !errors.Is(err, io.EOF) {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+		return
+	}
 }
 
 func Serve(ln net.Listener) error {
@@ -195,53 +186,3 @@ func matchRankOne(source string, targets []string) (bestMatch string, bestRank i
 
 	return
 }
-
-func getModelOpts(req api.GenerateRequest) llama.ModelOptions {
-	var opts llama.ModelOptions
-	opts.ContextSize = req.ModelOptions.ContextSize
-	opts.Seed = req.ModelOptions.Seed
-	opts.F16Memory = req.ModelOptions.F16Memory
-	opts.MLock = req.ModelOptions.MLock
-	opts.Embeddings = req.ModelOptions.Embeddings
-	opts.MMap = req.ModelOptions.MMap
-	opts.LowVRAM = req.ModelOptions.LowVRAM
-
-	opts.NBatch = req.ModelOptions.NBatch
-	opts.VocabOnly = req.ModelOptions.VocabOnly
-	opts.NUMA = req.ModelOptions.NUMA
-	opts.NGPULayers = req.ModelOptions.NGPULayers
-	opts.MainGPU = req.ModelOptions.MainGPU
-	opts.TensorSplit = req.ModelOptions.TensorSplit
-
-	return opts
-}
-
-func getPredictOpts(req api.GenerateRequest) llama.PredictOptions {
-	var opts llama.PredictOptions
-
-	if req.PredictOptions.Threads == -1 {
-		opts.Threads = runtime.NumCPU()
-	} else {
-		opts.Threads = req.PredictOptions.Threads
-	}
-
-	opts.Seed = req.PredictOptions.Seed
-	opts.Tokens = req.PredictOptions.Tokens
-	opts.Penalty = req.PredictOptions.Penalty
-	opts.Repeat = req.PredictOptions.Repeat
-	opts.Batch = req.PredictOptions.Batch
-	opts.NKeep = req.PredictOptions.NKeep
-	opts.TopK = req.PredictOptions.TopK
-	opts.TopP = req.PredictOptions.TopP
-	opts.TailFreeSamplingZ = req.PredictOptions.TailFreeSamplingZ
-	opts.TypicalP = req.PredictOptions.TypicalP
-	opts.Temperature = req.PredictOptions.Temperature
-	opts.FrequencyPenalty = req.PredictOptions.FrequencyPenalty
-	opts.PresencePenalty = req.PredictOptions.PresencePenalty
-	opts.Mirostat = req.PredictOptions.Mirostat
-	opts.MirostatTAU = req.PredictOptions.MirostatTAU
-	opts.MirostatETA = req.PredictOptions.MirostatETA
-	opts.MMap = req.PredictOptions.MMap
-
-	return opts
-}
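For context on what the rewritten handler emits: each token produced by `llm.Predict` is marshalled as an `api.GenerateResponse` and written as one JSON object per line, so a client can simply read the response body line by line. A hedged sketch of such a client follows; the host, port, route, model name, and the `response` field name are assumptions for illustration, not values taken from this diff.

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// generateResponse mirrors api.GenerateResponse; the JSON field name
// "response" is an assumption for this sketch.
type generateResponse struct {
	Response string `json:"response"`
}

func main() {
	// Assumed request shape: a model name and a prompt, matching
	// api.GenerateRequest.
	body, _ := json.Marshal(map[string]string{
		"model":  "vicuna-7b-v1.3.ggmlv3.q4_1",
		"prompt": "Why is the sky blue?",
	})

	// Assumed address and route; use whatever the server actually listens on.
	resp, err := http.Post("http://127.0.0.1:11434/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The handler writes newline-delimited JSON, so scan line by line and
	// print each streamed token as it arrives.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		var r generateResponse
		if err := json.Unmarshal(scanner.Bytes(), &r); err != nil {
			panic(err)
		}
		fmt.Print(r.Response)
	}
	fmt.Println()
}
```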