Daniel Hiltgen
This reduces the built-in Linux build so that it uses no vector extensions, which enables the resulting builds to run under Rosetta on macOS in Docker. At runtime it then checks which CPU vector extensions are actually present and loads the best available CPU library.
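As a rough illustration of that runtime check, here is a minimal sketch using `golang.org/x/sys/cpu`; the variant names (`cpu`, `cpu_avx`, `cpu_avx2`) are assumptions for illustration, not ollama's actual identifiers:

```go
// Minimal sketch of runtime CPU feature detection. The variant names are
// hypothetical; this is not the actual ollama implementation.
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// bestCPUVariant returns the most capable library variant the host supports.
// Under Rosetta, AVX is typically unavailable, so the plain "cpu" build
// (compiled without vector extensions) is the one that gets selected.
func bestCPUVariant() string {
	switch {
	case cpu.X86.HasAVX2:
		return "cpu_avx2"
	case cpu.X86.HasAVX:
		return "cpu_avx"
	default:
		return "cpu" // no vector extensions
	}
}

func main() {
	fmt.Println("selected CPU variant:", bestCPUVariant())
}
```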
```go
//go:build !darwin

package llm

import (
	"fmt"

	"github.com/jmorganca/ollama/api"
)

func newDefaultExtServer(model string, adapters, projectors []string, opts api.Options) (extServer, error) {
	// On Windows and Linux we always load the llama.cpp libraries dynamically
	// to avoid startup DLL dependencies. This ensures we can update the PATH
	// at runtime to get everything loaded.

	// This should never happen, as we'll always try to load one or more CPU
	// dynamic libraries before hitting this default.
	return nil, fmt.Errorf("no available default llm library")
}
```
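The "should never happen" comment implies a fallback chain: callers attempt each detected CPU library variant in order of capability and only reach this default when none load. A minimal sketch of that idea, where `loadVariant` and the variant list are hypothetical stand-ins rather than ollama's real API:

```go
// Hypothetical sketch of the fallback chain implied by the comments above.
// loadVariant is a stand-in for dynamically loading a llama.cpp build variant.
package main

import (
	"errors"
	"fmt"
)

func loadVariant(name string) error {
	// Stand-in: pretend no variant can be loaded on this host.
	return errors.New("not available on this host")
}

func newServer() error {
	// Try the most capable variant first, falling back to plainer builds.
	for _, variant := range []string{"cpu_avx2", "cpu_avx", "cpu"} {
		if err := loadVariant(variant); err == nil {
			fmt.Println("loaded", variant)
			return nil
		}
	}
	// Mirrors newDefaultExtServer: reached only if every variant failed.
	return fmt.Errorf("no available default llm library")
}

func main() {
	if err := newServer(); err != nil {
		fmt.Println(err)
	}
}
```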