upgrade ollama backend support to use the chat completions api
parent 29cec6445c
commit 8cde5ce43f

@@ -99,14 +99,26 @@ fn parse_llamacpp_text(text: &str) -> Result<Vec<Generation>> {
 }
 
 #[derive(Debug, Serialize, Deserialize)]
-struct OllamaGeneration {
-    response: String,
+struct OllamaGenerationChoiceMessage {
+    role: String,
+    content: String,
 }
 
-impl From<OllamaGeneration> for Generation {
-    fn from(value: OllamaGeneration) -> Self {
+#[derive(Debug, Serialize, Deserialize)]
+struct OllamaGenerationChoice {
+    index: i32,
+    message: OllamaGenerationChoiceMessage,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct OllamaGeneration {
+    choices: Vec<OllamaGenerationChoice>
+}
+
+impl From<OllamaGenerationChoice> for Generation {
+    fn from(value: OllamaGenerationChoice) -> Self {
         Generation {
-            generated_text: value.response,
+            generated_text: value.message.content,
         }
     }
 }
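
The new structs mirror the OpenAI-style chat completions response shape: a top-level choices array whose entries carry an index and a role/content message. A minimal standalone sketch of how they deserialize, with the struct definitions copied from the diff; the JSON payload is a hand-written sample, and real responses carry extra fields (id, model, usage, ...) that serde simply ignores because they are not declared on the structs:

use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct OllamaGenerationChoiceMessage {
    role: String,
    content: String,
}

#[derive(Debug, Serialize, Deserialize)]
struct OllamaGenerationChoice {
    index: i32,
    message: OllamaGenerationChoiceMessage,
}

#[derive(Debug, Serialize, Deserialize)]
struct OllamaGeneration {
    choices: Vec<OllamaGenerationChoice>,
}

fn main() -> serde_json::Result<()> {
    // Hand-written sample of an OpenAI-compatible chat completions body.
    let text = r#"{"choices":[{"index":0,"message":{"role":"assistant","content":"fn add(a: i32, b: i32) -> i32 { a + b }"}}]}"#;
    let gen: OllamaGeneration = serde_json::from_str(text)?;
    // In the real code each choice becomes a Generation via the From impl;
    // here we just read the fields directly.
    println!("{}: {}", gen.choices[0].message.role, gen.choices[0].message.content);
    Ok(())
}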

@@ -124,7 +136,7 @@ fn build_ollama_headers(api_token: Option<&String>, ide: Ide) -> Result<HeaderMap>
 
 fn parse_ollama_text(text: &str) -> Result<Vec<Generation>> {
     match serde_json::from_str(text)? {
-        OllamaAPIResponse::Generation(gen) => Ok(vec![gen.into()]),
+        OllamaAPIResponse::Generation(completion) => Ok(completion.choices.into_iter().map(|x| x.into()).collect()),
         OllamaAPIResponse::Error(err) => Err(Error::Ollama(err)),
     }
 }
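
Because a chat completions response can carry several choices, the parser now flattens all of them into the returned Vec instead of wrapping a single response string. A compact standalone illustration; serde_json::Value lookups and a plain String error stand in for the crate's typed OllamaAPIResponse enum and Error::Ollama:

use serde_json::Value;

// Standalone stand-in for parse_ollama_text, simplified for the sketch.
fn parse_choices(text: &str) -> Result<Vec<String>, String> {
    let v: Value = serde_json::from_str(text).map_err(|e| e.to_string())?;
    if let Some(err) = v.get("error") {
        // Error bodies surface as Err, as the real parser does via Error::Ollama.
        return Err(err.to_string());
    }
    let choices = v["choices"].as_array().ok_or("missing choices")?;
    // Flatten every returned choice into one generation each.
    Ok(choices
        .iter()
        .filter_map(|c| c["message"]["content"].as_str().map(str::to_owned))
        .collect())
}

fn main() {
    let body = r#"{"choices":[
        {"index":0,"message":{"role":"assistant","content":"first"}},
        {"index":1,"message":{"role":"assistant","content":"second"}}]}"#;
    // One request can now yield several generations, one per choice.
    assert_eq!(parse_choices(body).unwrap(), vec!["first", "second"]);
}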

@@ -227,7 +239,9 @@ pub(crate) fn build_body(
             request_body.insert("prompt".to_owned(), Value::String(prompt));
         }
         Backend::Ollama { .. } | Backend::OpenAi { .. } => {
-            request_body.insert("prompt".to_owned(), Value::String(prompt));
+            request_body.insert("messages".to_owned(), json!([
+                { "role": "user", "content": prompt }
+            ]));
             request_body.insert("model".to_owned(), Value::String(model));
             request_body.insert("stream".to_owned(), Value::Bool(false));
         }
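
For the Ollama and OpenAI backends, build_body now sends the prompt as a single user message in a messages array, alongside model and stream: false, instead of a bare prompt string. A standalone sketch of the payload this produces; the prompt and model values are illustrative:

use serde_json::{json, Map, Value};

fn main() {
    // Illustrative values; the real ones come from the completion request.
    let prompt = "fn fib(n: u64) -> u64 {".to_owned();
    let model = "codellama:7b".to_owned();

    let mut request_body = Map::new();
    request_body.insert("messages".to_owned(), json!([
        { "role": "user", "content": prompt }
    ]));
    request_body.insert("model".to_owned(), Value::String(model));
    request_body.insert("stream".to_owned(), Value::Bool(false));

    // This object is what gets POSTed to the chat completions endpoint.
    println!("{}", serde_json::to_string_pretty(&Value::Object(request_body)).unwrap());
}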

@@ -441,19 +441,19 @@ fn build_url(backend: Backend, model: &str, disable_url_path_completion: bool) -> String {
             }
         }
         Backend::Ollama { mut url } => {
-            if url.ends_with("/api/generate") {
+            if url.ends_with("/v1/chat/completions") {
                 url
-            } else if url.ends_with("/api/") {
-                url.push_str("generate");
+            } else if url.ends_with("/v1/") {
+                url.push_str("chat/completions");
                 url
-            } else if url.ends_with("/api") {
-                url.push_str("/generate");
+            } else if url.ends_with("/v1") {
+                url.push_str("/chat/completions");
                 url
             } else if url.ends_with('/') {
-                url.push_str("api/generate");
+                url.push_str("v1/chat/completions");
                 url
             } else {
-                url.push_str("/api/generate");
+                url.push_str("/v1/chat/completions");
                 url
             }
         }
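
The rewritten suffix checks normalize whatever base URL the user configured so that every variant resolves to the /v1/chat/completions endpoint. A standalone copy of the new match arm body, exercised against a few illustrative inputs (localhost:11434 is Ollama's default address):

// Standalone copy of the new Ollama arm of build_url, so the suffix
// handling can be exercised in isolation.
fn complete_ollama_url(mut url: String) -> String {
    if url.ends_with("/v1/chat/completions") {
        url
    } else if url.ends_with("/v1/") {
        url.push_str("chat/completions");
        url
    } else if url.ends_with("/v1") {
        url.push_str("/chat/completions");
        url
    } else if url.ends_with('/') {
        url.push_str("v1/chat/completions");
        url
    } else {
        url.push_str("/v1/chat/completions");
        url
    }
}

fn main() {
    // All of these configured URLs normalize to the same endpoint.
    for base in [
        "http://localhost:11434",
        "http://localhost:11434/",
        "http://localhost:11434/v1",
        "http://localhost:11434/v1/",
        "http://localhost:11434/v1/chat/completions",
    ] {
        assert_eq!(
            complete_ollama_url(base.to_owned()),
            "http://localhost:11434/v1/chat/completions"
        );
    }
}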