From eb2f19f576f4f41ec8063af40994f595c6ea6f9e Mon Sep 17 00:00:00 2001
From: gered
Date: Sat, 19 Oct 2024 15:48:38 -0400
Subject: [PATCH] update minuet-ai config

use a fork to fix compatibility with ollama + open webui endpoints

update the minuet-ai config to use the updated prompt string and response
parsing customization capabilities that this fork brings in, which are
necessary to use the non-legacy api completions endpoint
---
 nvim/lazy-lock.json           |  2 +-
 nvim/lua/plugins/nvim-cmp.lua | 35 +++++++++++++++++++++++++++++++++--
 2 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/nvim/lazy-lock.json b/nvim/lazy-lock.json
index 80bcb3d..535c9d1 100644
--- a/nvim/lazy-lock.json
+++ b/nvim/lazy-lock.json
@@ -22,7 +22,7 @@
   "mason.nvim": { "branch": "main", "commit": "e2f7f9044ec30067bc11800a9e266664b88cda22" },
   "menu": { "branch": "main", "commit": "ea606f6ab2430db0aece8075e62c14132b815ae1" },
   "mini.nvim": { "branch": "main", "commit": "e52ac74bd4e9c0ce6a182ee551eb099236b5a89d" },
-  "minuet-ai.nvim": { "branch": "main", "commit": "bd5a7ae2bda3a4f57e8a94e3229f41647c77c69e" },
+  "minuet-ai": { "branch": "openai_fim_fn_customizations", "commit": "cec6383aeacb5087134204412bd980f368972c53" },
   "nvim-ansible": { "branch": "main", "commit": "9c3b4a771b8c8d7b4f2171466464d978cb3846f7" },
   "nvim-autopairs": { "branch": "master", "commit": "ee297f215e95a60b01fde33275cc3c820eddeebe" },
   "nvim-cmp": { "branch": "main", "commit": "ae644feb7b67bf1ce4260c231d1d4300b19c6f30" },
diff --git a/nvim/lua/plugins/nvim-cmp.lua b/nvim/lua/plugins/nvim-cmp.lua
index 53da09d..74413c1 100644
--- a/nvim/lua/plugins/nvim-cmp.lua
+++ b/nvim/lua/plugins/nvim-cmp.lua
@@ -37,7 +37,10 @@ return {
       'hrsh7th/cmp-nvim-lsp-signature-help',
 
       {
-        'milanglacier/minuet-ai.nvim',
+        -- 'milanglacier/minuet-ai.nvim',
+        -- dir = '~/code/minuet-ai.nvim',
+        url = 'ssh://git@code.blarg.ca:2250/gered/minuet-ai.git',
+        branch = 'openai_fim_fn_customizations',
         config = function()
           local function get_stop_tokens(model)
             if model:match '^codellama' then
@@ -53,6 +56,20 @@ return {
             end
           end
 
+          local function get_fim_prompt(model, prefix, suffix)
+            if model:match '^codellama' then
+              return '<PRE> ' .. prefix .. ' <SUF>' .. suffix .. ' <MID>'
+            elseif model:match '^qwen' then
+              return '<|fim_prefix|>' .. prefix .. '<|fim_suffix|>' .. suffix .. '<|fim_middle|>'
+            elseif model:match '^starcoder' then
+              return '<fim_prefix>' .. prefix .. '<fim_suffix>' .. suffix .. '<fim_middle>'
+            elseif model:match '^codestral' then
+              return '[SUFFIX]' .. suffix .. '[PREFIX]' .. prefix
+            elseif model:match '^deepseek-coder' then
+              return '<|fim▁begin|>' .. prefix .. '<|fim▁hole|>' .. suffix .. '<|fim▁end|>'
+            end
+          end
+
           local llama_base_url = os.getenv 'LLAMA_API_BASE' or 'http://localhost:11434'
           -- local model = 'codellama:7b-code'
           local model = 'codellama:13b-code'
@@ -75,7 +92,7 @@ return {
             provider_options = {
               openai_fim_compatible = {
                 model = model,
-                end_point = llama_base_url .. '/v1/completions',
+                end_point = llama_base_url .. '/v1/chat/completions',
                 api_key = 'LLAMA_API_KEY',
                 name = 'Ollama',
                 stream = false,
@@ -85,6 +102,20 @@ return {
                   temperature = 0.2,
                   n = 1,
                 },
+                prompt_fn = function(data, options, prefix, suffix)
+                  data.messages = {
+                    {
+                      role = 'user',
+                      content = get_fim_prompt(model, prefix, suffix),
+                    },
+                  }
+                  --data.prompt = prefix
+                  --data.suffix = suffix
+                end,
+                get_text_fn = function(json)
+                  return json.choices[1].message.content
+                  --return json.choices[1].text
+                end,
               },
             },
           }
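
Note (annotation, not part of the patch to apply): the sketch below shows roughly how
the two new hooks are expected to map onto Ollama's OpenAI-compatible chat completions
endpoint, inferred only from this config. The helper names (build_request_body,
extract_completion) and the sample values are illustrative assumptions, not code from
minuet-ai or the fork.

-- Illustrative sketch only; these helpers are hypothetical stand-ins for what the
-- completion source would do around prompt_fn/get_text_fn.
local function build_request_body(model, prefix, suffix)
  -- prompt_fn populates an OpenAI-style chat request: the FIM prompt is sent as a
  -- single user message instead of the legacy prompt/suffix fields.
  return {
    model = model, -- e.g. 'codellama:13b-code'
    stream = false,
    temperature = 0.2,
    n = 1,
    messages = {
      { role = 'user', content = '<PRE> ' .. prefix .. ' <SUF>' .. suffix .. ' <MID>' },
    },
  }
end

local function extract_completion(json)
  -- get_text_fn pulls the completion out of the decoded JSON response:
  -- /v1/chat/completions nests the text under choices[1].message.content, whereas the
  -- legacy /v1/completions endpoint returned choices[1].text.
  return json.choices[1].message.content
end

The POST target becomes llama_base_url .. '/v1/chat/completions' while the sampling
options stay the same; only the request body and the response parsing change, which is
exactly what prompt_fn and get_text_fn customize in the config above.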