Skip to content

Commit 0423930

Browse files
committed
refactor(llm): configure codecompanion to use local copilot service
1 parent 2ff04ce commit 0423930

File tree

1 file changed

+66
-45
lines changed

1 file changed

+66
-45
lines changed

lua/dcai/llm/codecompanion.lua

+66 -45
Original file line number | Diff line number | Diff line change
@@ -80,48 +80,28 @@ end
8080

8181
M.setup = function()
8282
vim.env['CODECOMPANION_TOKEN_PATH'] = vim.fn.expand(vim.env.XDG_CONFIG_HOME)
83-
84-
local provider, model = vim.g.get_ai_model()
83+
local provider = 'local_copilot'
8584

8685
local instance = require('codecompanion')
8786
instance.setup({
88-
display = {
89-
action_palette = {
90-
provider = 'default',
91-
opts = {
92-
show_default_actions = true,
93-
show_default_prompt_library = true,
94-
},
95-
},
96-
},
97-
adapters = {
98-
deepseek = function()
99-
return require('codecompanion.adapters').extend('deepseek', {
100-
name = 'deepseek',
101-
schema = {
102-
model = {
103-
default = 'deepseek-chat',
104-
},
87+
strategies = {
88+
inline = {
89+
adapter = provider,
90+
layout = 'buffer', -- vertical|horizontal|buffer
91+
keymaps = {
92+
accept_change = {
93+
modes = { n = 'ga' },
94+
description = 'Accept the suggested change',
10595
},
106-
})
107-
end,
108-
openai = function()
109-
return require('codecompanion.adapters').extend('openai', {
110-
schema = {
111-
model = {
112-
default = 'gpt-4o-mini',
113-
},
96+
reject_change = {
97+
modes = { n = 'gr' },
98+
description = 'Reject the suggested change',
11499
},
115-
})
116-
end,
117-
},
118-
strategies = {
100+
},
101+
},
119102
chat = {
120-
-- adapter = 'anthropic',
121-
-- adapter = 'gemini',
122103
adapter = provider,
123104
show_settings = true, -- Show LLM settings at the top of the chat buffer?
124-
125105
roles = {
126106
---The header name for the LLM's messages
127107
---@type string|fun(adapter: CodeCompanion.Adapter): string
@@ -200,19 +180,60 @@ M.setup = function()
200180
},
201181
},
202182
},
203-
inline = {
204-
-- adapter = 'anthropic',
205-
-- adapter = 'gemini',
206-
adapter = provider,
207-
keymaps = {
208-
accept_change = {
209-
modes = { n = 'ga' },
210-
description = 'Accept the suggested change',
183+
},
184+
adapters = {
185+
local_copilot = function()
186+
return require('codecompanion.adapters').extend('openai_compatible', {
187+
env = {
188+
url = 'http://localhost:7890',
189+
api_key = 'dummy_apikey',
190+
chat_url = '/v1/chat/completions',
191+
models_endpoint = '/v1/models',
211192
},
212-
reject_change = {
213-
modes = { n = 'gr' },
214-
description = 'Reject the suggested change',
193+
schema = {
194+
model = {
195+
default = 'gpt-4.1',
196+
},
197+
temperature = {
198+
order = 2,
199+
mapping = 'parameters',
200+
type = 'number',
201+
optional = true,
202+
default = 1,
203+
desc = 'What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.',
204+
validate = function(n)
205+
return n >= 0 and n <= 2, 'Must be between 0 and 2'
206+
end,
207+
},
208+
},
209+
})
210+
end,
211+
deepseek = function()
212+
return require('codecompanion.adapters').extend('deepseek', {
213+
name = 'deepseek',
214+
schema = {
215+
model = {
216+
default = 'deepseek-chat',
217+
},
215218
},
219+
})
220+
end,
221+
openai = function()
222+
return require('codecompanion.adapters').extend('openai', {
223+
schema = {
224+
model = {
225+
default = 'gpt-4.1-mini',
226+
},
227+
},
228+
})
229+
end,
230+
},
231+
display = {
232+
action_palette = {
233+
provider = 'default',
234+
opts = {
235+
show_default_actions = true,
236+
show_default_prompt_library = true,
216237
},
217238
},
218239
},

0 commit comments

Comments (0)