local adapter_utils = require("codecompanion.utils.adapters")
local log = require("codecompanion.utils.log")

local CONSTANTS = {
  STANDARD_MESSAGE_FIELDS = {
    -- Fields that are defined in the standard OpenAI chat-completion API (inc. streaming and non-streaming)
    "content",
    "function_call",
    "refusal",
    "role",
    "tool_calls",
    "annotations",
    "audio",
  },
}

---Find the non-standard fields in the `message` or `delta` that are not in the standard OpenAI chat-completion specs.
---@param delta table?
---@return table|nil
local function find_extra_fields(delta)
  if delta == nil then
    return nil
  end

  local extra = {}
  vim.iter(delta):each(function(k, v)
    if not vim.list_contains(CONSTANTS.STANDARD_MESSAGE_FIELDS, k) then
      extra[k] = v
    end
  end)

  if not vim.tbl_isempty(extra) then
    return extra
  end
end
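
-- A minimal sketch of the behaviour above, assuming a hypothetical provider
-- that adds a non-standard `reasoning_content` field to its deltas:
--   find_extra_fields({ role = "assistant", content = "Hi", reasoning_content = "..." })
--   --> { reasoning_content = "..." }
--   find_extra_fields({ role = "assistant", content = "Hi" })
--   --> nil (every field is standard, so nothing is returned)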

---@class CodeCompanion.HTTPAdapter.OpenAI: CodeCompanion.HTTPAdapter
return {
  name = "openai",
  formatted_name = "OpenAI",
  roles = {
    llm = "assistant",
    user = "user",
    tool = "tool",
  },
  opts = {
    stream = true,
    tools = true,
    vision = true,
  },
  features = {
    text = true,
    tokens = true,
  },
  url = "https://api.openai.com/v1/chat/completions",
  env = {
    api_key = "OPENAI_API_KEY",
  },
  headers = {
    ["Content-Type"] = "application/json",
    Authorization = "Bearer ${api_key}",
  },
  handlers = {
    ---@param self CodeCompanion.HTTPAdapter
    ---@return boolean
    setup = function(self)
      local model = self.schema.model.default
      if type(model) == "function" then
        model = model(self)
      end

      local model_opts = self.schema.model.choices
      if type(model_opts) == "function" then
        model_opts = model_opts(self)
      end

      self.opts.vision = true
      if model_opts and model_opts[model] and model_opts[model].opts then
        self.opts = vim.tbl_deep_extend("force", self.opts, model_opts[model].opts)
        if not model_opts[model].opts.has_vision then
          self.opts.vision = false
        end
      end

      if self.opts and self.opts.stream then
        self.parameters.stream = true
        self.parameters.stream_options = { include_usage = true }
      end

      return true
    end,
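
    -- With streaming enabled, the request body therefore carries (illustrative):
    --   "stream": true, "stream_options": { "include_usage": true }
    -- which makes the API append a final chunk containing a `usage` object that
    -- the `tokens` handler below reads.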

    ---Set the parameters
    ---@param self CodeCompanion.HTTPAdapter
    ---@param params table
    ---@param messages table
    ---@return table
    form_parameters = function(self, params, messages)
      return params
    end,

    ---Set the format of the role and content for the messages from the chat buffer
    ---@param self CodeCompanion.HTTPAdapter
    ---@param messages table Format is: { { role = "user", content = "Your prompt here" } }
    ---@return table
    form_messages = function(self, messages)
      local model = self.schema.model.default
      if type(model) == "function" then
        model = model(self)
      end

      messages = vim
        .iter(messages)
        :map(function(m)
          -- o1 models do not accept a system role, so demote it to a user message
          if vim.startswith(model, "o1") and m.role == "system" then
            m.role = self.roles.user
          end

          -- Ensure tool_calls are clean
          local tool_calls = nil
          if m.tools and m.tools.calls then
            tool_calls = vim
              .iter(m.tools.calls)
              :map(function(tool_call)
                -- Only send the fields the API expects, dropping any internal metadata
                return {
                  id = tool_call.id,
                  ["function"] = tool_call["function"],
                  type = tool_call.type,
                }
              end)
              :totable()
          end

          -- Process any images
          if m._meta and m._meta.tag == "image" and m.context and m.context.mimetype then
            if self.opts and self.opts.vision then
              m.content = {
                {
                  type = "image_url",
                  image_url = {
                    url = string.format("data:%s;base64,%s", m.context.mimetype, m.content),
                  },
                },
              }
            else
              -- Remove the message if vision is not supported
              return nil
            end
          end

          local result = {
            role = m.role,
            content = m.content,
            tool_calls = tool_calls,
            tool_call_id = m.tools and m.tools.call_id or nil,
          }

          -- Adapters like Copilot have reasoning fields that must be preserved
          if m.reasoning then
            result.reasoning = m.reasoning
          end

          return result
        end)
        :totable()

      return { messages = messages }
    end,
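
    -- Illustrative (assuming vision is enabled): a message tagged as an image with
    -- context.mimetype = "image/png" has its content rewritten to
    --   { { type = "image_url", image_url = { url = "data:image/png;base64,<data>" } } }
    -- which is the OpenAI chat-completion image input format.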

    ---Provides the schemas of the tools that are available to the LLM to call
    ---@param self CodeCompanion.HTTPAdapter
    ---@param tools table<string, table>
    ---@return table|nil
    form_tools = function(self, tools)
      if not self.opts.tools or not tools then
        return nil
      end
      if vim.tbl_count(tools) == 0 then
        return nil
      end

      local transformed = {}
      for _, tool in pairs(tools) do
        for _, schema in pairs(tool) do
          table.insert(transformed, schema)
        end
      end

      return { tools = transformed }
    end,
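
    -- Illustrative: given tools = { weather = { <json schema> } }, this returns
    -- { tools = { <json schema> } }, i.e. the nested map is flattened into the
    -- flat list that the `tools` request parameter expects.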

    ---Returns the number of tokens generated from the LLM
    ---@param self CodeCompanion.HTTPAdapter
    ---@param data table The data from the LLM
    ---@return number|nil
    tokens = function(self, data)
      if data and data ~= "" then
        local data_mod = adapter_utils.clean_streamed_data(data)
        local ok, json = pcall(vim.json.decode, data_mod, { luanil = { object = true } })

        if ok then
          if json.usage then
            local tokens = json.usage.total_tokens
            log:trace("Tokens: %s", tokens)
            return tokens
          end
        end
      end
    end,
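
    -- Because setup() requested stream_options.include_usage, the final streamed
    -- chunk looks roughly like (illustrative):
    --   data: {"choices":[],"usage":{"prompt_tokens":10,"completion_tokens":20,"total_tokens":30}}
    -- from which total_tokens (30 here) is logged and returned.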

    ---Output the data from the API ready for insertion into the chat buffer
    ---@param self CodeCompanion.HTTPAdapter
    ---@param data string|table The streamed JSON data from the API, also formatted by the format_data handler
    ---@param tools? table The table to write any tool output to
    ---@return table|nil [status: string, output: table]
    chat_output = function(self, data, tools)
      if not data or data == "" then
        return nil
      end

      -- Handle both streamed data and structured response
      local data_mod = type(data) == "table" and data.body or adapter_utils.clean_streamed_data(data)
      local ok, json = pcall(vim.json.decode, data_mod, { luanil = { object = true } })

      if not ok or not json.choices or #json.choices == 0 then
        return nil
      end

      -- Define standard tool_call fields
      local STANDARD_TOOL_CALL_FIELDS = {
        "id",
        "type",
        "function",
        "index",
      }

      ---Helper to create any tool data
      ---@param tool table
      ---@param index number
      ---@param id string
      ---@return table
      local function create_tool_data(tool, index, id)
        local tool_data = {
          _index = index,
          id = id,
          type = tool.type,
          ["function"] = {
            name = tool["function"]["name"],
            arguments = tool["function"]["arguments"] or "",
          },
        }

        -- Preserve any non-standard fields as-is
        for key, value in pairs(tool) do
          if not vim.tbl_contains(STANDARD_TOOL_CALL_FIELDS, key) then
            tool_data[key] = value
          end
        end

        return tool_data
      end

      -- Process tool calls from all choices
      if self.opts.tools and tools then
        for _, choice in ipairs(json.choices) do
          local delta = self.opts.stream and choice.delta or choice.message

          if delta and delta.tool_calls and #delta.tool_calls > 0 then
            for i, tool in ipairs(delta.tool_calls) do
              local tool_index = tool.index and tonumber(tool.index) or i

              -- Some endpoints like Gemini do not set this (why?!)
              local id = tool.id
              if not id or id == "" then
                id = string.format("call_%s_%s", json.created, i)
              end

              if self.opts.stream then
                local found = false
                for _, existing_tool in ipairs(tools) do
                  if existing_tool._index == tool_index then
                    -- Append to arguments if this is a continuation of a stream
                    if tool["function"] and tool["function"]["arguments"] then
                      existing_tool["function"]["arguments"] = (existing_tool["function"]["arguments"] or "")
                        .. tool["function"]["arguments"]
                    end
                    found = true
                    break
                  end
                end
                if not found then
                  table.insert(tools, create_tool_data(tool, tool_index, id))
                end
              else
                table.insert(tools, create_tool_data(tool, i, id))
              end
            end
          end
        end
      end

      -- Process message content from the first choice
      local choice = json.choices[1]
      local delta = self.opts.stream and choice.delta or choice.message
      if not delta then
        return nil
      end

      return {
        status = "success",
        output = {
          role = delta.role,
          content = delta.content,
        },
        extra = find_extra_fields(delta),
      }
    end,
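
    -- A sketch of how streamed tool calls accumulate (illustrative chunks):
    --   chunk 1: {"index":0,"id":"call_abc","function":{"name":"get_weather","arguments":"{\"loc"}}
    --   chunk 2: {"index":0,"function":{"arguments":"ation\":\"London\"}"}}
    -- Both share index 0, so the second chunk's arguments are appended to the
    -- first entry, yielding arguments == '{"location":"London"}'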

    ---Output the data from the API ready for inlining into the current buffer
    ---@param self CodeCompanion.HTTPAdapter
    ---@param data string|table The streamed JSON data from the API, also formatted by the format_data handler
    ---@param context? table Useful context about the buffer to inline to
    ---@return {status: string, output: table}|nil
    inline_output = function(self, data, context)
      if self.opts.stream then
        return log:error("Inline output is not supported for streaming models")
      end

      if data and data ~= "" then
        local ok, json = pcall(vim.json.decode, data.body, { luanil = { object = true } })

        if not ok then
          log:error("Error decoding JSON: %s", data.body)
          return { status = "error", output = json }
        end

        local choice = json.choices[1]
        if choice.message.content then
          return { status = "success", output = choice.message.content }
        end
      end
    end,
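
    -- Illustrative non-streamed response body (fields elided):
    --   { "choices": [{ "message": { "role": "assistant", "content": "..." } }] }
    -- from which choices[1].message.content is returned directly.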

    tools = {
      ---Format the LLM's tool calls for inclusion back in the request
      ---@param self CodeCompanion.HTTPAdapter
      ---@param tools table The raw tools collected by chat_output
      ---@return table
      format_tool_calls = function(self, tools)
        -- Source: https://platform.openai.com/docs/guides/function-calling?api-mode=chat#handling-function-calls
        return tools
      end,

      ---Output the LLM's tool call so we can include it in the messages
      ---@param self CodeCompanion.HTTPAdapter
      ---@param tool_call {id: string, function: table, name: string}
      ---@param output string
      ---@return table
      output_response = function(self, tool_call, output)
        -- Source: https://platform.openai.com/docs/guides/function-calling?api-mode=chat#handling-function-calls
        return {
          role = self.roles.tool or "tool",
          tools = {
            call_id = tool_call.id,
            name = tool_call["function"].name,
          },
          content = output,
          opts = { visible = false },
        }
      end,
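
      -- Illustrative: for tool_call { id = "call_abc", ["function"] = { name = "get_weather" } }
      -- and output "15C", form_messages later turns this into the API message
      --   { "role": "tool", "tool_call_id": "call_abc", "content": "15C" }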
    },

    ---Function to run when the request has completed. Useful to catch errors
    ---@param self CodeCompanion.HTTPAdapter
    ---@param data? table
    ---@return nil
    on_exit = function(self, data)
      if data and data.status >= 400 then
        log:error("Error: %s", data.body)
      end
    end,
  },
  schema = {
    model = {
      order = 1,
      mapping = "parameters",
      type = "enum",
      desc = "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.",
      ---@type string|fun(self: CodeCompanion.HTTPAdapter): string
      default = "gpt-4.1",
      choices = {
        -- Frontier models
        ["gpt-5.4"] = {
          formatted_name = "GPT 5.4",
          meta = { context_window = 1050000 },
          opts = { has_vision = true, can_reason = true },
        },
        ["gpt-5.4-mini"] = {
          formatted_name = "GPT 5.4 Mini",
          meta = { context_window = 400000 },
          opts = { has_vision = true, can_reason = true },
        },
        ["gpt-5.4-nano"] = {
          formatted_name = "GPT 5.4 Nano",
          meta = { context_window = 400000 },
          opts = { has_vision = true, can_reason = true },
        },
        ["gpt-5"] = {
          formatted_name = "GPT 5",
          meta = { context_window = 400000 },
          opts = { has_vision = true, can_reason = true },
        },
        ["gpt-5-mini"] = {
          formatted_name = "GPT 5 Mini",
          meta = { context_window = 400000 },
          opts = { has_vision = true, can_reason = true },
        },
        ["gpt-5-nano"] = {
          formatted_name = "GPT 5 Nano",
          meta = { context_window = 400000 },
          opts = { has_vision = true, can_reason = true },
        },
        ["gpt-4.1"] = {
          formatted_name = "GPT 4.1",
          meta = { context_window = 1047576 },
          opts = { has_vision = true },
        },
        -- Older models
        ["o4-mini-2025-04-16"] = {
          formatted_name = "o4 Mini",
          opts = { has_vision = true, can_reason = true },
        },
        ["o3-mini-2025-01-31"] = {
          formatted_name = "o3 Mini",
          opts = { can_reason = true },
        },
        ["o3-2025-04-16"] = {
          formatted_name = "o3",
          opts = { has_vision = true, can_reason = true },
        },
        ["o1-2024-12-17"] = {
          formatted_name = "o1",
          opts = { has_vision = true, can_reason = true },
        },
        ["gpt-4o"] = {
          formatted_name = "GPT-4o",
          opts = { has_vision = true },
        },
        ["gpt-4o-mini"] = {
          formatted_name = "GPT-4o Mini",
          opts = { has_vision = true },
        },
        "gpt-3.5-turbo",
      },
    },
    reasoning_effort = {
      order = 2,
      mapping = "parameters",
      type = "string",
      optional = true,
      ---@type fun(self: CodeCompanion.HTTPAdapter): boolean
      enabled = function(self)
        local model = self.schema.model.default
        if type(model) == "function" then
          model = model(self)
        end
        local choices = self.schema.model.choices
        if type(choices) == "function" then
          choices = choices(self)
        end
        if choices and choices[model] and choices[model].opts and choices[model].opts.can_reason then
          return true
        end
        return false
      end,
      default = "medium",
      desc = "Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.",
      choices = {
        "high",
        "medium",
        "low",
        "minimal",
      },
    },
    temperature = {
      order = 3,
      mapping = "parameters",
      type = "number",
      optional = true,
      default = 1,
      desc = "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.",
      validate = function(n)
        return n >= 0 and n <= 2, "Must be between 0 and 2"
      end,
    },
    top_p = {
      order = 4,
      mapping = "parameters",
      type = "number",
      optional = true,
      default = 1,
      desc = "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.",
      validate = function(n)
        return n >= 0 and n <= 1, "Must be between 0 and 1"
      end,
    },
    stop = {
      order = 5,
      mapping = "parameters",
      type = "list",
      optional = true,
      default = nil,
      subtype = {
        type = "string",
      },
      desc = "Up to 4 sequences where the API will stop generating further tokens.",
      validate = function(l)
        return #l >= 1 and #l <= 4, "Must have between 1 and 4 elements"
      end,
    },
    max_tokens = {
      order = 6,
      mapping = "parameters",
      type = "integer",
      optional = true,
      default = nil,
      desc = "The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.",
      validate = function(n)
        return n > 0, "Must be greater than 0"
      end,
    },
    presence_penalty = {
      order = 7,
      mapping = "parameters",
      type = "number",
      optional = true,
      default = 0,
      desc = "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
      validate = function(n)
        return n >= -2 and n <= 2, "Must be between -2 and 2"
      end,
    },
    frequency_penalty = {
      order = 8,
      mapping = "parameters",
      type = "number",
      optional = true,
      default = 0,
      desc = "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
      validate = function(n)
        return n >= -2 and n <= 2, "Must be between -2 and 2"
      end,
    },
    logit_bias = {
      order = 9,
      mapping = "parameters",
      type = "map",
      optional = true,
      default = nil,
      desc = "Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID) to an associated bias value from -100 to 100. Use https://platform.openai.com/tokenizer to find token IDs.",
      subtype_key = {
        type = "integer",
      },
      subtype = {
        type = "integer",
        validate = function(n)
          return n >= -100 and n <= 100, "Must be between -100 and 100"
        end,
      },
    },
    user = {
      order = 10,
      mapping = "parameters",
      type = "string",
      optional = true,
      default = nil,
      desc = "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.",
      validate = function(u)
        return u:len() < 100, "Cannot be longer than 100 characters"
      end,
    },
  },
}
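
-- Putting it together (illustrative): with the defaults above and streaming on,
-- a request body looks roughly like:
--   {
--     "model": "gpt-4.1",
--     "stream": true,
--     "stream_options": { "include_usage": true },
--     "temperature": 1,
--     "top_p": 1,
--     "messages": [{ "role": "user", "content": "Your prompt here" }]
--   }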