-
Notifications
You must be signed in to change notification settings - Fork 854
Expand file tree
/
Copy pathopenai-api.mjs
More file actions
318 lines (292 loc) · 9.59 KB
/
openai-api.mjs
File metadata and controls
318 lines (292 loc) · 9.59 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
// api version
import { getUserConfig } from '../../config/index.mjs'
import { fetchSSE } from '../../utils/fetch-sse.mjs'
import { getConversationPairs } from '../../utils/get-conversation-pairs.mjs'
import { isEmpty } from 'lodash-es'
import { getCompletionPromptBase, pushRecord, setAbortController } from './shared.mjs'
import { getModelValue, isUsingReasoningModel } from '../../utils/model-name-convert.mjs'
/**
 * Flatten a structured content array (as returned by reasoning models)
 * into a single plain-text string.
 * @param {Array} contentArray - Array of content segments
 * @returns {string} - Extracted text content
 */
function extractContentFromArray(contentArray) {
  if (!Array.isArray(contentArray)) {
    console.debug('Content is not an array, returning empty string')
    return ''
  }
  try {
    const pieces = []
    for (const segment of contentArray) {
      if (typeof segment === 'string') {
        pieces.push(segment)
        continue
      }
      if (segment && typeof segment === 'object') {
        // output_text segments take precedence over a plain text property
        if (typeof segment.output_text === 'string') {
          pieces.push(segment.output_text)
        } else if (typeof segment.text === 'string') {
          pieces.push(segment.text)
        }
      }
    }
    return pieces.join('')
  } catch (error) {
    console.error('Error extracting content from array:', error)
    return ''
  }
}
/**
 * Stream an answer for `question` from the legacy text-completions endpoint
 * (`{customOpenAiApiUrl}/v1/completions`) and relay incremental answers to
 * the extension port. Records the finished exchange in the session.
 * @param {Browser.Runtime.Port} port
 * @param {string} question
 * @param {Session} session
 * @param {string} apiKey
 */
export async function generateAnswersWithGptCompletionApi(port, question, session, apiKey) {
  const { controller, messageListener, disconnectListener } = setAbortController(port)
  const model = getModelValue(session)
  const config = await getUserConfig()
  // Plain-text prompt: base preamble + recent conversation turns + the new question.
  const prompt =
    (await getCompletionPromptBase()) +
    getConversationPairs(
      session.conversationRecords.slice(-config.maxConversationContextLength),
      true,
    ) +
    `Human: ${question}\nAI: `
  const apiUrl = config.customOpenAiApiUrl
  let answer = ''
  let finished = false
  const finish = () => {
    // Guard against double invocation: both a finish_reason chunk and the
    // trailing [DONE] sentinel can trigger this (matches the chat variant).
    if (finished) return
    finished = true
    pushRecord(session, question, answer)
    console.debug('conversation history', { content: session.conversationRecords })
    port.postMessage({ answer: null, done: true, session: session })
  }
  await fetchSSE(`${apiUrl}/v1/completions`, {
    method: 'POST',
    signal: controller.signal,
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      prompt: prompt,
      model,
      stream: true,
      max_tokens: config.maxResponseTokenLength,
      temperature: config.temperature,
      stop: '\nHuman',
    }),
    onMessage(message) {
      console.debug('sse message', message)
      if (finished) return
      if (message.trim() === '[DONE]') {
        finish()
        return
      }
      let data
      try {
        data = JSON.parse(message)
      } catch (error) {
        console.debug('json error', error)
        return
      }
      const choice = data.choices?.[0]
      if (!choice) {
        console.debug('No choice in response data')
        return
      }
      // Coalesce to '' so a chunk without text never appends "undefined".
      answer += choice.text ?? ''
      port.postMessage({ answer: answer, done: false, session: null })
      if (choice.finish_reason) {
        finish()
        return
      }
    },
    async onStart() {},
    async onEnd() {
      port.postMessage({ done: true })
      port.onMessage.removeListener(messageListener)
      port.onDisconnect.removeListener(disconnectListener)
    },
    async onError(resp) {
      port.onMessage.removeListener(messageListener)
      port.onDisconnect.removeListener(disconnectListener)
      if (resp instanceof Error) throw resp
      const error = await resp.json().catch(() => ({}))
      throw new Error(!isEmpty(error) ? JSON.stringify(error) : `${resp.status} ${resp.statusText}`)
    },
  })
}
/**
 * Answer `question` via the user-configured OpenAI-compatible chat endpoint.
 * Thin wrapper that resolves the base URL from user config and delegates to
 * the compat implementation.
 * @param {Browser.Runtime.Port} port
 * @param {string} question
 * @param {Session} session
 * @param {string} apiKey
 */
export async function generateAnswersWithChatgptApi(port, question, session, apiKey) {
  const userConfig = await getUserConfig()
  const baseUrl = userConfig.customOpenAiApiUrl + '/v1'
  return generateAnswersWithChatgptApiCompat(baseUrl, port, question, session, apiKey)
}
/**
 * Answer `question` via an OpenAI-compatible `/chat/completions` endpoint,
 * streaming incremental answers to the extension port. Handles both regular
 * (streaming) models and reasoning models (non-streaming, restricted
 * parameters during beta). Records the finished exchange in the session.
 * @param {string} baseUrl - API base, e.g. "https://api.openai.com/v1"
 * @param {Browser.Runtime.Port} port
 * @param {string} question
 * @param {Session} session
 * @param {string} apiKey
 * @param {Object} [extraBody] - Extra fields merged into the request body
 * @throws {Error} If the API key is missing/blank, or the request fails
 */
export async function generateAnswersWithChatgptApiCompat(
  baseUrl,
  port,
  question,
  session,
  apiKey,
  extraBody = {},
) {
  // Validate the API key BEFORE registering port listeners or doing any other
  // side effects, so an early throw cannot leak listeners on the port.
  if (!apiKey || typeof apiKey !== 'string' || !apiKey.trim()) {
    throw new Error(
      'Invalid or empty API key provided. Please check your OpenAI API key configuration.',
    )
  }
  const { controller, messageListener, disconnectListener } = setAbortController(port)
  const model = getModelValue(session)
  const isReasoningModel = isUsingReasoningModel(session)
  const config = await getUserConfig()
  const prompt = getConversationPairs(
    session.conversationRecords.slice(-config.maxConversationContextLength),
    false,
  )
  // Filter messages based on model type
  // Reasoning models only support 'user' and 'assistant' roles during beta period
  const filteredPrompt = isReasoningModel
    ? prompt.filter((msg) => {
        const role = msg?.role
        return role === 'user' || role === 'assistant'
      })
    : prompt
  filteredPrompt.push({ role: 'user', content: question })
  let answer = ''
  let finished = false
  const finish = () => {
    // Guard: [DONE] sentinel and finish_reason chunk can both trigger this.
    if (finished) return
    finished = true
    pushRecord(session, question, answer)
    console.debug('conversation history', { content: session.conversationRecords })
    port.postMessage({ answer: null, done: true, session })
  }
  // Build request body with reasoning model-specific parameters
  const requestBody = {
    messages: filteredPrompt,
    model,
    ...extraBody,
  }
  // Apply model-specific configurations
  if (isReasoningModel) {
    // Reasoning models use max_completion_tokens instead of max_tokens
    requestBody.max_completion_tokens = config.maxResponseTokenLength
    // Reasoning models don't support streaming during beta
    requestBody.stream = false
    // Reasoning models have fixed parameters during beta
    requestBody.temperature = 1
    requestBody.top_p = 1
    requestBody.n = 1
    requestBody.presence_penalty = 0
    requestBody.frequency_penalty = 0
    // Remove unsupported parameters for reasoning models
    delete requestBody.tools
    delete requestBody.tool_choice
    delete requestBody.functions
    delete requestBody.function_call
    delete requestBody.max_tokens // Ensure max_tokens is not present
  } else {
    // Non-reasoning models use the existing behavior
    requestBody.stream = true
    requestBody.max_tokens = config.maxResponseTokenLength
    requestBody.temperature = config.temperature
  }
  await fetchSSE(`${baseUrl}/chat/completions`, {
    method: 'POST',
    signal: controller.signal,
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${apiKey.trim()}`,
    },
    body: JSON.stringify(requestBody),
    onMessage(message) {
      console.debug('sse message', message)
      if (finished) return
      if (message.trim() === '[DONE]') {
        finish()
        return
      }
      let data
      try {
        data = JSON.parse(message)
      } catch (error) {
        console.debug('json error', error)
        return
      }
      // Validate response structure early
      const choice = data.choices?.[0]
      if (!choice) {
        console.debug('No choice in response data')
        return
      }
      if (isReasoningModel) {
        // For reasoning models (non-streaming), get the complete response
        let content = choice.message?.content ?? choice.text
        // Handle structured response arrays for reasoning models
        if (Array.isArray(content)) {
          content = extractContentFromArray(content)
        }
        // Ensure content is a string and not empty
        if (content && typeof content === 'string') {
          const trimmedContent = content.trim()
          if (trimmedContent) {
            answer = trimmedContent
            port.postMessage({ answer, done: false, session: null })
          }
        } else if (content) {
          // Handle unexpected content types gracefully
          console.debug('Unexpected content type for reasoning model:', typeof content)
          const stringContent = String(content).trim()
          if (stringContent) {
            answer = stringContent
            port.postMessage({ answer, done: false, session: null })
          }
        }
        // Only finish when we have a proper finish reason
        if (choice.finish_reason) {
          finish()
        }
      } else {
        // For non-reasoning models (streaming), handle delta content.
        // Accept all three shapes a compat endpoint may send: streaming
        // delta, full message, or legacy text.
        const delta = choice.delta?.content
        const content = choice.message?.content
        const text = choice.text
        if (delta !== undefined) {
          answer += delta
        } else if (content) {
          answer = content
        } else if (text) {
          answer += text
        }
        port.postMessage({ answer, done: false, session: null })
        if (choice.finish_reason) {
          finish()
          return
        }
      }
    },
    async onStart() {},
    async onEnd() {
      port.postMessage({ done: true })
      port.onMessage.removeListener(messageListener)
      port.onDisconnect.removeListener(disconnectListener)
    },
    async onError(resp) {
      port.onMessage.removeListener(messageListener)
      port.onDisconnect.removeListener(disconnectListener)
      if (resp instanceof Error) throw resp
      const error = await resp.json().catch(() => ({}))
      throw new Error(!isEmpty(error) ? JSON.stringify(error) : `${resp.status} ${resp.statusText}`)
    },
  })
}