1
1
// @ts-check
2
2
3
3
var lang = require ( "./lang.js" ) ;
4
- var ChatGPTModels = [
5
- "gpt-3.5-turbo" ,
6
- "gpt-3.5-turbo-16k" ,
7
- "gpt-3.5-turbo-0301" ,
8
- "gpt-3.5-turbo-0613" ,
9
- "gpt-4" ,
10
- "gpt-4-0314" ,
11
- "gpt-4-0613" ,
12
- "gpt-4-32k" ,
13
- "gpt-4-32k-0314" ,
14
- "gpt-4-32k-0613" ,
15
- ] ;
16
4
var HttpErrorCodes = {
17
5
"400" : "Bad Request" ,
18
6
"401" : "Unauthorized" ,
@@ -85,7 +73,7 @@ function buildHeader(isAzureServiceProvider, apiKey) {
85
73
86
74
/**
87
75
* @param {string } basePrompt
88
- * @param {"simplicity" | "detailed" } polishingMode
76
+ * @param {string } polishingMode
89
77
* @param {Bob.TranslateQuery } query
90
78
* @returns {string }
91
79
*/
@@ -150,11 +138,10 @@ function replacePromptKeywords(prompt, query) {
150
138
}
151
139
152
140
/**
153
- * @param {typeof ChatGPTModels[number] } model
154
- * @param {boolean } isChatGPTModel
141
+ * @param {string } model
155
142
* @param {Bob.TranslateQuery } query
156
143
* @returns {{
157
- * model: typeof ChatGPTModels[number] ;
144
+ * model: string ;
158
145
* temperature: number;
159
146
* max_tokens: number;
160
147
* top_p: number;
@@ -167,14 +154,14 @@ function replacePromptKeywords(prompt, query) {
167
154
* prompt?: string;
168
155
* }}
169
156
*/
170
- function buildRequestBody ( model , isChatGPTModel , query ) {
157
+ function buildRequestBody ( model , query ) {
171
158
const { customSystemPrompt, customUserPrompt, polishingMode } = $option ;
172
159
173
160
const systemPrompt = generateSystemPrompt ( replacePromptKeywords ( customSystemPrompt , query ) , polishingMode , query ) ;
174
161
const userPrompt = customUserPrompt ? `${ replacePromptKeywords ( customUserPrompt , query ) } :\n\n"${ query . text } "` : query . text ;
175
162
176
163
const standardBody = {
177
- model,
164
+ model : model ,
178
165
stream : true ,
179
166
temperature : 0.2 ,
180
167
max_tokens : 1000 ,
@@ -183,24 +170,19 @@ function buildRequestBody(model, isChatGPTModel, query) {
183
170
presence_penalty : 1 ,
184
171
} ;
185
172
186
- if ( isChatGPTModel ) {
187
- return {
188
- ...standardBody ,
189
- messages : [
190
- {
191
- role : "system" ,
192
- content : systemPrompt ,
193
- } ,
194
- {
195
- role : "user" ,
196
- content : userPrompt ,
197
- } ,
198
- ] ,
199
- } ;
200
- }
201
173
return {
202
174
...standardBody ,
203
- prompt : `${ systemPrompt } \n\n${ userPrompt } ` ,
175
+ model : model ,
176
+ messages : [
177
+ {
178
+ role : "system" ,
179
+ content : systemPrompt ,
180
+ } ,
181
+ {
182
+ role : "user" ,
183
+ content : userPrompt ,
184
+ } ,
185
+ ] ,
204
186
} ;
205
187
}
206
188
@@ -223,12 +205,11 @@ function handleError(query, result) {
223
205
224
206
/**
225
207
* @param {Bob.TranslateQuery } query
226
- * @param {boolean } isChatGPTModel
227
208
* @param {string } targetText
228
209
* @param {string } textFromResponse
229
210
* @returns {string }
230
211
*/
231
- function handleResponse ( query , isChatGPTModel , targetText , textFromResponse ) {
212
+ function handleResponse ( query , targetText , textFromResponse ) {
232
213
if ( textFromResponse !== '[DONE]' ) {
233
214
try {
234
215
const dataObj = JSON . parse ( textFromResponse ) ;
@@ -244,7 +225,7 @@ function handleResponse(query, isChatGPTModel, targetText, textFromResponse) {
244
225
return targetText ;
245
226
}
246
227
247
- const content = isChatGPTModel ? choices [ 0 ] . delta . content : choices [ 0 ] . text ;
228
+ const content = choices [ 0 ] . delta . content ;
248
229
if ( content !== undefined ) {
249
230
targetText += content ;
250
231
query . onStream ( {
@@ -282,7 +263,18 @@ function translate(query, completion) {
282
263
} ) ;
283
264
}
284
265
285
- const { model, apiKeys, apiUrl, deploymentName } = $option ;
266
+ const { model, customModel, apiKeys, apiVersion, apiUrl, deploymentName } = $option ;
267
+
268
+ const isCustomModelRequired = model === "custom" ;
269
+ if ( isCustomModelRequired && ! customModel ) {
270
+ query . onCompletion ( {
271
+ error : {
272
+ type : "param" ,
273
+ message : "配置错误 - 请确保您在插件配置中填入了正确的自定义模型名称" ,
274
+ addtion : "请在插件配置中填写自定义模型名称" ,
275
+ } ,
276
+ } ) ;
277
+ }
286
278
287
279
if ( ! apiKeys ) {
288
280
completion ( {
@@ -293,20 +285,22 @@ function translate(query, completion) {
293
285
} ,
294
286
} ) ;
295
287
}
288
+
289
+ const modelValue = isCustomModelRequired ? customModel : model ;
290
+
296
291
const trimmedApiKeys = apiKeys . endsWith ( "," ) ? apiKeys . slice ( 0 , - 1 ) : apiKeys ;
297
292
const apiKeySelection = trimmedApiKeys . split ( "," ) . map ( key => key . trim ( ) ) ;
298
293
const apiKey = apiKeySelection [ Math . floor ( Math . random ( ) * apiKeySelection . length ) ] ;
299
294
300
295
const modifiedApiUrl = ensureHttpsAndNoTrailingSlash ( apiUrl || "https://api.openai.com" ) ;
301
296
302
- const isChatGPTModel = ChatGPTModels . includes ( model ) ;
303
297
const isAzureServiceProvider = modifiedApiUrl . includes ( "openai.azure.com" ) ;
304
- let apiUrlPath = isChatGPTModel ? "/v1/chat/completions" : "/v1/completions" ;
298
+ let apiUrlPath = "/v1/chat/completions" ;
299
+ const apiVersionQuery = apiVersion ? `?api-version=${ apiVersion } ` : "?api-version=2023-03-15-preview" ;
305
300
306
301
if ( isAzureServiceProvider ) {
307
302
if ( deploymentName ) {
308
- apiUrlPath = `/openai/deployments/${ deploymentName } ` ;
309
- apiUrlPath += isChatGPTModel ? "/chat/completions?api-version=2023-03-15-preview" : "/completions?api-version=2022-12-01" ;
303
+ apiUrlPath = `/openai/deployments/${ deploymentName } /chat/completions${ apiVersionQuery } ` ;
310
304
} else {
311
305
completion ( {
312
306
error : {
@@ -319,7 +313,7 @@ function translate(query, completion) {
319
313
}
320
314
321
315
const header = buildHeader ( isAzureServiceProvider , apiKey ) ;
322
- const body = buildRequestBody ( model , isChatGPTModel , query ) ;
316
+ const body = buildRequestBody ( modelValue , query ) ;
323
317
324
318
let targetText = "" ; // 初始化拼接结果变量
325
319
let buffer = "" ; // 新增 buffer 变量
@@ -348,7 +342,7 @@ function translate(query, completion) {
348
342
if ( match ) {
349
343
// 如果是一个完整的消息,处理它并从缓冲变量中移除
350
344
const textFromResponse = match [ 1 ] . trim ( ) ;
351
- targetText = handleResponse ( query , isChatGPTModel , targetText , textFromResponse ) ;
345
+ targetText = handleResponse ( query , targetText , textFromResponse ) ;
352
346
buffer = buffer . slice ( match [ 0 ] . length ) ;
353
347
} else {
354
348
// 如果没有完整的消息,等待更多的数据
0 commit comments