clash-linux committed

Commit 29cc386 · verified · 1 parent: 5ff8b45

Upload 14 files
Dockerfile CHANGED
@@ -1,30 +1,19 @@
- FROM node:lts-alpine
-
- # Set the working directory
- # The WORKDIR instruction creates the directory if it doesn't exist
- # and sets it as the current working directory.
- # By default, it's created by root. We will chown contents later.
- WORKDIR /app
-
- # Copy package.json and package-lock.json
- # The 'node' user and 'node' group are typically pre-defined in node alpine images with UID/GID 1000
- COPY --chown=node:node package*.json ./
-
- # Install dependencies
- # Running npm install as root is common to ensure all global and local dependencies are installed correctly.
- # The node_modules directory will be owned by root.
- # If you need the node_modules to be owned by the node user,
- # you could run npm install after USER node, but ensure /app is writable by node.
- RUN npm install
-
- # Copy the source code
- COPY --chown=node:node . .
-
- # Switch to the "node" user
- USER node
-
- # Expose the port
- EXPOSE 3000
-
- # Start command
+ FROM node:lts-alpine
+
+ # Set the working directory
+ WORKDIR /app
+
+ # Copy package.json and package-lock.json
+ COPY package*.json ./
+
+ # Install dependencies
+ RUN npm install
+
+ # Copy the source code
+ COPY . .
+
+ # Expose the port
+ EXPOSE 3000
+
+ # Start command
  CMD ["npm", "start"]
docker-compose.yml CHANGED
@@ -1,10 +1,11 @@
  services:
    promptlayer-proxy:
-     image: rfym21/promptlayer-proxy:latest
-     container_name: promptlayer-proxy
+     build:
+       context: .
+       dockerfile: Dockerfile
      restart: always
      ports:
        - "3000:3000"
      environment:
-       - ACCOUNTS=your_account:your_password # Fill in the promptlayer username and password, separated by ":"; separate multiple accounts with commas
-       - AUTH_TOKEN=your_auth_token_here # Set the API authentication key
+       - ACCOUNTS= # Fill in the promptlayer username, password, and authorization header, separated by ":"; separate multiple accounts with commas (name:password:ey......)
+       - AUTH_TOKEN= # Set the API authentication key
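Note: with image: replaced by a build: section, docker compose up -d --build now builds the proxy from the local Dockerfile instead of pulling rfym21/promptlayer-proxy:latest. The ACCOUNTS comment documents a new three-field entry format; the token portion is elided as "ey......" above, so the values below are purely hypothetical placeholders:

  ACCOUNTS=alice:secret:eyJhbGciOi.placeholder,bob:hunter2:eyJhbGciOi.placeholder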
package.json CHANGED
@@ -1,6 +1,6 @@
  {
-   "name": "promptlayer-proxy",
-   "version": "3.0.1",
+   "name": "claude-api-server",
+   "version": "1.0.0",
    "description": "",
    "main": "src/server.js",
    "scripts": {
@@ -9,7 +9,8 @@
    },
    "keywords": [
      "api",
-     "promptlayer",
+     "claude",
+     "openai",
      "express"
    ],
    "author": "",
src/lib/manager.js CHANGED
@@ -14,11 +14,11 @@ class Manager {
    async init(accounts) {
      accounts = accounts.split(",").filter(account => account.trim() !== "")
      for (const account of accounts) {
-       const [username, password] = account.split(":")
+       const [username, password, token] = account.split(":")
        this.accounts.push({
          username,
          password,
-         token: null,
+         token: token,
          clientId: null,
          workspaceId: null,
          access_token: null,
@@ -64,7 +64,7 @@ class Manager {
          return { access_token, clientId }
        }
      } catch (error) {
-       // console.error('Failed to get clientId:', error)
+       console.error('Failed to get clientId:', error)
        return false
      }
    }
@@ -87,7 +87,8 @@ class Manager {
    }

    async initAccount(account) {
-     const token = await this.login(account.username, account.password)
+     // const token = await this.login(account.username, account.password)
+     const token = account.token
      if (!token) {
        return false
      }
@@ -119,7 +120,7 @@ class Manager {
        return null
      }

-     if (!account.token) {
+     if (!account.access_token) {
        console.log(`Initializing account: ${account.username}`)
        const initialized = await this.initAccount(account)
        if (!initialized) {
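Note: the guard change from !account.token to !account.access_token is what keeps lazy initialization working — token is now populated from the ACCOUNTS string at startup, so the old check would never fire. A minimal sketch of the new parsing (values hypothetical); because the entry is split on ":", a password containing a colon would shift the token into the wrong field, while JWT-style tokens are safe since they use "." separators:

  // sketch: mirrors the destructuring in init() above
  const entry = "alice:secret:eyJhbGciOi.placeholder"
  const [username, password, token] = entry.split(":")
  console.log(username)  // "alice"
  console.log(token)     // "eyJhbGciOi.placeholder"; undefined for an old two-field entry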
src/lib/model-map.js CHANGED
@@ -1,205 +1,688 @@
- const modelMap = {
-   "claude-3-7-sonnet-20250219": {
-     "provider": "anthropic",
-     "name": "claude-3-7-sonnet-latest",
-     "model_config_display_name": null,
-     "parameters": {
-       "max_tokens": 64000,
-       "temperature": 1,
-       "top_k": 0,
-       "top_p": 0
-     }
-   },
-   "claude-3-7-sonnet-20250219-thinking": {
-     "provider": "anthropic",
-     "name": "claude-3-7-sonnet-latest",
-     "model_config_display_name": null,
-     "parameters": {
-       "max_tokens": 64000,
-       "thinking": {
-         "type": "enabled",
-         "budget_tokens": 32000
-       }
-     }
-   },
-   "claude-sonnet-4-20250514": {
-     "provider": "anthropic",
-     "name": "claude-sonnet-4-20250514",
-     "model_config_display_name": null,
-     "parameters": {
-       "max_tokens": 64000,
-       "temperature": 1,
-       "top_k": 0,
-       "top_p": 0
-     }
-   },
-   "claude-sonnet-4-20250514-thinking": {
-     "provider": "anthropic",
-     "name": "claude-sonnet-4-20250514",
-     "model_config_display_name": null,
-     "parameters": {
-       "max_tokens": 64000,
-       "thinking": {
-         "type": "enabled",
-         "budget_tokens": 32000
-       }
-     }
-   },
-   "claude-opus-4-20250514": {
-     "provider": "anthropic",
-     "name": "claude-opus-4-20250514",
-     "model_config_display_name": null,
-     "parameters": {
-       "max_tokens": 32000,
-       "temperature": 1,
-       "top_k": 0,
-       "top_p": 0
-     }
-   },
-   "claude-opus-4-20250514-thinking": {
-     "provider": "anthropic",
-     "name": "claude-opus-4-20250514",
-     "model_config_display_name": null,
-     "parameters": {
-       "max_tokens": 32000,
-       "thinking": {
-         "type": "enabled",
-         "budget_tokens": 16000
-       }
-     }
-   },
-   "o1": {
-     "provider": "openai",
-     "name": "o1",
-     "model_config_display_name": null,
-     "parameters": {
-       "response_format": {
-         "type": "text"
-       },
-       "reasoning_effort": "high",
-       "max_completion_tokens": 100000
-     }
-   },
-   "o4-mini": {
-     "provider": "openai",
-     "name": "o4-mini",
-     "model_config_display_name": null,
-     "parameters": {
-       "response_format": {
-         "type": "text"
-       },
-       "reasoning_effort": "medium",
-       "max_completion_tokens": 100000
-     }
-   },
-   "o4-mini-high": {
-     "provider": "openai",
-     "name": "o4-mini",
-     "model_config_display_name": null,
-     "parameters": {
-       "response_format": {
-         "type": "text"
-       },
-       "reasoning_effort": "high",
-       "max_completion_tokens": 100000
-     }
-   },
-   "o3": {
-     "provider": "openai",
-     "name": "o3",
-     "model_config_display_name": null,
-     "parameters": {
-       "response_format": {
-         "type": "text"
-       },
-       "reasoning_effort": "high",
-       "max_completion_tokens": 100000
-     }
-   },
-   "o3-mini": {
-     "provider": "openai",
-     "name": "o3",
-     "model_config_display_name": null,
-     "parameters": {
-       "response_format": {
-         "type": "text"
-       },
-       "reasoning_effort": "medium",
-       "max_completion_tokens": 100000
-     }
-   },
-   "o3-mini-high": {
-     "provider": "openai",
-     "name": "o3",
-     "model_config_display_name": null,
-     "parameters": {
-       "response_format": {
-         "type": "text"
-       },
-       "reasoning_effort": "high",
-       "max_completion_tokens": 100000
-     }
-   },
-   "chatgpt-4o-latest": {
-     "provider": "openai",
-     "name": "chatgpt-4o-latest",
-     "model_config_display_name": null,
-     "parameters": {
-       "temperature": 1,
-       "seed": 0,
-       "response_format": null,
-       "top_p": 1,
-       "frequency_penalty": 0,
-       "presence_penalty": 0
-     }
-   },
-   "gpt-4.1": {
-     "provider": "openai",
-     "name": "gpt-4.1",
-     "model_config_display_name": null,
-     "parameters": {
-       "temperature": 1,
-       "seed": 0,
-       "response_format": null,
-       "top_p": 1
-     }
-   },
-   "gpt-4.1-mini": {
-     "provider": "openai",
-     "name": "gpt-4.1-mini",
-     "model_config_display_name": null,
-     "parameters": {
-       "temperature": 1,
-       "seed": 0,
-       "response_format": null,
-       "top_p": 1
-     }
-   },
-   "gpt-4.1-nano": {
-     "provider": "openai",
-     "name": "gpt-4.1-nano",
-     "model_config_display_name": null,
-     "parameters": {
-       "temperature": 1,
-       "seed": 0,
-       "response_format": null,
-       "top_p": 1
-     }
-   },
-   "gpt-4.5-preview": {
-     "provider": "openai",
-     "name": "gpt-4.5-preview",
-     "model_config_display_name": null,
-     "parameters": {
-       "temperature": 1,
-       "seed": 0,
-       "response_format": null,
-       "top_p": 1,
-       "frequency_penalty": 0,
-       "presence_penalty": 0
-     }
-   }
-
- }
-
- module.exports = modelMap
+ const modelMap = {
+   "claude-3-7-sonnet-20250219": {
+     "provider": "anthropic",
+     "name": "claude-3-7-sonnet-latest",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 64000,
+       "temperature": 1,
+       "top_k": 0,
+       "top_p": 0
+     }
+   },
+   "claude-3-7-sonnet-20250219-thinking": {
+     "provider": "anthropic",
+     "name": "claude-3-7-sonnet-latest",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 64000,
+       "thinking": {
+         "type": "enabled",
+         "budget_tokens": 32000
+       }
+     }
+   },
+   "claude-sonnet-4-20250514": {
+     "provider": "anthropic",
+     "name": "claude-sonnet-4-20250514",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 64000,
+       "temperature": 1,
+       "top_k": 0,
+       "top_p": 0
+     }
+   },
+   "claude-sonnet-4-20250514-thinking": {
+     "provider": "anthropic",
+     "name": "claude-sonnet-4-20250514",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 64000,
+       "thinking": {
+         "type": "enabled",
+         "budget_tokens": 32000
+       }
+     }
+   },
+   "claude-opus-4-20250514": {
+     "provider": "anthropic",
+     "name": "claude-opus-4-20250514",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 32000,
+       "temperature": 1,
+       "top_k": 0,
+       "top_p": 0
+     }
+   },
+   "claude-opus-4-20250514-thinking": {
+     "provider": "anthropic",
+     "name": "claude-opus-4-20250514",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 32000,
+       "thinking": {
+         "type": "enabled",
+         "budget_tokens": 16000
+       }
+     }
+   },
+   "o4-mini": {
+     "provider": "openai",
+     "name": "o4-mini",
+     "model_config_display_name": null,
+     "parameters": {
+       "response_format": {
+         "type": "text"
+       },
+       "reasoning_effort": "medium",
+       "max_completion_tokens": 100000
+     }
+   },
+   "o4-mini-high": {
+     "provider": "openai",
+     "name": "o4-mini",
+     "model_config_display_name": null,
+     "parameters": {
+       "response_format": {
+         "type": "text"
+       },
+       "reasoning_effort": "high",
+       "max_completion_tokens": 100000
+     }
+   },
+   "o3-mini": {
+     "provider": "openai",
+     "name": "o3",
+     "model_config_display_name": null,
+     "parameters": {
+       "response_format": {
+         "type": "text"
+       },
+       "reasoning_effort": "medium",
+       "max_completion_tokens": 100000
+     }
+   },
+   "o3-mini-high": {
+     "provider": "openai",
+     "name": "o3",
+     "model_config_display_name": null,
+     "parameters": {
+       "response_format": {
+         "type": "text"
+       },
+       "reasoning_effort": "high",
+       "max_completion_tokens": 100000
+     }
+   },
+   "chatgpt-4o-latest": {
+     "provider": "openai",
+     "name": "chatgpt-4o-latest",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1,
+       "frequency_penalty": 0,
+       "presence_penalty": 0
+     }
+   },
+   "gpt-4o": {
+     "provider": "openai",
+     "name": "gpt-4o",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1,
+       "frequency_penalty": 0,
+       "presence_penalty": 0
+     }
+   },
+   "gpt-4o-2024-11-20": {
+     "provider": "openai",
+     "name": "gpt-4o-2024-11-20",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1,
+       "frequency_penalty": 0,
+       "presence_penalty": 0
+     }
+   },
+   "gpt-4o-2024-08-06": {
+     "provider": "openai",
+     "name": "gpt-4o-2024-08-06",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1,
+       "frequency_penalty": 0,
+       "presence_penalty": 0
+     }
+   },
+   "gpt-4o-2024-05-13": {
+     "provider": "openai",
+     "name": "gpt-4o-2024-05-13",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1,
+       "frequency_penalty": 0,
+       "presence_penalty": 0
+     }
+   },
+   "gpt-4o-mini": {
+     "provider": "openai",
+     "name": "gpt-4o-mini",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1,
+       "frequency_penalty": 0,
+       "presence_penalty": 0
+     }
+   },
+   "gpt-4o-mini-2024-07-18": {
+     "provider": "openai",
+     "name": "gpt-4o-mini-2024-07-18",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1,
+       "frequency_penalty": 0,
+       "presence_penalty": 0
+     }
+   },
+   "gpt-4o-search-preview": {
+     "provider": "openai",
+     "name": "gpt-4o-search-preview",
+     "model_config_display_name": null,
+     "parameters": {
+       "response_format": {
+         "type": "text"
+       },
+       "web_search_options": {
+         "search_context_size": "high",
+         "user_location": {
+           "approximate": {
+             "city": "New York",
+             "country": "US",
+             "region": "New York",
+             "timezone": "America/New_York"
+           },
+           "type": "approximate"
+         }
+       }
+     }
+   },
+   "gpt-4.1": {
+     "provider": "openai",
+     "name": "gpt-4.1",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1
+     }
+   },
+   "gpt-4.1-mini": {
+     "provider": "openai",
+     "name": "gpt-4.1-mini",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1
+     }
+   },
+   "gpt-4.1-mini-2025-04-14": {
+     "provider": "openai",
+     "name": "gpt-4.1-mini-2025-04-14",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1
+     }
+   },
+   "gpt-4.1-nano": {
+     "provider": "openai",
+     "name": "gpt-4.1-nano",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1
+     }
+   },
+   "gpt-4.1-nano-2025-04-14": {
+     "provider": "openai",
+     "name": "gpt-4.1-nano-2025-04-14",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1
+     }
+   },
+   "gpt-4.1-2025-04-14": {
+     "provider": "openai",
+     "name": "gpt-4.1-2025-04-14",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1
+     }
+   },
+   "gpt-4.5-preview": {
+     "provider": "openai",
+     "name": "gpt-4.5-preview",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "seed": 0,
+       "response_format": null,
+       "top_p": 1,
+       "frequency_penalty": 0,
+       "presence_penalty": 0
+     }
+   },
+   "o1": {
+     "provider": "openai",
+     "name": "o1",
+     "model_config_display_name": null,
+     "parameters": {
+       "response_format": {
+         "type": "text"
+       },
+       "reasoning_effort": "high",
+       "max_completion_tokens": 100000
+     }
+   },
+   "o3": {
+     "provider": "openai",
+     "name": "o3",
+     "model_config_display_name": null,
+     "parameters": {
+       "response_format": {
+         "type": "text"
+       },
+       "reasoning_effort": "high",
+       "max_completion_tokens": 100000
+     }
+   },
+   "o3-2025-04-16": {
+     "provider": "openai",
+     "name": "o3-2025-04-16",
+     "model_config_display_name": null,
+     "parameters": {
+       "response_format": {
+         "type": "text"
+       },
+       "reasoning_effort": "high",
+       "max_completion_tokens": 100000
+     }
+   },
+   "o4-mini-2025-04-16": {
+     "provider": "openai",
+     "name": "o4-mini-2025-04-16",
+     "model_config_display_name": null,
+     "parameters": {
+       "response_format": {
+         "type": "text"
+       },
+       "reasoning_effort": "high",
+       "max_completion_tokens": 100000
+     }
+   },
+
+   "claude-3-haiku-latest": {
+     "provider": "anthropic",
+     "name": "claude-3-haiku-latest",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "top_k": 0,
+       "top_p": 0
+     }
+   },
+   "claude-3-haiku-20240307": {
+     "provider": "anthropic",
+     "name": "claude-3-haiku-20240307",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "top_k": 0,
+       "top_p": 0
+     }
+   },
+   "claude-3-opus-latest": {
+     "provider": "anthropic",
+     "name": "claude-3-opus-latest",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "top_k": 0,
+       "top_p": 0
+     }
+   },
+   "claude-3-opus-20240229": {
+     "provider": "anthropic",
+     "name": "claude-3-opus-20240229",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "top_k": 0,
+       "top_p": 0
+     }
+   },
+   "claude-3-sonnet-latest": {
+     "provider": "anthropic",
+     "name": "claude-3-sonnet-latest",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "top_k": 0,
+       "top_p": 0
+     }
+   },
+   "claude-3-sonnet-20240229": {
+     "provider": "anthropic",
+     "name": "claude-3-sonnet-20240229",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "top_k": 0,
+       "top_p": 0
+     }
+   },
+   "claude-3-5-sonnet-latest": {
+     "provider": "anthropic",
+     "name": "claude-3-5-sonnet-latest",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 8192,
+       "temperature": 1,
+       "top_k": 0,
+       "top_p": 0
+     }
+   },
+   "claude-3-5-sonnet-20240620": {
+     "provider": "anthropic",
+     "name": "claude-3-5-sonnet-20240620",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 8192,
+       "temperature": 1,
+       "top_k": 0,
+       "top_p": 0
+     }
+   },
+   "claude-2.1": {
+     "provider": "anthropic",
+     "name": "claude-2.1",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "top_k": 0,
+       "top_p": 0
+     }
+   },
+   "mistral-large-latest": {
+     "provider": "mistral",
+     "name": "mistral-large-latest",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "top_p": 1,
+       "max_tokens": 4096,
+       "safe_prompt": null,
+       "random_seed": 0
+     }
+   },
+   "mistral-medium-latest": {
+     "provider": "mistral",
+     "name": "mistral-medium-latest",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "top_p": 1,
+       "max_tokens": 8192,
+       "safe_prompt": null,
+       "random_seed": 0
+     }
+   },
+   "mistral-small-latest": {
+     "provider": "mistral",
+     "name": "mistral-small-latest",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "top_p": 1,
+       "max_tokens": 8192,
+       "safe_prompt": null,
+       "random_seed": 0
+     }
+   },
+   "open-mixtral-8x22b": {
+     "provider": "mistral",
+     "name": "open-mixtral-8x22b",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "top_p": 1,
+       "max_tokens": 4096,
+       "safe_prompt": null,
+       "random_seed": 0
+     }
+   },
+   "open-mixtral-8x7b": {
+     "provider": "mistral",
+     "name": "open-mixtral-8x7b",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "top_p": 1,
+       "max_tokens": 4096,
+       "safe_prompt": null,
+       "random_seed": 0
+     }
+   },
+   "open-mistral-7b": {
+     "provider": "mistral",
+     "name": "open-mistral-7b",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "top_p": 1,
+       "max_tokens": 4096,
+       "safe_prompt": null,
+       "random_seed": 0
+     }
+   },
+   "open-mistral-nemo": {
+     "provider": "mistral",
+     "name": "open-mistral-nemo",
+     "model_config_display_name": null,
+     "parameters": {
+       "temperature": 1,
+       "top_p": 1,
+       "max_tokens": 4096,
+       "safe_prompt": null,
+       "random_seed": 0
+     }
+   },
+   "command-r": {
+     "provider": "cohere",
+     "name": "command-r",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "k": 0,
+       "p": 0,
+       "presence_penalty": 0,
+       "frequency_penalty": 0
+     }
+   },
+   "command-r-03-2024": {
+     "provider": "cohere",
+     "name": "command-r-03-2024",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "k": 0,
+       "p": 0,
+       "presence_penalty": 0,
+       "frequency_penalty": 0
+     }
+   },
+   "command-r-08-2024": {
+     "provider": "cohere",
+     "name": "command-r-08-2024",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "k": 0,
+       "p": 0,
+       "presence_penalty": 0,
+       "frequency_penalty": 0
+     }
+   },
+   "command-r-plus": {
+     "provider": "cohere",
+     "name": "command-r-plus",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "k": 0,
+       "p": 0,
+       "presence_penalty": 0,
+       "frequency_penalty": 0
+     }
+   },
+   "command-r-plus-04-2024": {
+     "provider": "cohere",
+     "name": "command-r-plus-04-2024",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "k": 0,
+       "p": 0,
+       "presence_penalty": 0,
+       "frequency_penalty": 0
+     }
+   },
+   "command-r-plus-08-2024": {
+     "provider": "cohere",
+     "name": "command-r-plus-08-2024",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "k": 0,
+       "p": 0,
+       "presence_penalty": 0,
+       "frequency_penalty": 0
+     }
+   },
+   "command-r7b-12-2024": {
+     "provider": "cohere",
+     "name": "command-r7b-12-2024",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "k": 0,
+       "p": 0,
+       "presence_penalty": 0,
+       "frequency_penalty": 0
+     }
+   },
+   "command": {
+     "provider": "cohere",
+     "name": "command",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "k": 0,
+       "p": 0,
+       "presence_penalty": 0,
+       "frequency_penalty": 0
+     }
+   },
+   "command-light": {
+     "provider": "cohere",
+     "name": "command-light",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "k": 0,
+       "p": 0,
+       "presence_penalty": 0,
+       "frequency_penalty": 0
+     }
+   },
+   "command-nightly": {
+     "provider": "cohere",
+     "name": "command-nightly",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "k": 0,
+       "p": 0,
+       "presence_penalty": 0,
+       "frequency_penalty": 0
+     }
+   },
+   "command-light-nightly": {
+     "provider": "cohere",
+     "name": "command-light-nightly",
+     "model_config_display_name": null,
+     "parameters": {
+       "max_tokens": 4096,
+       "temperature": 1,
+       "k": 0,
+       "p": 0,
+       "presence_penalty": 0,
+       "frequency_penalty": 0
+     }
+   },
+ }
+
+ module.exports = modelMap
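Note: each entry maps an OpenAI-style model id to its provider, upstream model name, and the default parameters the proxy submits; unknown ids fall back to claude-3-7-sonnet-20250219 in src/routes/chat.js. Because require() caches this module and the route overwrites model_data.parameters in place, a per-request parameter override mutates the shared entry for later requests. A minimal sketch of the lookup (the requested id is hypothetical):

  const modelMap = require('./src/lib/model-map')
  const requested = "some-unlisted-model"
  const model_data = modelMap[requested] ? modelMap[requested] : modelMap["claude-3-7-sonnet-20250219"]
  console.log(model_data.provider)  // "anthropic"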
src/routes/chat.js CHANGED
@@ -1,431 +1,450 @@
- const express = require('express')
- const axios = require('axios')
- const WebSocket = require('ws')
- const router = express.Router()
- const { v4: uuidv4 } = require('uuid')
- const { uploadFileBuffer } = require('../lib/upload')
- const verify = require('./verify')
- const modelMap = require('../lib/model-map')
-
-
- async function parseMessages(req, res, next) {
-   const messages = req.body.messages
-   if (!Array.isArray(messages)) {
-     req.processedMessages = []
-     return next()
-   }
-
-   try {
-     const transformedMessages = await Promise.all(messages.map(async (msg) => {
-       const message = {
-         role: msg.role,
-         tool_calls: [],
-         template_format: "jinja2"
-       }
-
-       if (Array.isArray(msg.content)) {
-         const contentItems = await Promise.all(msg.content.map(async (item) => {
-           if (item.type === "text") {
-             return {
-               type: "text",
-               text: item.text
-             }
-           }
-           else if (item.type === "image_url") {
-             try {
-               const base64Match = item.image_url.url.match(/^data:image\/\w+;base64,(.+)$/)
-               if (base64Match) {
-                 const base64 = base64Match[1]
-                 const data = Buffer.from(base64, 'base64')
-                 const uploadResult = await uploadFileBuffer(data)
-
-                 return {
-                   type: "media",
-                   media: {
-                     "type": "image",
-                     "url": uploadResult.file_url,
-                     "title": `image_${Date.now()}.png`
-                   }
-                 }
-               } else {
-                 return {
-                   type: "media",
-                   media: {
-                     "type": "image",
-                     "url": item.image_url.url,
-                     "title": "external_image"
-                   }
-                 }
-               }
-             } catch (error) {
-               console.error("Error processing image:", error)
-               return {
-                 type: "text",
-                 text: "[image processing failed]"
-               }
-             }
-           } else {
-             return {
-               type: "text",
-               text: JSON.stringify(item)
-             }
-           }
-         }))
-
-         message.content = contentItems
-       } else {
-         message.content = [
-           {
-             type: "text",
-             text: msg.content || ""
-           }
-         ]
-       }
-
-       return message
-     }))
-
-     req.body.messages = transformedMessages
-     return next()
-   } catch (error) {
-     console.error("Error processing messages:", error.status)
-     req.body.messages = []
-     return next(error)
-   }
- }
-
- async function getChatID(req, res) {
-   try {
-     const url = 'https://api.promptlayer.com/api/dashboard/v2/workspaces/' + req.account.workspaceId + '/playground_sessions'
-     const headers = { Authorization: "Bearer " + req.account.token }
-     const model_data = modelMap[req.body.model] ? modelMap[req.body.model] : modelMap["claude-3-7-sonnet-20250219"]
-     let data = {
-       "id": uuidv4(),
-       "name": "Not implemented",
-       "prompt_blueprint": {
-         "inference_client_name": null,
-         "metadata": {
-           "model": model_data
-         },
-         "prompt_template": {
-           "type": "chat",
-           "messages": req.body.messages,
-           "tools": req.body.tools || [],
-           "tool_choice": req.body.tool_choice || "none",
-           "input_variables": [],
-           "functions": [],
-           "function_call": null
-         },
-         "provider_base_url_name": null
-       },
-       "input_variables": []
-     }
-
-     for (const item in req.body) {
-       if (item === "messages" || item === "model" || item === "stream") {
-         continue
-       } else if (model_data.parameters[item]) {
-         model_data.parameters[item] = req.body[item]
-       }
-     }
-     data.prompt_blueprint.metadata.model = model_data
-     console.log(`Model parameters: ${data.prompt_blueprint.metadata.model}`)
-
-     const response = await axios.put(url, data, { headers })
-     if (response.data.success) {
-       console.log(`Session ID created: ${response.data.playground_session.id}`)
-       req.chatID = response.data.playground_session.id
-       return response.data.playground_session.id
-     } else {
-       return false
-     }
-   } catch (error) {
-     // console.error("Error:", error.response?.data)
-     res.status(500).json({
-       "error": {
-         "message": error.message || "Internal server error",
-         "type": "server_error",
-         "param": null,
-         "code": "server_error"
-       }
-     })
-     return false
-   }
- }
-
- async function sentRequest(req, res) {
-   try {
-     const url = 'https://api.promptlayer.com/api/dashboard/v2/workspaces/' + req.account.workspaceId + '/run_groups'
-     const headers = { Authorization: "Bearer " + req.account.token }
-     const model_data = modelMap[req.body.model] ? modelMap[req.body.model] : modelMap["claude-3-7-sonnet-20250219"]
-     let data = {
-       "id": uuidv4(),
-       "playground_session_id": req.chatID,
-       "shared_prompt_blueprint": {
-         "inference_client_name": null,
-         "metadata": {
-           "model": model_data
-         },
-         "prompt_template": {
-           "type": "chat",
-           "messages": req.body.messages,
-           "tools": req.body.tools || [],
-           "tool_choice": req.body.tool_choice || "none",
-           "input_variables": [],
-           "functions": [],
-           "function_call": null
-         },
-         "provider_base_url_name": null
-       },
-       "individual_run_requests": [
-         {
-           "input_variables": {},
-           "run_group_position": 1
-         }
-       ]
-     }
-
-     for (const item in req.body) {
-       if (item === "messages" || item === "model" || item === "stream") {
-         continue
-       } else if (model_data.parameters[item]) {
-         model_data.parameters[item] = req.body[item]
-       }
-     }
-     data.shared_prompt_blueprint.metadata.model = model_data
-
-     const response = await axios.post(url, data, { headers })
-     if (response.data.success) {
-       return response.data.run_group.individual_run_requests[0].id
-     } else {
-       return false
-     }
-   } catch (error) {
-     // console.error("Error:", error.response?.data)
-     res.status(500).json({
-       "error": {
-         "message": error.message || "Internal server error",
-         "type": "server_error",
-         "param": null,
-         "code": "server_error"
-       }
-     })
-   }
- }
-
- // Chat completions route
- router.post('/v1/chat/completions', verify, parseMessages, async (req, res) => {
-   // console.log(JSON.stringify(req.body))
-
-   try {
-
-     const setHeader = () => {
-       try {
-         if (req.body.stream === true) {
-           res.setHeader('Content-Type', 'text/event-stream')
-           res.setHeader('Cache-Control', 'no-cache')
-           res.setHeader('Connection', 'keep-alive')
-         } else {
-           res.setHeader('Content-Type', 'application/json')
-         }
-       } catch (error) {
-         // console.error("Error setting response headers:", error)
-       }
-     }
-
-     const { access_token, clientId } = req.account
-     // Create the session ID
-     await getChatID(req, res)
-
-     // Payload to send
-     const sendAction = `{"action":10,"channel":"user:${clientId}","params":{"agent":"react-hooks/2.0.2"}}`
-     // Build the WebSocket URL
-     const wsUrl = `wss://realtime.ably.io/?access_token=${encodeURIComponent(access_token)}&clientId=${clientId}&format=json&heartbeats=true&v=3&agent=ably-js%2F2.0.2%20browser`
-     // Create the WebSocket connection
-     const ws = new WebSocket(wsUrl)
-
-     // State details
-     let ThinkingLastContent = ""
-     let TextLastContent = ""
-     let ThinkingStart = false
-     let ThinkingEnd = false
-     let RequestID = ""
-     let MessageID = "chatcmpl-" + uuidv4()
-     let streamChunk = {
-       "id": MessageID,
-       "object": "chat.completion.chunk",
-       "system_fingerprint": "fp_44709d6fcb",
-       "created": Math.floor(Date.now() / 1000),
-       "model": req.body.model,
-       "choices": [
-         {
-           "index": 0,
-           "delta": {
-             "content": null
-           },
-           "finish_reason": null
-         }
-       ]
-     }
-
-     ws.on('open', async () => {
-       ws.send(sendAction)
-       RequestID = await sentRequest(req, res)
-       setHeader()
-     })
-
-     ws.on('message', async (data) => {
-       try {
-         data = data.toString()
-         // console.log(JSON.parse(data))
-         let ContentText = JSON.parse(data)?.messages?.[0]
-         let ContentData = JSON.parse(ContentText?.data)
-         const isRequestID = ContentData?.individual_run_request_id
-         if (isRequestID != RequestID || !isRequestID) return
-
-         let output = ""
-
-         if (ContentText?.name === "UPDATE_LAST_MESSAGE") {
-           const MessageArray = ContentData?.payload?.message?.content
-           for (const item of MessageArray) {
-
-             if (item.type === "text") {
-               output = item.text.replace(TextLastContent, "")
-               if (ThinkingStart && !ThinkingEnd) {
-                 ThinkingEnd = true
-                 output = `${output}\n\n</think>`
-               }
-               TextLastContent = item.text
-             }
-             else if (item.type === "thinking" && MessageArray.length === 1) {
-               output = item.thinking.replace(ThinkingLastContent, "")
-               if (!ThinkingStart) {
-                 ThinkingStart = true
-                 output = `<think>\n\n${output}`
-               }
-               ThinkingLastContent = item.thinking
-             }
-
-           }
-
-           if (req.body.stream === true) {
-             streamChunk.choices[0].delta.content = output
-             res.write(`data: ${JSON.stringify(streamChunk)}\n\n`)
-           }
-
-         }
-         else if (ContentText?.name === "INDIVIDUAL_RUN_COMPLETE") {
-
-           if (req.body.stream !== true) {
-             output = ThinkingLastContent ? `<think>\n\n${ThinkingLastContent}\n\n</think>\n\n${TextLastContent}` : TextLastContent
-           }
-
-           if (ThinkingLastContent === "" && TextLastContent === "") {
-             output = "The model hit an error while sending the request: \n1. Check the request parameters; each model's supported and default parameters can be viewed under /v1/models\n2. Check whether a parameter value exceeds the model's limits\n3. This model may currently be overloaded upstream; switching to another model is a normal workaround\n4. Anthropic models accept temperature values from 0 to 1; do not set values above 1\n5. Support and discussion group: https://t.me/nodejs_project"
-             streamChunk.choices[0].delta.content = output
-             res.write(`data: ${JSON.stringify(streamChunk)}\n\n`)
-           }
-
-           if (!req.body.stream || req.body.stream !== true) {
-             let responseJson = {
-               "id": MessageID,
-               "object": "chat.completion",
-               "created": Math.floor(Date.now() / 1000),
-               "system_fingerprint": "fp_44709d6fcb",
-               "model": req.body.model,
-               "choices": [
-                 {
-                   "index": 0,
-                   "message": {
-                     "role": "assistant",
-                     "content": output
-                   },
-                   "finish_reason": "stop"
-                 }
-               ],
-               "usage": {
-                 "prompt_tokens": 0,
-                 "completion_tokens": 0,
-                 "total_tokens": 0
-               }
-             }
-
-             res.json(responseJson)
-             ws.close()
-             return
-           } else {
-             // Streaming response: send the end marker
-             let finalChunk = {
-               "id": MessageID,
-               "object": "chat.completion.chunk",
-               "system_fingerprint": "fp_44709d6fcb",
-               "created": Math.floor(Date.now() / 1000),
-               "model": req.body.model,
-               "choices": [
-                 {
-                   "index": 0,
-                   "delta": {},
-                   "finish_reason": "stop"
-                 }
-               ]
-             }
-
-             res.write(`data: ${JSON.stringify(finalChunk)}\n\n`)
-             res.write(`data: [DONE]\n\n`)
-             res.end()
-           }
-           ws.close()
-         }
-
-       } catch (err) {
-         // console.error("Error handling WebSocket message:", err)
-       }
-     })
-
-     ws.on('error', (err) => {
-       // Standard OpenAI error response format
-       res.status(500).json({
-         "error": {
-           "message": err.message,
-           "type": "server_error",
-           "param": null,
-           "code": "server_error"
-         }
-       })
-     })
-
-     const longTimeoutModels = ["o1", "o3", "o3-mini", "o3-mini-high", "o4-mini", "o4-mini-high"];
-     const timeoutDuration = longTimeoutModels.includes(req.body.model) ? 30 * 60 * 1000 : 300 * 1000;
-
-     setTimeout(() => {
-       if (ws.readyState === WebSocket.OPEN) {
-         ws.close()
-         if (!res.headersSent) {
-           // Standard OpenAI timeout error response format
-           res.status(504).json({
-             "error": {
-               "message": "Request timed out",
-               "type": "timeout",
-               "param": null,
-               "code": "timeout_error"
-             }
-           })
-         }
-       }
-     }, timeoutDuration)
-
-   } catch (error) {
-     console.error("Error:", error)
-     // Standard OpenAI generic error response format
-     res.status(500).json({
-       "error": {
-         "message": error.message || "Internal server error",
-         "type": "server_error",
-         "param": null,
-         "code": "server_error"
-       }
-     })
-   }
- })
-
- module.exports = router
+ const express = require('express')
+ const axios = require('axios')
+ const WebSocket = require('ws')
+ const router = express.Router()
+ const { v4: uuidv4 } = require('uuid')
+ const { uploadFileBuffer } = require('../lib/upload')
+ const verify = require('./verify')
+ const modelMap = require('../lib/model-map')
+
+
+ async function parseMessages(req, res, next) {
+   const messages = req.body.messages
+   if (!Array.isArray(messages)) {
+     req.processedMessages = []
+     return next()
+   }
+
+   try {
+     const transformedMessages = await Promise.all(messages.map(async (msg) => {
+       // Determine provider to conditionally convert system role for Anthropic
+       const modelName = req.body.model;
+       const modelData = modelMap[modelName]; // modelMap is required at the top
+       const provider = modelData?.provider; // Use optional chaining for safety
+
+       let message = {
+         role: (msg.role === "system" && provider === "anthropic") ? "user" : msg.role,
+         tool_calls: [],
+         template_format: "f-string"
+       }
+
+       if (Array.isArray(msg.content)) {
+         const contentItems = await Promise.all(msg.content.map(async (item) => {
+           if (item.type === "text") {
+             return {
+               type: "text",
+               text: item.text
+             }
+           }
+           else if (item.type === "image_url") {
+             try {
+               const base64Match = item.image_url.url.match(/^data:image\/\w+;base64,(.+)$/)
+               if (base64Match) {
+                 const base64 = base64Match[1]
+                 const data = Buffer.from(base64, 'base64')
+                 const uploadResult = await uploadFileBuffer(data)
+
+                 return {
+                   type: "media",
+                   media: {
+                     "type": "image",
+                     "url": uploadResult.file_url,
+                     "title": `image_${Date.now()}.png`
+                   }
+                 }
+               } else {
+                 return {
+                   type: "media",
+                   media: {
+                     "type": "image",
+                     "url": item.image_url.url,
+                     "title": "external_image"
+                   }
+                 }
+               }
+             } catch (error) {
+               console.error("Error processing image:", error)
+               return {
+                 type: "text",
+                 text: "[image processing failed]"
+               }
+             }
+           } else {
+             return {
+               type: "text",
+               text: JSON.stringify(item)
+             }
+           }
+         }))
+
+         message.content = contentItems
+       } else {
+         message.content = [
+           {
+             type: "text",
+             text: msg.content || ""
+           }
+         ]
+       }
+
+       return message
+     }))
+
+     req.body.messages = transformedMessages
+     return next()
+   } catch (error) {
+     console.error("Error processing messages:", error.status)
+     req.body.messages = []
+     return next(error)
+   }
+ }
+
+ async function getChatID(req, res) {
+   try {
+     const url = 'https://api.promptlayer.com/api/dashboard/v2/workspaces/' + req.account.workspaceId + '/playground_sessions'
+     const headers = { Authorization: "Bearer " + req.account.token }
+     const model_data = modelMap[req.body.model] ? modelMap[req.body.model] : modelMap["claude-3-7-sonnet-20250219"]
+     let data = {
+       "id": uuidv4(),
+       "name": "Not implemented",
+       "prompt_blueprint": {
+         "inference_client_name": null,
+         "metadata": {
+           "model": model_data
+         },
+         "prompt_template": {
+           "type": "chat",
+           "messages": req.body.messages,
+           "tools": req.body.tools || [],
+           "tool_choice": req.body.tool_choice || "none",
+           "input_variables": [],
+           "functions": [],
+           "function_call": null
+         },
+         "provider_base_url_name": null
+       },
+       "input_variables": []
+     }
+
+     for (const item in req.body) {
+       if (item === "messages" || item === "model" || item === "stream") {
+         continue
+       } else if (model_data.parameters[item]) {
+         model_data.parameters[item] = req.body[item]
+       }
+     }
+     data.prompt_blueprint.metadata.model = model_data
+     console.log(`Model parameters: ${data.prompt_blueprint.metadata.model}`)
+
+     const response = await axios.put(url, data, { headers })
+     if (response.data.success) {
+       console.log(`Session ID created: ${response.data.playground_session.id}`)
+       req.chatID = response.data.playground_session.id
+       return response.data.playground_session.id
+     } else {
+       return false
+     }
+   } catch (error) {
+     // console.error("Error:", error.response?.data)
+     res.status(500).json({
+       "error": {
+         "message": error.message || "Internal server error",
+         "type": "server_error",
+         "param": null,
+         "code": "server_error"
+       }
+     })
+     return false
+   }
+ }
+
+ async function sentRequest(req, res) {
+   try {
+     const url = 'https://api.promptlayer.com/api/dashboard/v2/workspaces/' + req.account.workspaceId + '/run_groups'
+     const headers = { Authorization: "Bearer " + req.account.token }
+     const model_data = modelMap[req.body.model] ? modelMap[req.body.model] : modelMap["claude-3-7-sonnet-20250219"];
+     const provider = model_data?.provider; // Get provider
+
+     // Base prompt template structure
+     let prompt_template = {
+       "type": "chat",
+       "messages": req.body.messages,
+       "tools": req.body.tools || [], // Default value
+       "tool_choice": req.body.tool_choice || "none", // Default value
+       "input_variables": [],
+       "functions": [],
+       "function_call": null
+     };
+
+     // Conditionally modify for Mistral/Cohere
+     if (provider === 'mistral' || provider === 'cohere') {
+       prompt_template.tools = null;
+       delete prompt_template.tool_choice; // Remove tool_choice entirely
+       delete prompt_template.function_call;
+     }
+
+     let data = {
+       "id": uuidv4(),
+       "playground_session_id": req.chatID,
+       "shared_prompt_blueprint": {
+         "inference_client_name": null,
+         "metadata": {
+           "model": model_data // Keep original model_data here for metadata
+         },
+         "prompt_template": prompt_template, // Use the adjusted template
+         "provider_base_url_name": null
+       },
+       "individual_run_requests": [
+         {
+           "input_variables": {},
+           "run_group_position": 1
+         }
+       ]
+     };
+
+     console.log(JSON.stringify(data))
+
+     // Update model parameters (this loop remains the same)
+     for (const item in req.body) {
+       if (item === "messages" || item === "model" || item === "stream") {
+         continue
+       } else if (model_data.parameters && model_data.parameters.hasOwnProperty(item)) { // Check if parameters exist and has the property
+         model_data.parameters[item] = req.body[item]
+       }
+     }
+     // Ensure the potentially modified model_data (with updated parameters) is in metadata
+     data.shared_prompt_blueprint.metadata.model = model_data;
+
+     const response = await axios.post(url, data, { headers });
+     if (response.data.success) {
+       return response.data.run_group.individual_run_requests[0].id
+     } else {
+       return false
+     }
+   } catch (error) {
+     // console.error("Error:", error.response?.data)
+     res.status(500).json({
+       "error": {
+         "message": error.message || "Internal server error",
+         "type": "server_error",
+         "param": null,
+         "code": "server_error"
+       }
+     })
+   }
+ }
+
+ // Chat completions route
+ router.post('/v1/chat/completions', verify, parseMessages, async (req, res) => {
+   // console.log(JSON.stringify(req.body))
+
+   try {
+
+     const setHeader = () => {
+       try {
+         if (req.body.stream === true) {
+           res.setHeader('Content-Type', 'text/event-stream')
+           res.setHeader('Cache-Control', 'no-cache')
+           res.setHeader('Connection', 'keep-alive')
+         } else {
+           res.setHeader('Content-Type', 'application/json')
+         }
+       } catch (error) {
+         // console.error("Error setting response headers:", error)
+       }
+     }
+
+     const { access_token, clientId } = req.account
+     // Create the session ID
+     await getChatID(req, res)
+
+     // Payload to send
+     const sendAction = `{"action":10,"channel":"user:${clientId}","params":{"agent":"react-hooks/2.0.2"}}`
+     // Build the WebSocket URL
+     const wsUrl = `wss://realtime.ably.io/?access_token=${encodeURIComponent(access_token)}&clientId=${clientId}&format=json&heartbeats=true&v=3&agent=ably-js%2F2.0.2%20browser`
+     // Create the WebSocket connection
+     const ws = new WebSocket(wsUrl)
+
+     // State details
+     let ThinkingLastContent = ""
+     let TextLastContent = ""
+     let ThinkingStart = false
+     let ThinkingEnd = false
+     let RequestID = ""
+     let MessageID = "chatcmpl-" + uuidv4()
+     let streamChunk = {
+       "id": MessageID,
+       "object": "chat.completion.chunk",
+       "system_fingerprint": "fp_44709d6fcb",
+       "created": Math.floor(Date.now() / 1000),
+       "model": req.body.model,
+       "choices": [
+         {
+           "index": 0,
+           "delta": {
+             "content": null
+           },
+           "finish_reason": null
+         }
+       ]
+     }
+
+     ws.on('open', async () => {
+       ws.send(sendAction)
+       RequestID = await sentRequest(req, res)
+       setHeader()
+     })
+
+     ws.on('message', async (data) => {
+       try {
+         data = data.toString()
+         console.log("here!!!")
+         console.log(data)
+         let ContentText = JSON.parse(data)?.messages?.[0]
+         let ContentData = JSON.parse(ContentText?.data)
+         const isRequestID = ContentData?.individual_run_request_id
+         if (isRequestID != RequestID || !isRequestID) return
+
+         let output = ""
+
+         if (ContentText?.name === "UPDATE_LAST_MESSAGE") {
+           const MessageArray = ContentData?.payload?.message?.content
+           for (const item of MessageArray) {
+
+             if (item.type === "text") {
+               output = item.text.replace(TextLastContent, "")
+               if (ThinkingStart && !ThinkingEnd) {
+                 ThinkingEnd = true
+                 output = `${output}\n\n</think>`
+               }
+               TextLastContent = item.text
+             }
+             else if (item.type === "thinking" && MessageArray.length === 1) {
+               output = item.thinking.replace(ThinkingLastContent, "")
+               if (!ThinkingStart) {
+                 ThinkingStart = true
+                 output = `<think>\n\n${output}`
+               }
+               ThinkingLastContent = item.thinking
+             }
+
+           }
+
+           if (req.body.stream === true) {
+             streamChunk.choices[0].delta.content = output
+             res.write(`data: ${JSON.stringify(streamChunk)}\n\n`)
+           }
+
+         }
+         else if (ContentText?.name === "INDIVIDUAL_RUN_COMPLETE") {
+
+           if (req.body.stream !== true) {
+             output = ThinkingLastContent ? `<think>\n\n${ThinkingLastContent}\n\n</think>\n\n${TextLastContent}` : TextLastContent
+           }
+
+           if (ThinkingLastContent === "" && TextLastContent === "") {
+             output = "The model hit an error while sending the request: \n1. Check the request parameters; each model's supported and default parameters can be viewed under /v1/models\n2. Check whether a parameter value exceeds the model's limits\n3. This model may currently be overloaded upstream; switching to another model is a normal workaround\n4. Anthropic models accept temperature values from 0 to 1; do not set values above 1\n5. Support and discussion group: https://t.me/nodejs_project"
+             streamChunk.choices[0].delta.content = output
+             res.write(`data: ${JSON.stringify(streamChunk)}\n\n`)
+           }
+
+           if (!req.body.stream || req.body.stream !== true) {
+             let responseJson = {
+               "id": MessageID,
+               "object": "chat.completion",
+               "created": Math.floor(Date.now() / 1000),
+               "system_fingerprint": "fp_44709d6fcb",
+               "model": req.body.model,
+               "choices": [
+                 {
+                   "index": 0,
+                   "message": {
+                     "role": "assistant",
+                     "content": output
+                   },
+                   "finish_reason": "stop"
+                 }
+               ],
+               "usage": {
+                 "prompt_tokens": 0,
+                 "completion_tokens": 0,
+                 "total_tokens": 0
+               }
+             }
+
+             res.json(responseJson)
+             ws.close()
+             return
+           } else {
+             // Streaming response: send the end marker
+             let finalChunk = {
+               "id": MessageID,
+               "object": "chat.completion.chunk",
+               "system_fingerprint": "fp_44709d6fcb",
+               "created": Math.floor(Date.now() / 1000),
+               "model": req.body.model,
+               "choices": [
+                 {
+                   "index": 0,
+                   "delta": {},
+                   "finish_reason": "stop"
+                 }
+               ]
+             }
+
+             res.write(`data: ${JSON.stringify(finalChunk)}\n\n`)
+             res.write(`data: [DONE]\n\n`)
+             res.end()
+           }
+           ws.close()
+         }
+
+       } catch (err) {
+         // console.error("Error handling WebSocket message:", err)
+       }
+     })
+
+     ws.on('error', (err) => {
+       // Standard OpenAI error response format
+       res.status(500).json({
+         "error": {
+           "message": err.message,
+           "type": "server_error",
+           "param": null,
+           "code": "server_error"
+         }
+       })
+     })
+
+     setTimeout(() => {
+       if (ws.readyState === WebSocket.OPEN) {
+         ws.close()
+         if (!res.headersSent) {
+           // Standard OpenAI timeout error response format
+           res.status(504).json({
+             "error": {
+               "message": "Request timed out",
+               "type": "timeout",
+               "param": null,
+               "code": "timeout_error"
+             }
+           })
+         }
+       }
+     }, 300 * 1000)
+
+   } catch (error) {
+     console.error("Error:", error)
+     // Standard OpenAI generic error response format
+     res.status(500).json({
+       "error": {
+         "message": error.message || "Internal server error",
+         "type": "server_error",
+         "param": null,
+         "code": "server_error"
+       }
+     })
+   }
+ })
+
+ module.exports = router
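Note: this rewrite also drops the 30-minute timeout the old code gave o-series models — every request now times out after 300 seconds — and it leaves two debug console.log calls ("here!!!" and the raw frame) in the WebSocket message handler. A minimal non-streaming request against the route, assuming the proxy is running locally on port 3000 and that the verify middleware (not shown in this diff) accepts the configured AUTH_TOKEN as a Bearer token; Node 18+ with built-in fetch:

  const res = await fetch("http://localhost:3000/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": "Bearer your_auth_token_here"  // the AUTH_TOKEN value
    },
    body: JSON.stringify({
      model: "claude-3-7-sonnet-20250219",
      stream: false,
      messages: [{ role: "user", content: "Hello" }]
    })
  })
  console.log((await res.json()).choices[0].message.content)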
src/routes/models.js CHANGED
@@ -3,7 +3,6 @@ const router = express.Router()
  const modelMap = require('../lib/model-map')

  router.get('/v1/models', (req, res) => {
-   console.log('Available model keys in modelMap:', Object.keys(modelMap));

    const result = Object.keys(modelMap).map((id) => {
      const model_data = {
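A quick sketch for checking the published model list, assuming the proxy is running locally (no auth middleware appears on this route in the diff):

  const models = await fetch("http://localhost:3000/v1/models").then(r => r.json())
  console.log(models)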