add support for Google Cloud (#2175)
### What problem does this PR solve?
#1853 add support for Google Cloud
### Type of change
- [x] New Feature (non-breaking change which adds functionality)
---------
Co-authored-by: Zhedong Cen <[email protected]>
- api/apps/llm_app.py +8 -0
- conf/llm_factories.json +7 -0
- rag/llm/__init__.py +1 -0
- rag/llm/chat_model.py +159 -2
- requirements.txt +1 -0
- requirements_arm.txt +1 -0
- web/src/assets/svg/llm/google-cloud.svg +1 -0
- web/src/locales/en.ts +10 -0
- web/src/locales/zh-traditional.ts +10 -0
- web/src/locales/zh.ts +10 -0
- web/src/pages/user-setting/setting-model/constant.ts +1 -0
- web/src/pages/user-setting/setting-model/google-modal/index.tsx +95 -0
- web/src/pages/user-setting/setting-model/hooks.ts +27 -0
- web/src/pages/user-setting/setting-model/index.tsx +21 -1
api/apps/llm_app.py
CHANGED
```diff
@@ -150,6 +150,14 @@ def add_llm():
         llm_name = req["llm_name"]
         api_key = '{' + f'"fish_audio_ak": "{req.get("fish_audio_ak", "")}", ' \
             f'"fish_audio_refid": "{req.get("fish_audio_refid", "59cb5986671546eaa6ca8ae6f29f6d22")}"' + '}'
+    elif factory == "Google Cloud":
+        llm_name = req["llm_name"]
+        api_key = (
+            "{" + f'"google_project_id": "{req.get("google_project_id", "")}", '
+            f'"google_region": "{req.get("google_region", "")}", '
+            f'"google_service_account_key": "{req.get("google_service_account_key", "")}"'
+            + "}"
+        )
     else:
         llm_name = req["llm_name"]
         api_key = req.get("api_key","xxxxxxxxxxxxxxx")
```
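For reference, the credential blob assembled above is a hand-built JSON string stored as the model's `api_key` and parsed back later by `GoogleChat`. A minimal sketch of the round trip; the project, region, and key values are placeholders, not real credentials:

```python
import json

# Hypothetical form values as submitted from the Google Cloud modal.
req = {
    "google_project_id": "my-project",     # placeholder
    "google_region": "us-central1",        # placeholder
    "google_service_account_key": "e30=",  # base64 of "{}", placeholder
}

# Same string-building approach as the handler above.
api_key = (
    "{" + f'"google_project_id": "{req.get("google_project_id", "")}", '
    f'"google_region": "{req.get("google_region", "")}", '
    f'"google_service_account_key": "{req.get("google_service_account_key", "")}"'
    + "}"
)

# The stored string parses back into a dict for GoogleChat.__init__.
creds = json.loads(api_key)
assert creds["google_project_id"] == "my-project"
```

Note that a value containing a double quote would break this hand-built string; `json.dumps(req)` would be the more robust choice.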
conf/llm_factories.json
CHANGED
```diff
@@ -3352,6 +3352,13 @@
                 "model_type": "rerank"
             }
         ]
+    },
+    {
+        "name": "Google Cloud",
+        "logo": "",
+        "tags": "LLM",
+        "status": "1",
+        "llm": []
     }
 ]
 }
```
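A quick sanity check that the new factory entry is well-formed; a sketch that assumes the file keeps its top-level `factory_llm_infos` list and is run from the repo root:

```python
import json

with open("conf/llm_factories.json") as f:
    factories = json.load(f)["factory_llm_infos"]

# The new entry ships with an empty "llm" list: models are added
# per-user through the settings modal rather than preloaded.
google = next(e for e in factories if e["name"] == "Google Cloud")
assert google["status"] == "1" and google["llm"] == []
```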
rag/llm/__init__.py
CHANGED
```diff
@@ -107,6 +107,7 @@ ChatModel = {
     "XunFei Spark": SparkChat,
     "BaiduYiyan": BaiduYiyanChat,
     "Anthropic": AnthropicChat,
+    "Google Cloud": GoogleChat,
 }
 
 
```
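`ChatModel` is the factory-name → implementation registry the backend dispatches on. A minimal sketch of the lookup (the instantiation arguments are the credential JSON and model ID shown elsewhere in this PR):

```python
from rag.llm import ChatModel

# After this change, the "Google Cloud" factory resolves to GoogleChat.
chat_cls = ChatModel["Google Cloud"]
print(chat_cls.__name__)  # GoogleChat

# Callers then instantiate it as chat_cls(api_key, llm_name), where
# api_key is the JSON credential string built in api/apps/llm_app.py.
```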
rag/llm/chat_model.py
CHANGED
```diff
@@ -701,9 +701,13 @@ class GeminiChat(Base):
         self.model = GenerativeModel(model_name=self.model_name)
         self.model._client = _client
 
+
     def chat(self,system,history,gen_conf):
+        from google.generativeai.types import content_types
+
         if system:
-
+            self.model._system_instruction = content_types.to_content(system)
+
         if 'max_tokens' in gen_conf:
             gen_conf['max_output_tokens'] = gen_conf['max_tokens']
         for k in list(gen_conf.keys()):
@@ -725,8 +729,10 @@ class GeminiChat(Base):
             return "**ERROR**: " + str(e), 0
 
     def chat_streamly(self, system, history, gen_conf):
+        from google.generativeai.types import content_types
+
         if system:
-
+            self.model._system_instruction = content_types.to_content(system)
         if 'max_tokens' in gen_conf:
             gen_conf['max_output_tokens'] = gen_conf['max_tokens']
         for k in list(gen_conf.keys()):
@@ -1257,3 +1263,158 @@ class AnthropicChat(Base):
             yield ans + "\n**ERROR**: " + str(e)
 
         yield total_tokens
+
+
+class GoogleChat(Base):
+    def __init__(self, key, model_name, base_url=None):
+        from google.oauth2 import service_account
+        import base64
+
+        key = json.loads(key)
+        encoded_key = key.get("google_service_account_key", "")
+        # The key arrives base64-encoded from the settings modal; decode it
+        # back into the service-account JSON (None means fall back to ADC).
+        access_token = json.loads(base64.b64decode(encoded_key)) if encoded_key else None
+        project_id = key.get("google_project_id", "")
+        region = key.get("google_region", "")
+
+        scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+        self.model_name = model_name
+        self.system = ""
+
+        if "claude" in self.model_name:
+            from anthropic import AnthropicVertex
+            from google.auth.transport.requests import Request
+
+            if access_token:
+                credits = service_account.Credentials.from_service_account_info(
+                    access_token, scopes=scopes
+                )
+                request = Request()
+                credits.refresh(request)
+                token = credits.token
+                self.client = AnthropicVertex(
+                    region=region, project_id=project_id, access_token=token
+                )
+            else:
+                self.client = AnthropicVertex(region=region, project_id=project_id)
+        else:
+            from google.cloud import aiplatform
+            import vertexai.generative_models as glm
+
+            if access_token:
+                credits = service_account.Credentials.from_service_account_info(
+                    access_token
+                )
+                aiplatform.init(
+                    credentials=credits, project=project_id, location=region
+                )
+            else:
+                aiplatform.init(project=project_id, location=region)
+            self.client = glm.GenerativeModel(model_name=self.model_name)
+
+    def chat(self, system, history, gen_conf):
+        if system:
+            self.system = system
+
+        if "claude" in self.model_name:
+            if "max_tokens" not in gen_conf:
+                gen_conf["max_tokens"] = 4096
+            ans = ""
+            try:
+                response = self.client.messages.create(
+                    model=self.model_name,
+                    messages=history,
+                    system=self.system,
+                    stream=False,
+                    **gen_conf,
+                ).json()
+                ans = response["content"][0]["text"]
+                if response["stop_reason"] == "max_tokens":
+                    ans += (
+                        "...\nFor the content length reason, it stopped, continue?"
+                        if is_english([ans])
+                        else "······\n由于长度的原因,回答被截断了,要继续吗?"
+                    )
+                return (
+                    ans,
+                    response["usage"]["input_tokens"]
+                    + response["usage"]["output_tokens"],
+                )
+            except Exception as e:
+                return ans + "\n**ERROR**: " + str(e), 0
+        else:
+            self.client._system_instruction = self.system
+            if "max_tokens" in gen_conf:
+                gen_conf["max_output_tokens"] = gen_conf["max_tokens"]
+            for k in list(gen_conf.keys()):
+                if k not in ["temperature", "top_p", "max_output_tokens"]:
+                    del gen_conf[k]
+            # Gemini expects "model" instead of "assistant" and "parts" instead of "content".
+            for item in history:
+                if "role" in item and item["role"] == "assistant":
+                    item["role"] = "model"
+                if "content" in item:
+                    item["parts"] = item.pop("content")
+            try:
+                response = self.client.generate_content(
+                    history, generation_config=gen_conf
+                )
+                ans = response.text
+                return ans, response.usage_metadata.total_token_count
+            except Exception as e:
+                return "**ERROR**: " + str(e), 0
+
+    def chat_streamly(self, system, history, gen_conf):
+        if system:
+            self.system = system
+
+        if "claude" in self.model_name:
+            if "max_tokens" not in gen_conf:
+                gen_conf["max_tokens"] = 4096
+            ans = ""
+            total_tokens = 0
+            try:
+                response = self.client.messages.create(
+                    model=self.model_name,
+                    messages=history,
+                    system=self.system,
+                    stream=True,
+                    **gen_conf,
+                )
+                for res in response.iter_lines():
+                    res = res.decode("utf-8")
+                    if "content_block_delta" in res and "data" in res:
+                        text = json.loads(res[6:])["delta"]["text"]
+                        ans += text
+                        total_tokens += num_tokens_from_string(text)
+                        yield ans
+            except Exception as e:
+                yield ans + "\n**ERROR**: " + str(e)
+
+            yield total_tokens
+        else:
+            self.client._system_instruction = self.system
+            if "max_tokens" in gen_conf:
+                gen_conf["max_output_tokens"] = gen_conf["max_tokens"]
+            for k in list(gen_conf.keys()):
+                if k not in ["temperature", "top_p", "max_output_tokens"]:
+                    del gen_conf[k]
+            for item in history:
+                if "role" in item and item["role"] == "assistant":
+                    item["role"] = "model"
+                if "content" in item:
+                    item["parts"] = item.pop("content")
+            ans = ""
+            try:
+                response = self.client.generate_content(
+                    history, generation_config=gen_conf, stream=True
+                )
+                for resp in response:
+                    ans += resp.text
+                    yield ans
+
+            except Exception as e:
+                yield ans + "\n**ERROR**: " + str(e)
+
+            yield response._chunks[-1].usage_metadata.total_token_count
```
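To make the new class concrete, here is a minimal usage sketch. It assumes a valid service-account JSON file on disk; the path, project, region, and model ID are placeholders. Gemini-style model IDs take the Vertex AI branch, while IDs containing "claude" take the AnthropicVertex branch:

```python
import base64
import json

from rag.llm.chat_model import GoogleChat

# Base64-encode the service-account JSON, exactly as the settings modal expects.
with open("service_account.json", "rb") as fp:  # placeholder path
    sa_b64 = base64.b64encode(fp.read()).decode("utf-8")

key = json.dumps({
    "google_project_id": "my-project",  # placeholder
    "google_region": "us-central1",     # placeholder
    "google_service_account_key": sa_b64,
})

mdl = GoogleChat(key, "gemini-1.5-flash-001")  # placeholder model ID
ans, used_tokens = mdl.chat(
    "You are a helpful assistant.",
    [{"role": "user", "content": "Say hello."}],
    {"temperature": 0.1, "max_tokens": 256},
)
print(ans, used_tokens)
```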
requirements.txt
CHANGED
```diff
@@ -85,6 +85,7 @@ tiktoken==0.6.0
 torch==2.3.0
 transformers==4.38.1
 umap==0.1.1
+vertexai==1.64.0
 volcengine==1.0.146
 voyageai==0.2.3
 webdriver_manager==4.0.1
```
requirements_arm.txt
CHANGED
```diff
@@ -167,3 +167,4 @@ scholarly==1.7.11
 deepl==1.18.0
 psycopg2-binary==2.9.9
 tabulate==0.9.0
+vertexai==1.64.0
```
web/src/assets/svg/llm/google-cloud.svg
ADDED
web/src/locales/en.ts
CHANGED
```diff
@@ -499,6 +499,7 @@ The above is the content you need to summarize.`,
     upgrade: 'Upgrade',
     addLlmTitle: 'Add LLM',
     modelName: 'Model name',
+    modelID: 'Model ID',
     modelUid: 'Model UID',
     modelNameMessage: 'Please input your model name!',
     modelType: 'Model type',
@@ -551,6 +552,15 @@ The above is the content you need to summarize.`,
     addFishAudioRefID: 'FishAudio Refrence ID',
     addFishAudioRefIDMessage:
       'Please input the Reference ID (leave blank to use the default model).',
+    GoogleModelIDMessage: 'Please input your model ID!',
+    addGoogleProjectID: 'Project ID',
+    GoogleProjectIDMessage: 'Please input your Project ID',
+    addGoogleServiceAccountKey:
+      'Service Account Key(Leave blank if you use Application Default Credentials)',
+    GoogleServiceAccountKeyMessage:
+      'Please input Google Cloud Service Account Key in base64 format',
+    addGoogleRegion: 'Google Cloud Region',
+    GoogleRegionMessage: 'Please input Google Cloud Region',
   },
   message: {
     registered: 'Registered!',
```
web/src/locales/zh-traditional.ts
CHANGED
```diff
@@ -461,6 +461,7 @@ export default {
     upgrade: '升級',
     addLlmTitle: '添加Llm',
     modelName: '模型名稱',
+    modelID: '模型ID',
     modelUid: '模型uid',
     modelType: '模型類型',
     addLlmBaseUrl: '基礎 Url',
@@ -511,6 +512,15 @@ export default {
     addFishAudioAKMessage: '請輸入 API KEY',
     addFishAudioRefID: 'FishAudio Refrence ID',
     addFishAudioRefIDMessage: '請輸入引用模型的ID(留空表示使用默認模型)',
+    GoogleModelIDMessage: '請輸入 model ID!',
+    addGoogleProjectID: 'Project ID',
+    GoogleProjectIDMessage: '請輸入 Project ID',
+    addGoogleServiceAccountKey:
+      'Service Account Key(Leave blank if you use Application Default Credentials)',
+    GoogleServiceAccountKeyMessage:
+      '請輸入 Google Cloud Service Account Key in base64 format',
+    addGoogleRegion: 'Google Cloud 區域',
+    GoogleRegionMessage: '請輸入 Google Cloud 區域',
   },
   message: {
     registered: '註冊成功',
```
web/src/locales/zh.ts
CHANGED
```diff
@@ -478,6 +478,7 @@ export default {
     upgrade: '升级',
     addLlmTitle: '添加 LLM',
     modelName: '模型名称',
+    modelID: '模型ID',
     modelUid: '模型UID',
     modelType: '模型类型',
     addLlmBaseUrl: '基础 Url',
@@ -528,6 +529,15 @@ export default {
     FishAudioAKMessage: '请输入 API KEY',
     addFishAudioRefID: 'FishAudio Refrence ID',
     FishAudioRefIDMessage: '请输入引用模型的ID(留空表示使用默认模型)',
+    GoogleModelIDMessage: '请输入 model ID!',
+    addGoogleProjectID: 'Project ID',
+    GoogleProjectIDMessage: '请输入 Project ID',
+    addGoogleServiceAccountKey:
+      'Service Account Key(Leave blank if you use Application Default Credentials)',
+    GoogleServiceAccountKeyMessage:
+      '请输入 Google Cloud Service Account Key in base64 format',
+    addGoogleRegion: 'Google Cloud 区域',
+    GoogleRegionMessage: '请输入 Google Cloud 区域',
   },
   message: {
     registered: '注册成功',
```
web/src/pages/user-setting/setting-model/constant.ts
CHANGED
```diff
@@ -39,6 +39,7 @@ export const IconMap = {
   'Tencent Cloud': 'tencent-cloud',
   Anthropic: 'anthropic',
   'Voyage AI': 'voyage',
+  'Google Cloud': 'google-cloud',
 };
 
 export const BedrockRegionList = [
```
web/src/pages/user-setting/setting-model/google-modal/index.tsx
ADDED
```diff
@@ -0,0 +1,95 @@
+import { useTranslate } from '@/hooks/common-hooks';
+import { IModalProps } from '@/interfaces/common';
+import { IAddLlmRequestBody } from '@/interfaces/request/llm';
+import { Form, Input, Modal, Select } from 'antd';
+
+type FieldType = IAddLlmRequestBody & {
+  google_project_id: string;
+  google_region: string;
+  google_service_account_key: string;
+};
+
+const { Option } = Select;
+
+const GoogleModal = ({
+  visible,
+  hideModal,
+  onOk,
+  loading,
+  llmFactory,
+}: IModalProps<IAddLlmRequestBody> & { llmFactory: string }) => {
+  const [form] = Form.useForm<FieldType>();
+
+  const { t } = useTranslate('setting');
+  const handleOk = async () => {
+    const values = await form.validateFields();
+
+    const data = {
+      ...values,
+      llm_factory: llmFactory,
+    };
+
+    onOk?.(data);
+  };
+
+  return (
+    <Modal
+      title={t('addLlmTitle', { name: llmFactory })}
+      open={visible}
+      onOk={handleOk}
+      onCancel={hideModal}
+      okButtonProps={{ loading }}
+    >
+      <Form
+        name="basic"
+        style={{ maxWidth: 600 }}
+        autoComplete="off"
+        layout={'vertical'}
+        form={form}
+      >
+        <Form.Item<FieldType>
+          label={t('modelType')}
+          name="model_type"
+          initialValue={'chat'}
+          rules={[{ required: true, message: t('modelTypeMessage') }]}
+        >
+          <Select placeholder={t('modelTypeMessage')}>
+            <Option value="chat">chat</Option>
+          </Select>
+        </Form.Item>
+        <Form.Item<FieldType>
+          label={t('modelID')}
+          name="llm_name"
+          rules={[{ required: true, message: t('GoogleModelIDMessage') }]}
+        >
+          <Input placeholder={t('GoogleModelIDMessage')} />
+        </Form.Item>
+        <Form.Item<FieldType>
+          label={t('addGoogleProjectID')}
+          name="google_project_id"
+          rules={[{ required: true, message: t('GoogleProjectIDMessage') }]}
+        >
+          <Input placeholder={t('GoogleProjectIDMessage')} />
+        </Form.Item>
+        <Form.Item<FieldType>
+          label={t('addGoogleRegion')}
+          name="google_region"
+          rules={[{ required: true, message: t('GoogleRegionMessage') }]}
+        >
+          <Input placeholder={t('GoogleRegionMessage')} />
+        </Form.Item>
+        <Form.Item<FieldType>
+          label={t('addGoogleServiceAccountKey')}
+          name="google_service_account_key"
+          rules={[
+            { required: true, message: t('GoogleServiceAccountKeyMessage') },
+          ]}
+        >
+          <Input placeholder={t('GoogleServiceAccountKeyMessage')} />
+        </Form.Item>
+      </Form>
+    </Modal>
+  );
+};
+
+export default GoogleModal;
```
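For completeness, the modal ultimately submits these fields to the backend's add_llm handler. A hedged sketch of the equivalent raw request; the URL path and auth header are assumptions about a local deployment, not part of this PR:

```python
import requests

payload = {
    "llm_factory": "Google Cloud",
    "llm_name": "gemini-1.5-flash-001",            # placeholder model ID
    "model_type": "chat",
    "google_project_id": "my-project",             # placeholder
    "google_region": "us-central1",                # placeholder
    "google_service_account_key": "<base64 key>",  # placeholder
}
resp = requests.post(
    "http://localhost:9380/v1/llm/add_llm",        # assumed local API base
    headers={"Authorization": "<session token>"},  # placeholder auth
    json=payload,
)
print(resp.json())
```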
web/src/pages/user-setting/setting-model/hooks.ts
CHANGED
```diff
@@ -298,6 +298,33 @@ export const useSubmitFishAudio = () => {
   };
 };
 
+export const useSubmitGoogle = () => {
+  const { addLlm, loading } = useAddLlm();
+  const {
+    visible: GoogleAddingVisible,
+    hideModal: hideGoogleAddingModal,
+    showModal: showGoogleAddingModal,
+  } = useSetModalState();
+
+  const onGoogleAddingOk = useCallback(
+    async (payload: IAddLlmRequestBody) => {
+      const ret = await addLlm(payload);
+      if (ret === 0) {
+        hideGoogleAddingModal();
+      }
+    },
+    [hideGoogleAddingModal, addLlm],
+  );
+
+  return {
+    GoogleAddingLoading: loading,
+    onGoogleAddingOk,
+    GoogleAddingVisible,
+    hideGoogleAddingModal,
+    showGoogleAddingModal,
+  };
+};
+
 export const useSubmitBedrock = () => {
   const { addLlm, loading } = useAddLlm();
   const {
```
web/src/pages/user-setting/setting-model/index.tsx
CHANGED
```diff
@@ -32,11 +32,13 @@ import ApiKeyModal from './api-key-modal';
 import BedrockModal from './bedrock-modal';
 import { IconMap } from './constant';
 import FishAudioModal from './fish-audio-modal';
+import GoogleModal from './google-modal';
 import {
   useHandleDeleteLlm,
   useSubmitApiKey,
   useSubmitBedrock,
   useSubmitFishAudio,
+  useSubmitGoogle,
   useSubmitHunyuan,
   useSubmitOllama,
   useSubmitSpark,
@@ -104,7 +106,8 @@ const ModelCard = ({ item, clickApiKey }: IModelCardProps) => {
         item.name === 'XunFei Spark' ||
         item.name === 'BaiduYiyan' ||
         item.name === 'Fish Audio' ||
-        item.name === 'Tencent Cloud'
+        item.name === 'Tencent Cloud' ||
+        item.name === 'Google Cloud'
           ? t('addTheModel')
           : 'API-Key'}
         <SettingOutlined />
@@ -186,6 +189,14 @@ const UserSettingModel = () => {
     HunyuanAddingLoading,
   } = useSubmitHunyuan();
 
+  const {
+    GoogleAddingVisible,
+    hideGoogleAddingModal,
+    showGoogleAddingModal,
+    onGoogleAddingOk,
+    GoogleAddingLoading,
+  } = useSubmitGoogle();
+
   const {
     TencentCloudAddingVisible,
     hideTencentCloudAddingModal,
@@ -235,6 +246,7 @@ const UserSettingModel = () => {
       BaiduYiyan: showyiyanAddingModal,
       'Fish Audio': showFishAudioAddingModal,
       'Tencent Cloud': showTencentCloudAddingModal,
+      'Google Cloud': showGoogleAddingModal,
     }),
     [
       showBedrockAddingModal,
@@ -244,6 +256,7 @@ const UserSettingModel = () => {
       showSparkAddingModal,
       showyiyanAddingModal,
       showFishAudioAddingModal,
+      showGoogleAddingModal,
     ],
   );
 
@@ -364,6 +377,13 @@ const UserSettingModel = () => {
         loading={HunyuanAddingLoading}
         llmFactory={'Tencent Hunyuan'}
       ></HunyuanModal>
+      <GoogleModal
+        visible={GoogleAddingVisible}
+        hideModal={hideGoogleAddingModal}
+        onOk={onGoogleAddingOk}
+        loading={GoogleAddingLoading}
+        llmFactory={'Google Cloud'}
+      ></GoogleModal>
       <TencentCloudModal
         visible={TencentCloudAddingVisible}
         hideModal={hideTencentCloudAddingModal}
```