Yangong committed
Commit · aa075d6
1 Parent(s): 0a66555
feat: add qwen 2.5 models for silicon flow (#3203)
### What problem does this PR solve?
Add Qwen 2.5 models for SiliconFlow.
### Type of change
- [X] New Feature (non-breaking change which adds functionality)
conf/llm_factories.json CHANGED: +61 -7
@@ -2017,6 +2017,60 @@
                "max_tokens": 32768,
                "model_type": "chat"
            },
+           {
+               "llm_name": "Qwen/Qwen2.5-72B-Instruct-128K",
+               "tags": "LLM,CHAT,128k",
+               "max_tokens": 131072,
+               "model_type": "chat"
+           },
+           {
+               "llm_name": "Qwen/Qwen2.5-72B-Instruct",
+               "tags": "LLM,CHAT,32k",
+               "max_tokens": 32768,
+               "model_type": "chat"
+           },
+           {
+               "llm_name": "Qwen/Qwen2.5-7B-Instruct",
+               "tags": "LLM,CHAT,32k",
+               "max_tokens": 32768,
+               "model_type": "chat"
+           },
+           {
+               "llm_name": "Qwen/Qwen2.5-14B-Instruct",
+               "tags": "LLM,CHAT,32k",
+               "max_tokens": 32768,
+               "model_type": "chat"
+           },
+           {
+               "llm_name": "Qwen/Qwen2.5-32B-Instruct",
+               "tags": "LLM,CHAT,32k",
+               "max_tokens": 32768,
+               "model_type": "chat"
+           },
+           {
+               "llm_name": "Qwen/Qwen2.5-Math-72B-Instruct",
+               "tags": "LLM,CHAT,Math,4k",
+               "max_tokens": 4096,
+               "model_type": "chat"
+           },
+           {
+               "llm_name": "Qwen/Qwen2.5-Coder-7B-Instruct",
+               "tags": "LLM,CHAT,FIM,Coder,32k",
+               "max_tokens": 32768,
+               "model_type": "chat"
+           },
+           {
+               "llm_name": "Pro/Qwen/Qwen2.5-7B-Instruct",
+               "tags": "LLM,CHAT,32k",
+               "max_tokens": 32768,
+               "model_type": "chat"
+           },
+           {
+               "llm_name": "Pro/Qwen/Qwen2.5-Coder-7B-Instruct",
+               "tags": "LLM,CHAT,FIM,Coder,32k",
+               "max_tokens": 32768,
+               "model_type": "chat"
+           },
            {
                "llm_name": "01-ai/Yi-1.5-34B-Chat-16K",
                "tags": "LLM,CHAT,16k",
@@ -2376,11 +2430,11 @@
            "llm": []
        },
        {
-
-
-
-
-
-
+           "name": "HuggingFace",
+           "logo": "",
+           "tags": "TEXT EMBEDDING",
+           "status": "1",
+           "llm": []
+       }
    ]
-}
+}
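For orientation, here is a minimal sketch of how the new entries could be inspected once this change is merged: it loads conf/llm_factories.json, looks up the SiliconFlow factory, and prints the Qwen 2.5 chat models added in the first hunk. The top-level key `factory_llm_infos` and the factory name `SILICONFLOW` are assumptions about the file layout, not taken from this diff.

```python
import json

# Sketch only: assumes conf/llm_factories.json holds a top-level
# "factory_llm_infos" list, where each factory has a "name" and an "llm"
# list of model entries like the ones added in this diff. The key names
# and the "SILICONFLOW" factory name are assumptions, not from the diff.
with open("conf/llm_factories.json", encoding="utf-8") as f:
    conf = json.load(f)

for factory in conf.get("factory_llm_infos", []):
    if factory.get("name", "").replace(" ", "").lower() == "siliconflow":
        for model in factory.get("llm", []):
            # List only the Qwen 2.5 entries introduced by this PR.
            if "Qwen2.5" in model.get("llm_name", ""):
                print(model["llm_name"], model["max_tokens"], model["model_type"])
```

Run against the merged file, this should list the nine entries added above, e.g. Qwen/Qwen2.5-72B-Instruct-128K with max_tokens 131072 and model_type "chat".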