Update main.py
main.py CHANGED
@@ -24,7 +24,7 @@ from offres_emploi.utils import dt_to_str_iso
 from langchain_community.chat_message_histories import ChatMessageHistory
 from langchain_core.chat_history import BaseChatMessageHistory
 from langchain_core.runnables.history import RunnableWithMessageHistory
-
+from langchain_openai import ChatOpenAI
 os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.environ['HUGGINGFACEHUB_API_TOKEN']
 
 session_id = str(uuid.uuid4())
@@ -155,7 +155,7 @@ def API_France_Travail_Metier(metier):
 def Connexion_Mistral():
     #endpoint = "https://models.inference.ai.azure.com"
     #return Mistral(api_key=os.environ["GITHUB_TOKEN"], server_url=endpoint)
-    repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+    #repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
     #repo_id = "mlx-community/Ministral-8B-Instruct-2410-8bit"
     #repo_id = "unsloth/Mistral-Nemo-Base-2407-bnb-4bit"
     #repo_id = "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
@@ -167,9 +167,11 @@ def Connexion_Mistral():
     #repo_id = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
     #repo_id = "microsoft/Phi-3.5-mini-instruct"
     #repo_id = "meta-llama/Llama-3.2-3B-Instruct"
-    llm = HuggingFaceEndpoint(
-        repo_id=repo_id, max_new_tokens=3000, temperature=0.5, top_p=0.7, task="text2text-generation", streaming=True
-    )
+    #llm = HuggingFaceEndpoint(
+    # repo_id=repo_id, max_new_tokens=3000, temperature=0.5, top_p=0.7, task="text2text-generation", streaming=True
+    #)
+    llm = ChatOpenAI(model_name="mistralai/Mistral-Small-3.1-24B-Instruct-2503", base_url=os.environ['BASEURL_RENNES_API_KEY'], api_key=os.environ['ENDPOINT_RENNES_API_KEY'])
+
     return llm
 
 @cl.step(type="tool", show_input=True)
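For reference, the Connexion_Mistral() produced by this commit boils down to the minimal sketch below. It only restates what the diff already contains: the HuggingFaceEndpoint path is left commented out and the model is now reached through an OpenAI-compatible client (langchain_openai.ChatOpenAI), with the base URL and key read from the BASEURL_RENNES_API_KEY and ENDPOINT_RENNES_API_KEY environment variables named in the diff.

import os
from langchain_openai import ChatOpenAI

def Connexion_Mistral():
    # The HuggingFaceEndpoint call stays commented out in the commit;
    # the model is now served through an OpenAI-compatible endpoint.
    llm = ChatOpenAI(
        model_name="mistralai/Mistral-Small-3.1-24B-Instruct-2503",
        base_url=os.environ['BASEURL_RENNES_API_KEY'],   # base URL of the Rennes endpoint (variable name as used in the diff)
        api_key=os.environ['ENDPOINT_RENNES_API_KEY'],   # key for that endpoint
    )
    return llm

# Quick check, assuming the endpoint above is reachable:
# llm = Connexion_Mistral()
# print(llm.invoke("Bonjour").content)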