Update app.py
app.py (CHANGED)
@@ -24,14 +24,16 @@ Voici le texte à transformer en présentation :"""
 
 # Loading the AI model from Hugging Face
 #from llama_cpp import LLMCppModel
-model_path = "MisterAI/Bartowski_MistralAI_Mistral-Small-24B-Base-2501-GGUF"
+#model_path = "MisterAI/Bartowski_MistralAI_Mistral-Small-24B-Base-2501-GGUF"
+model_path = "https://huggingface.co/MisterAI/Bartowski_MistralAI_Mistral-Small-24B-Base-2501-GGUF/tree/main/mistralai_Mistral-Small-24B-Base-2501-Q8_0.gguf"
 #text_to_presentation = LLMCppModel(model_path)
 #text_to_presentation = model_path
 text_to_presentation = Llama(model_path, verbose=True)
 
 # Loading the tokenizer from Hugging Face
 #from transformers import AutoTokenizer
-tokenizer = AutoTokenizer.from_pretrained("MisterAI/Bartowski_MistralAI_Mistral-Small-24B-Base-2501-GGUF")
+#tokenizer = AutoTokenizer.from_pretrained("MisterAI/Bartowski_MistralAI_Mistral-Small-24B-Base-2501-GGUF")
+tokenizer = AutoTokenizer.from_pretrained(model_path)
 
 
 
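The commit swaps the plain repo id in model_path for a direct Hub URL to the Q8_0 GGUF file and points AutoTokenizer at model_path as well. Note that llama-cpp-python's Llama() expects a local .gguf file path rather than a URL, and AutoTokenizer.from_pretrained() expects a repo id or local directory. Below is a minimal sketch of an alternative loading step, assuming the repo id and filename taken from the URL in the diff and that llama-cpp-python and huggingface_hub are installed:

from llama_cpp import Llama

# Repo id and GGUF filename taken from the URL used in the diff.
repo_id = "MisterAI/Bartowski_MistralAI_Mistral-Small-24B-Base-2501-GGUF"
gguf_file = "mistralai_Mistral-Small-24B-Base-2501-Q8_0.gguf"

# from_pretrained downloads the GGUF file from the Hub (via huggingface_hub)
# and returns a Llama instance; extra keyword arguments go to the constructor.
text_to_presentation = Llama.from_pretrained(
    repo_id=repo_id,
    filename=gguf_file,
    verbose=True,
)

# The Llama object tokenizes on its own (tokenize()/detokenize()), so a separate
# transformers tokenizer is optional; if one is wanted, pass it the repo id, not the URL.

Also worth noting: the /tree/main/... URL used in the diff points at the repo's file browser page; the raw file itself is served from a /resolve/main/... URL.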