Update app.py
app.py CHANGED
@@ -4,7 +4,7 @@ from transformers import pipeline
 from pptx import Presentation
 from pptx.util import Inches
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextIteratorStreamer
-from llama_cpp import LLMCppModel
+from llama_cpp import Llama
 
 
 # Preprompt giving the model its instructions
@@ -25,7 +25,8 @@ Voici le texte à transformer en présentation :"""
 #from llama_cpp import LLMCppModel
 model_path = "MisterAI/Bartowski_MistralAI_Mistral-Small-24B-Base-2501-GGUF"
 #text_to_presentation = LLMCppModel(model_path)
-text_to_presentation = model_path
+#text_to_presentation = model_path
+text_to_presentation = Llama(model_path, verbose=True)
 
 # Loading the tokenizer from Hugging Face
 #from transformers import AutoTokenizer
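
Note that the Llama constructor in llama-cpp-python expects model_path to be a path to a local .gguf file, while the string assigned to model_path above is a Hugging Face repo id, so the call as committed would likely fail. A minimal sketch of one way to bridge the gap, assuming the repo ships a GGUF file matching the "*.gguf" pattern (Llama.from_pretrained downloads a matching file via huggingface_hub before loading it):

from llama_cpp import Llama

# Sketch, not the committed code: resolve the Hub repo id to a local
# GGUF file, then load it with llama.cpp. The "*.gguf" pattern is an
# assumption; if the repo ships several quantizations, pass an explicit
# filename instead.
text_to_presentation = Llama.from_pretrained(
    repo_id="MisterAI/Bartowski_MistralAI_Mistral-Small-24B-Base-2501-GGUF",
    filename="*.gguf",
    verbose=True,
)

Llama.from_pretrained requires the huggingface_hub package to be installed, and it raises an error if the pattern matches more than one file in the repo.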