Update app.py
app.py CHANGED
@@ -1,6 +1,9 @@
-
+#139
 
+
+import os
 import gradio as gr
+from huggingface_hub import hf_hub_download, login
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 from pptx import Presentation
 from pptx.util import Inches, Pt
@@ -80,6 +83,7 @@ class PresentationGenerator:
                 n_batch=512,
                 verbose=False
             )
+            print(f"GGUF model {model_id} loaded successfully!")
         else:
             # Configuration for standard Transformers models
             self.text_tokenizer = AutoTokenizer.from_pretrained(model_id, token=self.token)
@@ -89,6 +93,7 @@ class PresentationGenerator:
                 device_map="auto",
                 token=self.token
             )
+            print(f"Transformers model {model_id} loaded successfully!")
 
     def load_image_model(self, model_name):
         """Loads the image generation model"""
@@ -105,6 +110,7 @@ class PresentationGenerator:
                 temperature=temperature,
                 echo=False
             )
+            print("Text generated by Llama:", response['choices'][0]['text'])
             return response['choices'][0]['text']
         else:
             inputs = self.text_tokenizer.apply_chat_template(
@@ -117,7 +123,9 @@ class PresentationGenerator:
                 max_new_tokens=max_tokens,
                 temperature=temperature
             )
-
+            generated_text = self.text_tokenizer.decode(outputs[0], skip_special_tokens=True)
+            print("Text generated by Transformers:", generated_text)
+            return generated_text
 
     def generate_image(self, prompt, negative_prompt="", num_inference_steps=30):
         """Generates an image for the slide"""
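The first three hunks wire in huggingface_hub and add load-confirmation messages. For context, a minimal sketch of what the GGUF loading path presumably looks like around these changes, assuming llama-cpp-python as the backend; the Llama import, repo ID, filename, and n_ctx value below are illustrative assumptions, since only n_batch=512, verbose=False, and the success print appear in the diff:

import os
from huggingface_hub import hf_hub_download, login
from llama_cpp import Llama  # assumed backend for the GGUF branch

# Hypothetical model coordinates; the real ones are outside these hunks.
REPO_ID = "TheBloke/Mistral-7B-Instruct-v0.2-GGUF"
FILENAME = "mistral-7b-instruct-v0.2.Q4_K_M.gguf"

def load_gguf_model(token=None):
    if token:
        login(token=token)  # matches the new huggingface_hub import
    # hf_hub_download returns a local cache path; llama-cpp-python loads
    # GGUF files from disk rather than from the Hub.
    model_path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
    llm = Llama(
        model_path=model_path,
        n_ctx=4096,      # assumption; the context size is not shown in the diff
        n_batch=512,     # same value as in the diff
        verbose=False,
    )
    print(f"GGUF model {REPO_ID} loaded successfully!")
    return llm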
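The last two hunks fix the Transformers branch of text generation: the old code called generate() but never decoded or returned the result, so that branch implicitly returned None. A condensed sketch of the two branches as they stand after this commit; the is_gguf flag, the message list, and the device move are assumptions, as the branch condition and method signature are outside the hunks:

def generate_text(self, prompt, max_tokens=512, temperature=0.7):
    if self.is_gguf:  # hypothetical flag; the real condition is not shown
        response = self.text_model(
            prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            echo=False,  # don't repeat the prompt in the completion
        )
        print("Text generated by Llama:", response['choices'][0]['text'])
        return response['choices'][0]['text']
    else:
        inputs = self.text_tokenizer.apply_chat_template(
            [{"role": "user", "content": prompt}],  # assumed message shape
            add_generation_prompt=True,
            return_tensors="pt",
        ).to(self.text_model.device)  # device move assumed, not in the hunks
        outputs = self.text_model.generate(
            inputs,
            max_new_tokens=max_tokens,
            temperature=temperature,  # only effective with do_sample=True
        )
        # New in this commit: decode and return the text instead of dropping it.
        generated_text = self.text_tokenizer.decode(outputs[0], skip_special_tokens=True)
        print("Text generated by Transformers:", generated_text)
        return generated_text

Note that decoding outputs[0] includes the prompt as rendered by the chat template; slicing off the input tokens first (outputs[0][inputs.shape[-1]:]) is a common refinement when only the completion is wanted.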