Update app.py
app.py CHANGED
@@ -103,6 +103,30 @@ class PresentationGenerator:
     # )
 
     ## Modif01: Fix for FLUX not loading on HF Space
+    # def load_image_model(self, model_name):
+    #     """Load the image generation model"""
+    #     model_id = IMAGE_MODELS[model_name]
+    #     if model_id == "black-forest-labs/FLUX.1-schnell":
+    #         self.image_pipeline = FluxPipeline.from_pretrained(
+    #             model_id,
+    #             torch_dtype=torch.bfloat16
+    #         )
+    #         self.image_pipeline.enable_model_cpu_offload()  # Saves VRAM by offloading the model to the CPU
+    #         print(f"FLUX image model loaded: {model_id}")
+    #     else:
+    #         self.image_pipeline = pipeline(
+    #             "text-to-image",
+    #             model=model_id,
+    #             token=self.token
+    #         )
+    #         print(f"Image model loaded: {model_id}")
+
+
+    ## Modif02: Fix for FLUX loading stalling at 71% on HF Space
+    # Loading pipeline components...:  71%|███████▏  | 5/7 [00:05<00:01, 1.07it/s]You set `add_prefix_space`.
+    # The tokenizer needs to be converted from the slow tokenizers
+    # Loading pipeline components...:  71%|███████▏  | 5/7 [00:05<00:02, 1.15s/it]
+
     def load_image_model(self, model_name):
         """Load the image generation model"""
         model_id = IMAGE_MODELS[model_name]
@@ -112,6 +136,7 @@ class PresentationGenerator:
                 torch_dtype=torch.bfloat16
             )
             self.image_pipeline.enable_model_cpu_offload()  # Saves VRAM by offloading the model to the CPU
+            self.image_pipeline.tokenizer.add_prefix_space = False  # Disable add_prefix_space
             print(f"FLUX image model loaded: {model_id}")
         else:
             self.image_pipeline = pipeline(
@@ -119,7 +144,13 @@ class PresentationGenerator:
                 model=model_id,
                 token=self.token
             )
-
+            print(f"Image model loaded: {model_id}")
+
+
+
+
+
+
 
 
     def generate_text(self, prompt, temperature=0.7, max_tokens=4096):
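For reference, here is a minimal standalone sketch of the fixed FLUX loading path described by Modif01 and Modif02. It is an assumption-based reconstruction, not the app's actual code: the PresentationGenerator wrapper and the IMAGE_MODELS lookup are omitted, and the prompt and output file name are illustrative.

import torch
from diffusers import FluxPipeline

# Load FLUX.1-schnell in bfloat16, as in the commit above.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    torch_dtype=torch.bfloat16,
)

# Modif01: offload idle submodules to the CPU so the Space's GPU VRAM is not exhausted.
pipe.enable_model_cpu_offload()

# Modif02: disable add_prefix_space so component loading does not stall at 5/7 (71%)
# while the slow tokenizer is converted (the warning quoted in the commit comments).
pipe.tokenizer.add_prefix_space = False

# Illustrative usage; FLUX.1-schnell is distilled to need only a few denoising steps.
image = pipe("a clean diagram-style illustration", num_inference_steps=4).images[0]
image.save("slide_image.png")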