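"""Gradio app that turns free-form text into a PowerPoint presentation.

A Hugging Face causal LM first generates a structured presentation skeleton
from the user's text, then PresentationGenerator converts that skeleton into
a downloadable .pptx file.
"""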
import os
import time

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from llm.list_llm import TEXT_MODELS, IMAGE_MODELS
from llm.prompt_llm import PREPROMPT
from python_pptx.python_pptx import PresentationGenerator
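# Note: python_pptx.python_pptx appears to be a project-local module that
# provides PresentationGenerator; it is distinct from the PyPI "python-pptx"
# package, whose import name is pptx.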

# Fallback model id used when the selected name is not found in TEXT_MODELS.
DEFAULT_MODEL = "ibm-granite/granite-3.1-3b-a800m-Instruct"


class ExecutionTimer:
    """Tracks the duration of the current run and remembers the previous one."""

    def __init__(self):
        self.start_time = None
        self.last_duration = None

    def start(self):
        self.start_time = time.time()

    def get_elapsed(self):
        if self.start_time is None:
            return 0
        return time.time() - self.start_time

    def stop(self):
        if self.start_time is not None:
            self.last_duration = self.get_elapsed()
            self.start_time = None
        return self.last_duration

    def get_status(self):
        if self.start_time is not None:
            current = self.get_elapsed()
            last = f" (previous: {self.last_duration:.2f}s)" if self.last_duration is not None else ""
            return f"Running... {current:.2f}s{last}"
        elif self.last_duration is not None:
            return f"Finished in {self.last_duration:.2f}s"
        return "Waiting..."
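
# Typical flow: timer.start() before a long-running call, timer.stop() after
# it; get_status() feeds the "Status" textbox in the UI.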


def generate_text(model_path, prompt, temperature=0.7, max_tokens=2048):
    """Generate text with a causal LM, returning only the model's completion."""
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            torch_dtype=torch.float32,
            device_map="auto"
        )
        model.eval()

        # Wrap the raw prompt in the model's chat template.
        chat = [{"role": "user", "content": prompt}]
        formatted_prompt = tokenizer.apply_chat_template(
            chat,
            tokenize=False,
            add_generation_prompt=True
        )

        inputs = tokenizer(
            formatted_prompt,
            return_tensors="pt",
            truncation=True,
            max_length=4096
        ).to(model.device)

        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                temperature=temperature,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id
            )

        # outputs[0] contains the prompt followed by the completion; decode
        # only the newly generated tokens so the prompt is not echoed back.
        new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
        return tokenizer.decode(new_tokens, skip_special_tokens=True)

    except Exception as e:
        print(f"Error during generation: {str(e)}")
        raise
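

# Possible optimization (a sketch, not wired in): generate_text() reloads the
# tokenizer and model on every call, which is simple but slow. Caching the
# loaded pair per model path, as below, would skip the reload, assuming there
# is enough memory to keep a model resident. load_model_cached is a
# hypothetical helper, not part of the original app.
from functools import lru_cache


@lru_cache(maxsize=1)
def load_model_cached(model_path):
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float32,
        device_map="auto"
    )
    model.eval()
    return tokenizer, model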


def generate_skeleton(model_name, text, temperature, max_tokens):
    """Generate the presentation skeleton."""
    try:
        timer.start()

        model_path = TEXT_MODELS.get(model_name, DEFAULT_MODEL)
        full_prompt = PREPROMPT + "\n\n" + text
        generated_content = generate_text(model_path, full_prompt, temperature, max_tokens)

        # Stop before reading the status so it reports the final duration
        # ("Finished in ...") instead of a still-running elapsed time.
        timer.stop()
        status = timer.get_status()

        return status, generated_content, gr.update(visible=True)

    except Exception as e:
        timer.stop()
        error_msg = f"Error: {str(e)}"
        print(error_msg)
        return error_msg, None, gr.update(visible=False)


def create_presentation_file(generated_content):
    """Create the PowerPoint file from the generated content."""
    try:
        timer.start()
        generator = PresentationGenerator()

        slides = generator.parse_presentation_content(generated_content)
        prs = generator.create_presentation(slides)

        # Saved in the current working directory; each run overwrites it.
        output_path = os.path.join(os.getcwd(), "presentation.pptx")
        prs.save(output_path)

        timer.stop()
        return output_path

    except Exception as e:
        timer.stop()
        print(f"Error while creating the file: {str(e)}")
        return None
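

# Single shared timer used by both Gradio callbacks to report status.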
timer = ExecutionTimer()


with gr.Blocks(theme=gr.themes.Glass()) as demo:
    gr.Markdown(
        """
        # AI PowerPoint Presentation Generator

        Automatically create professional presentations with the help of AI.
        """
    )

    with gr.Row():
        with gr.Column(scale=1):
            model_selector = gr.Dropdown(
                choices=list(TEXT_MODELS.keys()) if TEXT_MODELS else ["Granite"],
                value=list(TEXT_MODELS.keys())[0] if TEXT_MODELS else "Granite",
                label="Text model"
            )
            temperature = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                step=0.1,
                label="Temperature"
            )
            max_tokens = gr.Slider(
                minimum=1000,
                maximum=4096,
                value=2048,
                step=256,
                label="Maximum tokens"
            )

    with gr.Row():
        with gr.Column(scale=2):
            input_text = gr.Textbox(
                lines=10,
                label="Your text",
                placeholder="Describe the content you want for your presentation..."
            )

    with gr.Row():
        generate_skeleton_btn = gr.Button("Generate Presentation Skeleton", variant="primary")

    with gr.Row():
        with gr.Column():
            status_output = gr.Textbox(
                label="Status",
                lines=2,
                value="Waiting..."
            )
            generated_content = gr.Textbox(
                label="Generated content",
                lines=10,
                show_copy_button=True
            )
            create_presentation_btn = gr.Button("Create Presentation", visible=False)
            output_file = gr.File(
                label="PowerPoint Presentation",
                type="filepath"
            )

    generate_skeleton_btn.click(
        fn=generate_skeleton,
        inputs=[
            model_selector,
            input_text,
            temperature,
            max_tokens
        ],
        outputs=[
            status_output,
            generated_content,
            create_presentation_btn
        ]
    )

    create_presentation_btn.click(
        fn=create_presentation_file,
        inputs=[generated_content],
        outputs=[output_file]
    )
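
    # The "Create Presentation" button stays hidden until generate_skeleton
    # succeeds and returns gr.update(visible=True) for it.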


if __name__ == "__main__":
    demo.launch()