juancmamacias committed
Commit 8c164e7 · verified · 1 Parent(s): 8ba48f2

Upload app.py

Files changed (1)
app.py  +84  -0
app.py ADDED
@@ -0,0 +1,84 @@
+ # For Hugging Face Spaces, the main app file must be named app.py
+ # This file is a copy of ejemplo_2_.py
+
+ import streamlit as st
+ from transformers import (
+     AutoTokenizer,
+     AutoModelForSeq2SeqLM,
+     AutoModelForSequenceClassification,
+     pipeline
+ )
+ import torch
+
+ st.set_page_config(page_title="SLM Demo: QA + Sentiment", page_icon="🧠")
+ st.title("🧠 Small Language Models Demo")
+ st.markdown("""
+ This app compares three Small Language Models:
+ - `flan-t5-small` for question answering.
+ - `distilBERT` (base) for sentiment analysis.
+ - `distilBERT` fine-tuned for sentiment analysis.
+ """)
+
+ # Load models and tokenizers (cached so they are downloaded only once)
+ @st.cache_resource
+ def load_models():
+     # FLAN-T5 for question answering
+     flan_name = "google/flan-t5-small"
+     flan_tokenizer = AutoTokenizer.from_pretrained(flan_name)
+     flan_model = AutoModelForSeq2SeqLM.from_pretrained(flan_name)
+
+     # DistilBERT for classification (base model)
+     distil_name = "distilbert-base-uncased-finetuned-sst-2-english"
+     sentiment_analyzer = pipeline("sentiment-analysis", model=distil_name)
+
+     # Custom fine-tuned DistilBERT
+     custom_name = "juancmamacias/jd-jcms"
+     custom_analyzer = pipeline("sentiment-analysis", model=custom_name)
+
+     return flan_tokenizer, flan_model, sentiment_analyzer, custom_analyzer
+
+ flan_tokenizer, flan_model, sentiment_analyzer, custom_analyzer = load_models()
+
+ # Question history kept in session state
+ if "history" not in st.session_state:
+     st.session_state.history = []
+
+ # User input
+ question = st.text_input("💬 Enter a question or sentence to analyze:")
+
+ if st.button("Process") and question:
+     with st.spinner("Processing..."):
+
+         # ➤ Answer with FLAN-T5
+         input_ids = flan_tokenizer(question, return_tensors="pt").input_ids
+         outputs = flan_model.generate(input_ids, max_length=50)
+         flan_answer = flan_tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+         # ➤ Classification with base DistilBERT
+         sentiment = sentiment_analyzer(question)[0]
+         sentiment_label = sentiment['label']
+         sentiment_score = round(sentiment['score'], 3)
+
+         # ➤ Classification with the custom fine-tuned DistilBERT
+         custom_sentiment = custom_analyzer(question)[0]
+         custom_label = custom_sentiment['label']
+         custom_score = round(custom_sentiment['score'], 3)
+
+         # Save to history
+         st.session_state.history.append({
+             "question": question,
+             "answer": flan_answer,
+             "sentiment": f"{sentiment_label} ({sentiment_score})",
+             "custom_sentiment": f"{custom_label} ({custom_score})"
+         })
+
+ # Show history
+ if st.session_state.history:
+     st.markdown("### 📜 History")
+     for i, item in enumerate(reversed(st.session_state.history), 1):
+         st.markdown(f"""
+ **{i}. Input:** {item['question']}
+ 🧠 **Answer (FLAN):** {item['answer']}
+ ❤️ **Sentiment (base):** {item['sentiment']}
+ 💙 **Sentiment (custom):** {item['custom_sentiment']}
+ ---""")