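"""Multi-modal AI demo app for Streamlit.

Loads Hugging Face pipelines for text (sentiment, classification,
summarization, QA, translation, generation, NER), image (classification,
object detection, segmentation, facial expression recognition), and audio
(speech-to-text, emotion classification), plus Stable Diffusion XL for
text-to-image generation.
"""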
import streamlit as st
from transformers import pipeline
import torch
from PIL import Image
import io
import librosa
import numpy as np
import logging
import tempfile
import os
from streamlit.runtime.uploaded_file_manager import UploadedFile
from diffusers import DiffusionPipeline
# Page configuration
st.set_page_config(
    page_title="Multi-Modal AI Application",
    page_icon="🤖",
    layout="wide"
)

# Logging configuration (INFO level, so the device log below is actually
# recorded; at ERROR level the logging.info call would be silently dropped)
logging.basicConfig(
    filename='app_errors.log',
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
# Cache so models are loaded once and reused across Streamlit reruns
@st.cache_resource(show_spinner=False)
def load_models():
    """Load all models, cached for better performance."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logging.info(f"Using device: {device}")
    models = {}
    try:
        # Text models
        models['sentiment_analysis'] = pipeline(
            "sentiment-analysis",
            model="cardiffnlp/twitter-roberta-base-sentiment-latest",
            device=device
        )
        models['text_classification'] = pipeline(
            "text-classification",
            model="distilbert-base-uncased-finetuned-sst-2-english",
            device=device
        )
        models['summarization'] = pipeline(
            "summarization",
            model="facebook/bart-large-cnn",
            device=device,
            max_length=150,
            min_length=30
        )
        models['question_answering'] = pipeline(
            "question-answering",
            model="deepset/roberta-base-squad2",
            device=device
        )
        # English -> Portuguese translation
        models['translation'] = pipeline(
            "translation",
            model="Helsinki-NLP/opus-mt-tc-big-en-pt",
            device=device
        )
        models['text_generation'] = pipeline(
            "text-generation",
            model="gpt2",
            device=device,
            max_length=100
        )
        models['ner'] = pipeline(
            "ner",
            model="dbmdz/bert-large-cased-finetuned-conll03-english",
            device=device,
            aggregation_strategy="simple"
        )
        # Image models
        models['image_classification'] = pipeline(
            "image-classification",
            model="google/vit-base-patch16-224",
            device=device
        )
        models['object_detection'] = pipeline(
            "object-detection",
            model="facebook/detr-resnet-50",
            device=device
        )
        models['image_segmentation'] = pipeline(
            "image-segmentation",
            model="facebook/detr-resnet-50-panoptic",
            device=device
        )
        # Facial expression recognition (an image-classification pipeline)
        models['facial_recognition'] = pipeline(
            "image-classification",
            model="mo-thecreator/vit-Facial-Expression-Recognition",
            device=device
        )
        # Audio models
        models['speech_to_text'] = pipeline(
            "automatic-speech-recognition",
            model="openai/whisper-base",
            device=device
        )
        # Emotion recognition on speech (SUPERB ER task)
        models['audio_classification'] = pipeline(
            "audio-classification",
            model="superb/hubert-base-superb-er",
            device=device
        )
        # Generative models: load SDXL in fp16 only when a GPU is available
        # (fp16 weights are impractical on CPU) and move the pipeline to the
        # device so inference does not silently run on CPU
        if device.type == "cuda":
            models['text_to_image'] = DiffusionPipeline.from_pretrained(
                "stabilityai/stable-diffusion-xl-base-1.0",
                torch_dtype=torch.float16,
                use_safetensors=True,
                variant="fp16"
            ).to(device)
        else:
            models['text_to_image'] = DiffusionPipeline.from_pretrained(
                "stabilityai/stable-diffusion-xl-base-1.0",
                use_safetensors=True
            )
    except Exception as e:
        st.error(f"Critical error loading models: {str(e)}")
        logging.exception("Error loading models")
        return {}
    return models
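
# Illustrative sketch (not part of the original file): one way a cached
# pipeline might be wired into the UI. The section name, layout, and helper
# name are assumptions, not the original app's design.
def sentiment_section(models):
    st.header("Sentiment Analysis")
    text = st.text_area("Enter text to analyze:")
    if st.button("Analyze") and text:
        # The pipeline returns a list of {'label': ..., 'score': ...} dicts
        result = models['sentiment_analysis'](text)[0]
        st.write(f"Label: {result['label']} (score: {result['score']:.3f})")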
# (The remaining functions are the same as in the original app)
def main():
    st.title("🤖 Advanced Multi-Modal AI Application")
    st.markdown("---")

    # Load models
    with st.spinner("Loading AI models... (this can take a few minutes on the first run)"):
        models = load_models()

    if not models:
        st.error("Critical failure loading the models. Check the logs for details.")
        return

    st.write("✅ Application loaded successfully!")
    # (Rest of the original code)

if __name__ == "__main__":
    main()
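
# Run with: streamlit run src/streamlit_app.py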