import gradio as gr
from transformers import pipeline

# ✅ Sentiment Analysis (uses transformers' default sentiment checkpoint)
sentiment_pipeline = pipeline("sentiment-analysis")

def analyze_sentiment(text):
    result = sentiment_pipeline(text)[0]
    return f"{result['label']} ({result['score']:.2f})"

# ✅ Toxic Comment Detection (uses a toxicity model from Hugging Face)
toxic_pipeline = pipeline("text-classification", model="unitary/toxic-bert")

def detect_toxic(text):
    result = toxic_pipeline(text)[0]
    return f"{result['label']} ({result['score']:.2f})"

# ✅ Image Captioning Model (BLIP)
image_captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")

def caption_image(image):
    return image_captioner(image)[0]["generated_text"]

# ✅ Speech-to-Text Model (Whisper)
speech_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")

def speech_to_text(audio):
    return speech_pipeline(audio)["text"]

# 🟡 Placeholder for Style Transfer (optional upgrade later; see the
# commented-out diffusers sketch at the bottom of this file)
def style_transfer(image, style):
    return image  # Identity for now; swap in a real model later

# ✅ Gradio App Setup
with gr.Blocks() as demo:
    gr.Markdown("# 🚀 ML Playground Dashboard")

    with gr.Tab("Sentiment Analyzer"):
        gr.Interface(fn=analyze_sentiment, inputs=gr.Textbox(), outputs=gr.Textbox())

    with gr.Tab("Toxic Comment Detector"):
        gr.Interface(fn=detect_toxic, inputs=gr.Textbox(), outputs=gr.Textbox())

    with gr.Tab("Image Caption Generator"):
        gr.Interface(fn=caption_image, inputs=gr.Image(type="pil"), outputs=gr.Textbox())

    with gr.Tab("Speech-to-Text"):
        gr.Interface(fn=speech_to_text, inputs=gr.Audio(type="filepath"), outputs=gr.Textbox())

    with gr.Tab("Art Style Transfer"):
        gr.Interface(
            fn=style_transfer,
            inputs=[gr.Image(type="pil"), gr.Dropdown(["Van Gogh", "Monet", "Picasso"], label="Style")],
            outputs=gr.Image(),
        )

demo.launch()
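
# Notes on running this demo (assumptions, not pinned requirements):
#   pip install gradio transformers torch pillow
# The Speech-to-Text tab additionally needs ffmpeg on the PATH, since the
# transformers ASR pipeline uses it to decode audio files. Run the script
# (e.g. `python app.py`; the filename is arbitrary) and open the local URL
# Gradio prints; pass share=True to demo.launch() for a temporary public link.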
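
# Optional upgrade: a minimal sketch of a real style_transfer backend.
# This is an assumption, not part of the app above: it relies on the
# diffusers library and the public "timbrooks/instruct-pix2pix" checkpoint,
# and the prompt wording is illustrative. To try it, install diffusers,
# uncomment the lines below, and use this function in place of the
# placeholder style_transfer defined earlier (GPU strongly recommended).
#
# import torch
# from diffusers import StableDiffusionInstructPix2PixPipeline
#
# pix2pix = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#     "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
# ).to("cuda")
#
# def style_transfer(image, style):
#     # The tab passes a PIL image (gr.Image(type="pil")); the pipeline
#     # returns a list of PIL images, so hand the first one back to Gradio.
#     prompt = f"repaint this picture in the style of {style}"
#     return pix2pix(prompt, image=image, image_guidance_scale=1.5).images[0]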