import gradio as gr
import asyncio
import json
import os
import base64

from agent import (
    create_doc_agent,
    create_image_agent,
    create_json_analyzer_agent,
    create_speech_agent,
    process_document_workflow,
    process_image_workflow,
    complete_analysis_workflow,
    tts_with_mcp,
    simulate_process_climate_document,
    simulate_analyze_image,
    simulate_analyze_json_data,
    simulate_text_to_speech,
    play_wav
)
from mistralai import Mistral

client = None
agents = None

# Climate Chat Agent ID
CLIMATE_AGENT_ID = "ag:a6b86184:20250611:mistyclimate-climate-chat-agent:74bd7c2f"


def initialize_client_and_agents(api_key: str):
    global client, agents
    if client is None or agents is None:
        try:
            client = Mistral(api_key=api_key)
            doc_agent = create_doc_agent(client)
            image_agent = create_image_agent(client)
            json_analyzer_agent = create_json_analyzer_agent(client)
            speech_agent = create_speech_agent(client)
            agents = {
                "doc_agent_id": doc_agent.id,
                "image_agent_id": image_agent.id,
                "json_analyzer_agent_id": json_analyzer_agent.id,
                "speech_agent_id": speech_agent.id,
                "climate_agent_id": CLIMATE_AGENT_ID
            }
            print("Agents initialized successfully.")
            print(f"Document Agent ID: {doc_agent.id}")
            print(f"Image Agent ID: {image_agent.id}")
            print(f"JSON Analyzer Agent ID: {json_analyzer_agent.id}")
            print(f"Speech Agent ID: {speech_agent.id}")
            print(f"Climate Chat Agent ID: {CLIMATE_AGENT_ID}")
        except Exception as e:
            return None, f"Error initializing client: {str(e)}"
    return client, agents


# Function to handle climate chat
# Note: the chatbot uses tuple-format history, so each entry is a
# (user_message, assistant_message) pair.
def climate_chat(api_key: str, message: str, history):
    if not api_key:
        return history + [(message, "Error: Please provide a valid API key.")]
    if not message.strip():
        return history + [(message, "Error: Please enter a message.")]

    client, agents_or_error = initialize_client_and_agents(api_key)
    if client is None:
        return history + [(message, f"Error: {agents_or_error}")]

    try:
        # Use the climate agent for chat
        response = client.agents.complete(
            agent_id=CLIMATE_AGENT_ID,
            messages=[
                {
                    "role": "user",
                    "content": message
                }
            ]
        )
        if response and response.choices and response.choices[0].message:
            bot_response = response.choices[0].message.content
            return history + [(message, bot_response)]
        else:
            return history + [(message, "No response received from the climate agent.")]
    except Exception as e:
        return history + [(message, f"Error: {str(e)}")]


# Function to clear chat history
def clear_chat():
    return []


custom_css = """
body {
    background: #121212;
    color: #ffffff;
}
.gradio-container {
    background-color: #1e1e1e;
    border-radius: 12px;
    box-shadow: 0 4px 12px rgba(0,0,0,0.4);
}
h1, h2 {
    color: #80cbc4;
}
.gr-button {
    background-color: #26a69a;
    color: white;
}
.gr-button:hover {
    background-color: #00897b;
}
input, textarea, select {
    background-color: #2c2c2c !important;
    color: #ffffff;
    border: 1px solid #4db6ac;
}
.gr-file label {
    background-color: #26a69a;
    color: white;
}
.gr-audio {
    border-radius: 12px;
    box-shadow: 0 0 8px #4db6ac;
}
.climate-chat {
    border: 2px solid #4db6ac;
    border-radius: 12px;
    padding: 15px;
    margin-bottom: 20px;
    background: linear-gradient(135deg, #1e1e1e 0%, #2c2c2c 100%);
}
/* Avatar styling for emojis */
.gr-chatbot .avatar {
    border-radius: 50%;
    width: 40px;
    height: 40px;
    border: 2px solid #4db6ac;
    background-color: #2c2c2c;
    display: flex;
    align-items: center;
    justify-content: center;
    font-size: 24px; /* Adjust emoji size */
}
.gr-chatbot .user .avatar {
    background-color: #26a69a; /* User avatar background */
}
.gr-chatbot .assistant .avatar {
    background-color: #4db6ac; /* Assistant avatar background */
}
"""


# Function to handle document processing workflow
async def run_document_workflow(api_key: str, file, document_type):
    if not api_key:
        return "Error: Please provide a valid API key."
    if file is None:
        return "Error: Please upload a document file."
    file_path = file.name

    client, agents_or_error = initialize_client_and_agents(api_key)
    if client is None:
        return agents_or_error

    try:
        response = await process_document_workflow(client, file_path, document_type)
        if response and response.choices and response.choices[0].message.tool_calls:
            for tool_call in response.choices[0].message.tool_calls:
                if tool_call.function.name == "process_climate_document":
                    result = simulate_process_climate_document(file_path=file_path, document_type=document_type)
                    return json.dumps(result, indent=2)
        return response.choices[0].message.content if response and response.choices else "No response received."
    except Exception as e:
        return f"Error: {str(e)}"


# Function to handle image processing workflow
async def run_image_workflow(api_key: str, image_file, analysis_focus):
    if not api_key:
        return "Error: Please provide a valid API key."
    if image_file is None:
        return "Error: Please upload an image file."
    image_path = image_file.name

    client, agents_or_error = initialize_client_and_agents(api_key)
    if client is None:
        return agents_or_error

    try:
        response = await process_image_workflow(client, image_path, analysis_focus)
        if response and response.choices and response.choices[0].message.tool_calls:
            for tool_call in response.choices[0].message.tool_calls:
                if tool_call.function.name == "analyze_image":
                    with open(image_path, "rb") as f:
                        image_data = base64.b64encode(f.read()).decode("utf-8")
                    result = simulate_analyze_image(image_data, image_format="jpg", analysis_focus=analysis_focus)
                    return json.dumps(result, indent=2)
        return response.choices[0].message.content if response and response.choices else "No response received."
    except Exception as e:
        return f"Error: {str(e)}"


# Function to handle JSON analysis and speech workflow
async def run_analysis_and_speech_workflow(api_key: str, json_input, analysis_type):
    if not api_key:
        return "Error: Please provide a valid API key.", None
    try:
        json_data = json.loads(json_input)
        client, agents_or_error = initialize_client_and_agents(api_key)
        if client is None:
            return agents_or_error, None

        json_response, speech_response = await complete_analysis_workflow(client, json_data, max_retries=3)
        output = []
        if json_response and json_response.choices:
            output.append("JSON Analysis Response:")
            output.append(json_response.choices[0].message.content)
            for tool_call in json_response.choices[0].message.tool_calls or []:
                if tool_call.function.name == "analyze_json_data":
                    analysis_result = simulate_analyze_json_data(json_data, analysis_type)
                    output.append("Analysis Result:")
                    output.append(json.dumps(analysis_result, indent=2))
        if speech_response and speech_response.choices:
            output.append("\nSpeech Response:")
            output.append(speech_response.choices[0].message.content)
            for tool_call in speech_response.choices[0].message.tool_calls or []:
                if tool_call.function.name == "text_to_speech":
                    analysis_text = "Climate analysis reveals significant warming trends with regional variations requiring immediate attention."
                    audio_url = simulate_text_to_speech(analysis_text)
                    output.append(f"Generated Audio URL: {audio_url}")
                    play_result = play_wav(audio_url)
                    output.append(f"Audio Play Result: {play_result}")
                    if "file://" in audio_url:
                        audio_path = audio_url.replace("file://", "")
                        if os.path.exists(audio_path):
                            return "\n".join(output), audio_path
                        else:
                            output.append("Error: Audio file not found.")
        return "\n".join(output), None
    except Exception as e:
        return f"Error: {str(e)}", None


# Function to handle TTS workflow
async def run_tts_workflow(api_key: str, text_input):
    if not api_key:
        return "Error: Please provide a valid API key.", None

    client, agents_or_error = initialize_client_and_agents(api_key)
    if client is None:
        return agents_or_error, None

    try:
        response = await tts_with_mcp(client, text_input)
        output = []
        if response and response.choices:
            output.append("TTS Agent Response:")
            output.append(response.choices[0].message.content)
            for tool_call in response.choices[0].message.tool_calls or []:
                if tool_call.function.name == "text_to_speech":
                    audio_url = simulate_text_to_speech(text=text_input)
                    output.append(f"Generated Audio URL: {audio_url}")
                    play_result = play_wav(audio_url)
                    output.append(f"Audio Play Result: {play_result}")
                    if "file://" in audio_url:
                        audio_path = audio_url.replace("file://", "")
                        if os.path.exists(audio_path):
                            return "\n".join(output), audio_path
                        else:
                            output.append("Error: Audio file not found.")
        return "\n".join(output), None
    except Exception as e:
        return f"Error: {str(e)}", None


# Gradio interface
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("# 🌍 MistyClimate Multi-Agent System")
    gr.Markdown("## Advanced Climate Intelligence & Multi-Agent Processing Platform")
    gr.Markdown("Enter your Mistral API key and interact with our specialized climate chat agent and various processing systems.")

    api_key_input = gr.Textbox(label="🔑 Mistral API Key", type="password", placeholder="Enter your Mistral API key here")

    # Climate Chat Assistant Tab with Emoji Avatars
    with gr.Tab("🌡️ Climate Chat Assistant", elem_classes=["climate-chat"]):
        gr.Markdown("### Climate Intelligence Chat")
        gr.Markdown("Chat with our specialized climate AI assistant. Ask about climate science, environmental issues, sustainability practices, and get expert guidance on climate-related topics.")

        # Chatbot with emoji avatars
        chatbot = gr.Chatbot(
            label="Climate Chat",
            height=500,
            type="tuples",
            placeholder="Start chatting about climate topics...",
            avatar_images=("👤", "🌍")  # User: person emoji, Assistant: globe emoji
        )

        with gr.Row():
            msg_input = gr.Textbox(
                label="Your Message",
                placeholder="Ask me about climate change, sustainability, renewable energy, or environmental policies...",
                scale=4
            )
            send_btn = gr.Button("Send 💬", scale=1)
            clear_btn = gr.Button("Clear Chat 🗑️", scale=1)

        # Sample questions for users
        gr.Markdown("### 💡 Try these sample questions:")
        sample_questions = [
            "What are the main causes of climate change?",
            "How can I reduce my carbon footprint?",
            "Explain the Paris Climate Agreement",
            "What's the difference between weather and climate?",
            "Tell me about renewable energy options",
            "How does deforestation affect climate?"
        ]
        sample_buttons = []
        with gr.Row():
            for i, question in enumerate(sample_questions[:3]):
                btn = gr.Button(f"❓ {question}", scale=1)
                sample_buttons.append(btn)
        with gr.Row():
            for i, question in enumerate(sample_questions[3:]):
                btn = gr.Button(f"❓ {question}", scale=1)
                sample_buttons.append(btn)

        # Event handlers for climate chat
        def respond(message, history, api_key):
            return climate_chat(api_key, message, history)

        def send_message(message, history, api_key):
            if not message.strip():
                return message, history
            new_history = respond(message, history, api_key)
            return "", new_history

        def use_sample_question(question):
            return question

        send_btn.click(send_message, [msg_input, chatbot, api_key_input], [msg_input, chatbot])
        msg_input.submit(send_message, [msg_input, chatbot, api_key_input], [msg_input, chatbot])
        clear_btn.click(clear_chat, outputs=chatbot)

        # Connect sample question buttons
        for i, btn in enumerate(sample_buttons):
            btn.click(use_sample_question, inputs=gr.State(sample_questions[i]), outputs=msg_input)

    with gr.Tab("📄 Document Processing"):
        gr.Markdown("### Climate Document Analysis")
        gr.Markdown("Upload PDF documents for climate-focused analysis and processing.")
        doc_file = gr.File(label="Upload Document (PDF)", file_types=[".pdf"])
        doc_type = gr.Dropdown(choices=["climate_report", "analysis", "data"], label="Document Type", value="climate_report")
        doc_button = gr.Button("🔍 Process Document")
        doc_output = gr.Textbox(label="Document Processing Output", lines=10)
        doc_button.click(
            fn=run_document_workflow,
            inputs=[api_key_input, doc_file, doc_type],
            outputs=doc_output
        )

    with gr.Tab("🖼️ Image Analysis"):
        gr.Markdown("### Visual Climate Data Analysis")
        gr.Markdown("Analyze images, charts, and visual climate data using AI vision capabilities.")
        img_file = gr.File(label="Upload Image (PNG/JPG/PDF)", file_types=[".png", ".jpg", ".jpeg", ".pdf"])
        analysis_focus = gr.Dropdown(choices=["text_extraction", "chart_analysis", "table_extraction"], label="Analysis Focus", value="chart_analysis")
        img_button = gr.Button("🔍 Analyze Image")
        img_output = gr.Textbox(label="Image Analysis Output", lines=10)
        img_button.click(
            fn=run_image_workflow,
            inputs=[api_key_input, img_file, analysis_focus],
            outputs=img_output
        )

    with gr.Tab("📊 JSON Analysis & Speech"):
        gr.Markdown("### Climate Data Analysis & Audio Generation")
        gr.Markdown("Process climate JSON data and generate audio summaries of the analysis.")
        json_input = gr.Textbox(
            label="Climate JSON Data Input",
            lines=5,
            placeholder='{"temperature_data": [20.1, 20.5, 21.2, 21.8], "emissions": [400, 410, 415, 420], "regions": ["Global", "Arctic", "Tropical"], "year": 2024}'
        )
        analysis_type = gr.Dropdown(choices=["statistical", "content", "structural"], label="Analysis Type", value="content")
        analysis_button = gr.Button("📈 Run Analysis & Generate Speech")
        analysis_output = gr.Textbox(label="Analysis and Speech Output", lines=10)
        audio_output = gr.Audio(label="Generated Audio Summary")
        analysis_button.click(
            fn=run_analysis_and_speech_workflow,
            inputs=[api_key_input, json_input, analysis_type],
            outputs=[analysis_output, audio_output]
        )

    with gr.Tab("🎤 Text-to-Speech"):
        gr.Markdown("### Climate Audio Generation")
        gr.Markdown("Convert climate-related text to natural speech audio.")
        tts_input = gr.Textbox(label="Text Input", value="Climate change requires immediate global action to reduce greenhouse gas emissions and transition to renewable energy sources.")
        tts_button = gr.Button("🔊 Generate Speech")
        tts_output = gr.Textbox(label="TTS Output", lines=5)
        tts_audio = gr.Audio(label="Generated Audio")
        tts_button.click(
            fn=run_tts_workflow,
            inputs=[api_key_input, tts_input],
            outputs=[tts_output, tts_audio]
        )

    # Footer information
    gr.Markdown("---")
    gr.Markdown("### 🌱 About MistyClimate")
    gr.Markdown("MistyClimate is an advanced multi-agent system powered by Mistral AI, designed to provide comprehensive climate intelligence, document processing, and multimedia analysis capabilities. Our specialized climate chat agent is trained on climate science and sustainability topics to provide accurate, actionable information.")

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)