# Hugging Face Spaces status banner ("Spaces: Sleeping") — page-extraction residue, not code.
# Standard library
import os

# Third-party
import requests
import streamlit as st
# Retrieve the API key from the environment.
# NOTE(review): the "default_hf_token" fallback is not a real credential —
# requests will get 401 from the API unless HF_TOKEN is set; confirm the
# deployment always provides it.
HF_TOKEN = os.getenv("HF_TOKEN", "default_hf_token")

# Hugging Face Inference API endpoint for the chat model.
HF_MODEL_URL = "https://api-inference.huggingface.co/models/Xenova/gpt-3.5-turbo"
# Function to get a response from the Hugging Face model.
def get_response(user_query: str) -> str:
    """Return the model's reply to *user_query*, or an error message.

    Posts the query to the Hugging Face Inference API and extracts the
    generated text. Never raises: any network, HTTP, or parsing failure
    is returned as an ``"Error: ..."`` string so the UI can display it.
    """
    try:
        headers = {"Authorization": f"Bearer {HF_TOKEN}"}
        payload = {"inputs": user_query}
        # timeout bounds the call so a stalled API cannot hang the UI forever.
        response = requests.post(
            HF_MODEL_URL, headers=headers, json=payload, timeout=30
        )
        response.raise_for_status()
        result = response.json()
        # Successful generations arrive as a non-empty list of dicts like
        # [{"generated_text": ...}]; guard against an empty list so we report
        # the format problem instead of an IndexError-derived message.
        if isinstance(result, list) and result:
            return result[0].get("generated_text", "No response generated.")
        return "Unexpected response format."
    except Exception as e:  # deliberate catch-all: callers expect a string, not a raise
        return f"Error: {e}"
# Streamlit UI for the customer support chatbot.
st.title("Customer Support Chatbot")

user_query = st.text_input("Enter your query:", "")

if st.button("Get Response"):
    if not user_query.strip():
        # Avoid a pointless API round-trip on a blank query.
        st.warning("Please enter a query first.")
    else:
        with st.spinner("Processing..."):
            # get_response never raises — failures come back as
            # "Error: ..." strings — so no try/except is needed here.
            response = get_response(user_query)
            st.subheader("Chatbot Response")
            st.write(response)