	Update app.py
app.py CHANGED
@@ -9,8 +9,16 @@ TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
 if not TOKEN:
     raise ValueError("API token is not set. Please set the HUGGINGFACE_API_TOKEN environment variable.")
 
+print(f"API Token: {TOKEN[:5]}...{TOKEN[-5:]}")  # Check API token
+
 memory = deque(maxlen=10)
 
+async def test_api():
+    headers = {"Authorization": f"Bearer {TOKEN}"}
+    async with aiohttp.ClientSession() as session:
+        async with session.get("https://api-inference.huggingface.co/models/mistralai/Mistral-Nemo-Instruct-2407", headers=headers) as response:
+            print(f"Test API response: {await response.text()}")
+
 async def respond(
     message,
     history: list[tuple[str, str]],
@@ -19,7 +27,7 @@ async def respond(
     temperature=0.7,
     top_p=0.95,
 ):
-    system_prefix = "System: 
+    system_prefix = "System: Respond in the same language as the input (English, Korean, Chinese, Japanese, etc.)."
     full_system_message = f"{system_prefix}{system_message}"
 
     memory.append((message, None))
@@ -46,6 +54,13 @@ async def respond(
     try:
         async with aiohttp.ClientSession() as session:
             async with session.post("https://api-inference.huggingface.co/v1/chat/completions", headers=headers, json=payload) as response:
+                print(f"Response status: {response.status}")
+                if response.status != 200:
+                    error_text = await response.text()
+                    print(f"Error response: {error_text}")
+                    yield "An API response error occurred. Please try again."
+                    return
+
                 response_text = ""
                 async for chunk in response.content:
                     if chunk:
@@ -62,6 +77,7 @@ async def respond(
                 if not response_text:
                     yield "I apologize, but I couldn't generate a response. Please try again."
     except Exception as e:
+        print(f"Exception occurred: {str(e)}")
         yield f"An error occurred: {str(e)}"
 
     memory[-1] = (message, response_text)
@@ -72,8 +88,6 @@ async def chat(message, history, system_message, max_tokens, temperature, top_p)
         response = chunk
         yield response
 
-
-
 theme = "Nymbo/Nymbo_Theme"
 
 css = """
@@ -95,4 +109,6 @@ demo = gr.ChatInterface(
 )
 
 if __name__ == "__main__":
+    import asyncio
+    asyncio.run(test_api())  # Run API test
     demo.queue().launch(max_threads=20)
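For context on the status-code guard the commit adds inside respond(), a hedged sketch of how that branch might sit in a complete streaming call. The stream_chat name, the payload fields (including stream=True and the model id), and the "data: ..." SSE parsing are assumptions about the OpenAI-compatible streaming format, not code taken from this commit.

# Hedged sketch: streaming chat-completions call with the commit's early-return guard.
import json
import os

import aiohttp

TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
API_URL = "https://api-inference.huggingface.co/v1/chat/completions"


async def stream_chat(messages, max_tokens=512, temperature=0.7, top_p=0.95):
    """Yield text deltas, or a single error message when the API rejects the call."""
    headers = {"Authorization": f"Bearer {TOKEN}"}
    payload = {
        "model": "mistralai/Mistral-Nemo-Instruct-2407",  # assumed model id
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "stream": True,  # assumed: server sends "data: ..." chunks
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(API_URL, headers=headers, json=payload) as response:
            # Same guard the commit adds: log and bail out early on a non-200 status.
            if response.status != 200:
                print(f"Error response: {await response.text()}")
                yield "An API response error occurred. Please try again."
                return
            async for raw_line in response.content:
                line = raw_line.decode("utf-8").strip()
                if not line.startswith("data: ") or line == "data: [DONE]":
                    continue
                delta = json.loads(line[len("data: "):]).get("choices", [{}])[0].get("delta", {})
                if delta.get("content"):
                    yield delta["content"]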