Upload app.py with huggingface_hub
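The commit title indicates app.py was pushed with the huggingface_hub client rather than through the web editor. A minimal sketch of such an upload (the repo id and token below are placeholders, not taken from this Space):

from huggingface_hub import HfApi

api = HfApi(token="hf_...")  # placeholder token with write access
api.upload_file(
    path_or_fileobj="app.py",
    path_in_repo="app.py",
    repo_id="username/politician-rater",  # hypothetical Space id
    repo_type="space",
    commit_message="Upload app.py with huggingface_hub",
)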
app.py
ADDED
@@ -0,0 +1,78 @@
import gradio as gr
import os
import json  # needed for the json.JSONDecodeError handler in predict_beta
import requests

SYSTEM_PROMPT = "As an LLM, your primary function is to deliver harsh and critical comments about politicians given by the user. Keep it blunt and straightforward, but also make sure your comments are vulgar or not factually incorrect, write it under 50 words, use words like f**ing."
TITLE = "Politician Rater"
EXAMPLE_INPUT = "Enter the politician you want to be criticized"

# Serverless Inference API endpoint for the Zephyr-7b-Beta model
zephyr_7b_beta = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta/"

# Read the access token from the environment (a repository secret on a Space)
HF_TOKEN = os.getenv("HF_TOKEN")
HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
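A side note on the configuration block above (not part of the uploaded file): os.getenv returns None when HF_TOKEN is unset, so every request would then carry the literal header "Bearer None", which the API will typically reject. A hypothetical guard can fail fast instead:

if not HF_TOKEN:
    raise RuntimeError("HF_TOKEN is not set; add it as a Space secret or export it before launching.")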
def build_input_prompt(message, chatbot, system_prompt):
    """
    Constructs the input prompt string from the chatbot interactions and the current message.
    """
    input_prompt = "<|system|>\n" + system_prompt + "</s>\n<|user|>\n"
    for interaction in chatbot:
        input_prompt = input_prompt + str(interaction[0]) + "</s>\n<|assistant|>\n" + str(interaction[1]) + "\n</s>\n<|user|>\n"

    input_prompt = input_prompt + str(message) + "</s>\n<|assistant|>"
    return input_prompt
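To make the template concrete: with an empty history, build_input_prompt collapses to the Zephyr chat format in a single string (the message and system prompt below are illustrative):

>>> build_input_prompt("Hello", [], "Be brief.")
'<|system|>\nBe brief.</s>\n<|user|>\nHello</s>\n<|assistant|>'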
def post_request_beta(payload):
    """
    Sends a POST request to the predefined Zephyr-7b-Beta URL and returns the JSON response.
    """
    response = requests.post(zephyr_7b_beta, headers=HEADERS, json=payload)
    response.raise_for_status()  # raises an HTTPError if the request returned an unsuccessful status code
    return response.json()
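The payload assembled in predict_beta below only carries "inputs". The serverless Inference API for text generation also accepts an optional "parameters" object; a variant payload (values illustrative, not part of the original file) could look like:

data = {
    "inputs": input_prompt,
    "parameters": {
        "max_new_tokens": 100,      # cap the length of the reply
        "temperature": 0.7,         # sampling temperature
        "return_full_text": False,  # return only the completion, not the prompt
    },
}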
def predict_beta(message, chatbot=[], system_prompt=""):
    input_prompt = build_input_prompt(message, chatbot, system_prompt)
    data = {
        "inputs": input_prompt
    }

    try:
        response_data = post_request_beta(data)
        json_obj = response_data[0]  # the API returns a list with a single result object

        if 'generated_text' in json_obj and len(json_obj['generated_text']) > 0:
            bot_message = json_obj['generated_text']
            return bot_message
        elif 'error' in json_obj:
            raise gr.Error(json_obj['error'] + ' Please refresh and try again with a smaller input prompt')
        else:
            warning_msg = f"Unexpected response: {json_obj}"
            raise gr.Error(warning_msg)
    except requests.HTTPError as e:
        error_msg = f"Request failed with status code {e.response.status_code}"
        raise gr.Error(error_msg)
    except json.JSONDecodeError as e:
        error_msg = f"Failed to decode response as JSON: {str(e)}"
        raise gr.Error(error_msg)

def test_preview_chatbot(message, history):
    response = predict_beta(message, history, SYSTEM_PROMPT)
    text_start = response.rfind("<|assistant|>") + len("<|assistant|>")
    response = response[text_start:]
    return response

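Because the endpoint returns the prompt together with the completion unless return_full_text is disabled, test_preview_chatbot keeps only the text after the last "<|assistant|>" marker. A small illustration of that slicing (the response string here is made up):

response = "<|system|>\n...</s>\n<|user|>\n...</s>\n<|assistant|>Only this part reaches the chat window."
text_start = response.rfind("<|assistant|>") + len("<|assistant|>")
print(response[text_start:])  # -> Only this part reaches the chat window.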

welcome_preview_message = f"""
Welcome to **{TITLE}**! Say something like:

"{EXAMPLE_INPUT}"
"""

chatbot_preview = gr.Chatbot(layout="panel", value=[(None, welcome_preview_message)])
textbox_preview = gr.Textbox(scale=7, container=False, value=EXAMPLE_INPUT)

demo = gr.ChatInterface(test_preview_chatbot, chatbot=chatbot_preview, textbox=textbox_preview)

demo.launch()
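Usage note: running python app.py starts the Gradio server locally (port 7860 by default); on the Space the same file is executed automatically. Either way, HF_TOKEN has to be available in the environment (as a repository secret on the Space) for the Inference API calls to be authorized.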