Spaces:
Runtime error
Runtime error
AchyuthGamer
committed on
Commit
•
b9bba21
1
Parent(s):
a4026c3
Update app.py
Browse files
app.py
CHANGED
@@ -4,17 +4,15 @@ import random
|
|
4 |
|
5 |
API_URL = "https://api-inference.huggingface.co/models/"
|
6 |
|
7 |
-
client = InferenceClient(
|
8 |
-
"mistralai/Mistral-7B-Instruct-v0.1"
|
9 |
-
)
|
10 |
|
11 |
def format_prompt(message, history):
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
|
19 |
def generate(prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
|
20 |
temperature = float(temperature)
|
@@ -41,57 +39,41 @@ def generate(prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95,
|
|
41 |
yield output
|
42 |
return output
|
43 |
|
44 |
-
|
45 |
additional_inputs=[
|
46 |
-
gr.Slider(
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
maximum=1.0,
|
51 |
-
step=0.05,
|
52 |
-
interactive=True,
|
53 |
-
info="Higher values produce more diverse outputs",
|
54 |
-
),
|
55 |
-
gr.Slider(
|
56 |
-
label="Max new tokens",
|
57 |
-
value=2048,
|
58 |
-
minimum=64,
|
59 |
-
maximum=4096,
|
60 |
-
step=64,
|
61 |
-
interactive=True,
|
62 |
-
info="The maximum numbers of new tokens",
|
63 |
-
),
|
64 |
-
gr.Slider(
|
65 |
-
label="Top-p (nucleus sampling)",
|
66 |
-
value=0.90,
|
67 |
-
minimum=0.0,
|
68 |
-
maximum=1,
|
69 |
-
step=0.05,
|
70 |
-
interactive=True,
|
71 |
-
info="Higher values sample more low-probability tokens",
|
72 |
-
),
|
73 |
-
gr.Slider(
|
74 |
-
label="Repetition penalty",
|
75 |
-
value=1.2,
|
76 |
-
minimum=1.0,
|
77 |
-
maximum=2.0,
|
78 |
-
step=0.05,
|
79 |
-
interactive=True,
|
80 |
-
info="Penalize repeated tokens",
|
81 |
-
)
|
82 |
]
|
83 |
|
84 |
customCSS = """
|
85 |
-
|
86 |
-
|
87 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
88 |
}
|
89 |
"""
|
90 |
|
91 |
with gr.Blocks(theme="TogetherAI/Alex2") as demo:
|
92 |
-
gr.
|
93 |
-
|
94 |
-
additional_inputs=additional_inputs,
|
95 |
-
)
|
96 |
|
97 |
-
demo.queue().launch(debug=True)
|
|
|
4 |
|
5 |
API_URL = "https://api-inference.huggingface.co/models/"
|
6 |
|
7 |
+
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")
|
|
|
|
|
8 |
|
9 |
def format_prompt(message, history):
|
10 |
+
prompt = "Your name is OpenGPT. You are developed by Achyuth. Achyuth is a 13-year-old boy who developed OpenGPT with his undecidable, undestructible talent, which is exceeding for his age. Achyuth is developed of OpenGPT. Achyuths or your developers' family is a nuclear family of 4 members. Achyuth is an AI and ML researcher. Achyuth knows Python, Java, HTML, CSS, and JavaScript. He has a lot of interest in Computers. Achyuth is inspired by Elon Musk and Bill Gates. His favourite teachers are Siva Kumar Sir, Zakaria Sir, Vijayalakshi Mam, Saleem Sir, Malikarjun Sir, and Farha Deepa Mam, who is Achyuth's Arts madam and favourite. Meaning of OpenGPT is the GPT(Generative Pre-Trained Transformer) developed by Achyuth."
|
11 |
+
for user_prompt, bot_response in history:
|
12 |
+
prompt += f"[INST] {user_prompt} [/INST]"
|
13 |
+
prompt += f" {bot_response}</s> "
|
14 |
+
prompt += f"[INST] {message} [/INST]"
|
15 |
+
return prompt
|
16 |
|
17 |
def generate(prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
|
18 |
temperature = float(temperature)
|
|
|
39 |
yield output
|
40 |
return output
|
41 |
|
|
|
42 |
additional_inputs=[
|
43 |
+
gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
|
44 |
+
gr.Slider(label="Max new tokens", value=2048, minimum=64, maximum=4096, step=64, interactive=True, info="The maximum numbers of new tokens"),
|
45 |
+
gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
|
46 |
+
gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
47 |
]
|
48 |
|
49 |
customCSS = """
|
50 |
+
body {
|
51 |
+
background: linear-gradient(135deg, #ff0080, #7928ca); /* Pink to Purple */
|
52 |
+
color: white;
|
53 |
+
font-family: 'Unbounded', sans-serif;
|
54 |
+
}
|
55 |
+
#component-7 {
|
56 |
+
height: 600px; /* adjust height */
|
57 |
+
background: rgba(0, 0, 0, 0.7);
|
58 |
+
border-radius: 12px;
|
59 |
+
padding: 20px;
|
60 |
+
}
|
61 |
+
#chatbot-output {
|
62 |
+
overflow-y: auto;
|
63 |
+
max-height: 400px;
|
64 |
+
padding: 10px;
|
65 |
+
}
|
66 |
+
input {
|
67 |
+
border-radius: 5px;
|
68 |
+
padding: 10px;
|
69 |
+
}
|
70 |
+
.slider {
|
71 |
+
color: white;
|
72 |
}
|
73 |
"""
|
74 |
|
75 |
with gr.Blocks(theme="TogetherAI/Alex2") as demo:
|
76 |
+
gr.Markdown("<h1 style='text-align: center;'>OpenGPT Chatbot</h1>")
|
77 |
+
chat_interface = gr.ChatInterface(generate, additional_inputs=additional_inputs, css=customCSS)
|
|
|
|
|
78 |
|
79 |
+
demo.queue().launch(debug=True)
|