Update app.py
app.py CHANGED
@@ -5,8 +5,8 @@ import csv
 import os
 
 API= os.environ.get("api_key")
-
-
+print(API)
+
 
 prompt_templates = {"Default ChatGPT": ""}
 
@@ -40,7 +40,7 @@ def on_prompt_template_change(prompt_template):
     if not isinstance(prompt_template, str): return
     return prompt_templates[prompt_template]
 
-def submit_message( prompt, prompt_template, temperature, max_tokens, context_length, state):
+def submit_message( user_token, prompt, prompt_template, temperature, max_tokens, context_length, state):
 
     history = state['messages']
 
@@ -82,7 +82,7 @@ def submit_message( prompt, prompt_template, temperature, max_tokens, context_length, state):
     chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
 
     return '', chat_messages, total_tokens_used_msg, state
-
+
 
 def clear_conversation():
     return gr.update(value=None, visible=True), None, "", get_empty_state()
@@ -105,7 +105,7 @@ css = """
 
 }
 """
-
+
 with gr.Blocks(css=css) as demo:
 
     state = gr.State(get_empty_state())
@@ -125,7 +125,7 @@ with gr.Blocks(css=css) as demo:
         btn_clear_conversation = gr.Button("π Start New Conversation")
         with gr.Column():
             gr.Markdown("OpenAI API Key.", elem_id="label")
-            user_token=gr.Textbox(value=API, type="password", placeholder=
+            user_token=gr.Textbox(value=API, type="password", placeholder="qwerty", visible=True)
             prompt_template = gr.Dropdown(label="Set a custom insruction for the chatbot:", choices=list(prompt_templates.keys()))
             prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview", visible=False)
             with gr.Accordion("Advanced parameters", open=False, visible=False):
@@ -133,17 +133,17 @@ with gr.Blocks(css=css) as demo:
                max_tokens = gr.Slider(minimum=100, maximum=4096, value=1000, step=1, label="Max tokens per response")
                context_length = gr.Slider(minimum=1, maximum=10, value=2, step=1, label="Context length", info="Number of previous messages to send to the chatbot. Be careful with high values, it can blow up the token budget quickly.")
 
-
+
 
-    btn_submit.click(submit_message, [user_token,input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
+    btn_submit.click(submit_message, [ user_token,input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
     input_message.submit(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
     btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
     prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
-    user_token.
+    user_token.submit(on_token_change, inputs=[user_token], outputs=[])
 
 
     demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template], queur=False)
 
-
+
 demo.queue(concurrency_count=10)
 demo.launch(height='1000px',width='800px')
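The updated wiring calls on_token_change, which is not defined in the hunks shown here. For reference, a minimal sketch of what such a handler usually does in this kind of Gradio + OpenAI app, assuming the legacy openai<1.0 module-level client (only the handler's name comes from the diff; the body is an assumption, not part of this commit):

import openai

def on_token_change(user_token):
    # Assumed behaviour: route subsequent API calls through the key typed
    # into the password Textbox.
    openai.api_key = user_token

Because the handler is bound to user_token.submit rather than change, the key is applied when the user presses Enter in the textbox, not on every keystroke.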
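Gradio maps the inputs list to handler arguments by position, so the token textbox now has to come first in both event bindings to match the new submit_message( user_token, prompt, ...) signature. The body of submit_message is mostly outside this diff; below is a hedged sketch of the usual pattern, assuming the legacy openai<1.0 ChatCompletion API and a gpt-3.5-turbo model (only the signature, the chat_messages pairing and the final return line are taken from the file; the rest is illustrative and omits the prompt_template / system-message handling):

import openai

def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, context_length, state):
    # Illustrative only: prefer the key supplied by the user, otherwise keep
    # whatever key was already configured (e.g. from the environment).
    if user_token:
        openai.api_key = user_token

    history = state['messages']
    history.append({"role": "user", "content": prompt})

    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",                      # assumed model, not shown in the diff
        messages=history[-context_length * 2:],
        temperature=temperature,
        max_tokens=max_tokens,
    )
    history.append({"role": "assistant", "content": completion.choices[0].message.content})

    total_tokens_used_msg = f"Total tokens used: {completion.usage.total_tokens}"
    # User/assistant turns are paired exactly as in line 82 of the file.
    chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
    return '', chat_messages, total_tokens_used_msg, state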