Spaces: Runtime error

Commit 9cccd4c · 1 Parent(s): 3f7a128
Update app.py

app.py CHANGED
@@ -30,6 +30,18 @@ Answer:"""
         response += new_text
         yield response
 
+def process_generate(instruction, temperature, top_p, top_k, max_new_tokens, seed):
+
+    prompt=f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
+### Instruction:
+{instruction}
+### Response:
+Answer:"""
+    generation_config = GenerationConfig(seed=seed,temperature=temperature,top_p=top_p,top_k=top_k,max_new_tokens=max_new_tokens)
+    response = ""
+    response = model.generate(prompt=prompt,generation_config=generation_config)
+    return response
+
 
 with gr.Blocks(
     theme=gr.themes.Soft(),
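For reference, the new process_generate helper builds an Alpaca-style instruction prompt and returns the whole completion in one call, unlike the streaming handler above it, which accumulates new_text chunks and yields partial responses. A minimal sketch of the prompt it renders, using a made-up instruction value (illustration only; the template text is copied from this hunk):

# Illustration only: the exact prompt text process_generate builds for a
# hypothetical instruction (made-up example value; template from this commit).
instruction = "Summarize what MPT-7B-Instruct is."
prompt = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Response:
Answer:"""
print(prompt)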
@@ -110,6 +122,8 @@ with gr.Blocks(
     )
     with gr.Row():
         submit = gr.Button("Submit")
+    with gr.Row():
+        full_generate = gr.Button("Generate")
     with gr.Row():
         with gr.Box():
             gr.Markdown("**MPT-7B-Instruct**")
@@ -148,4 +162,17 @@ with gr.Blocks(
         outputs=output_7b,
     )
 
+    submit.full_generate(
+        process_generate,
+        inputs=[instruction, temperature, top_p, top_k, max_new_tokens,seed],
+        outputs=output_7b,
+    )
+    instruction.full_generate(
+        process_generate,
+        inputs=[instruction, temperature, top_p, top_k, max_new_tokens,seed],
+        outputs=output_7b,
+    )
+
+
+
 demo.queue(max_size=4, concurrency_count=1).launch(debug=True)
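A note on the new event wiring: the Space's status above is "Runtime error", and full_generate is not an event-listener method on Gradio components, so submit.full_generate(...) and instruction.full_generate(...) would raise an AttributeError when the script starts. A minimal sketch of what was presumably intended, assuming the goal is to run process_generate from the new Generate button (and on Enter in the instruction textbox) using Gradio's actual listener methods, mirroring the existing Submit wiring above:

# A sketch of the likely intent (assumption: the new handlers should mirror
# the existing Submit wiring, using real Gradio listener methods).
full_generate.click(    # runs when the new "Generate" button is clicked
    process_generate,
    inputs=[instruction, temperature, top_p, top_k, max_new_tokens, seed],
    outputs=output_7b,
)
instruction.submit(     # runs when Enter is pressed in the instruction box
    process_generate,
    inputs=[instruction, temperature, top_p, top_k, max_new_tokens, seed],
    outputs=output_7b,
)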