currentlyexhausted committed
Commit 30f529c
1 Parent(s): 16d2f60

Update app.py

Files changed (1)
  1. app.py +1 -31
app.py CHANGED
@@ -1,33 +1,3 @@
  import gradio as gr
- from transformers import AutoTokenizer, T5ForConditionalGeneration
 
- model_name = "allenai/t5-small-squad2-question-generation"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = T5ForConditionalGeneration.from_pretrained(model_name)
-
- def generate_questions(input_string, max_length=80, temperature=1.0, num_return_sequences=2,
-                        num_beams=4, top_k=90, top_p=0.9):
-     input_ids = tokenizer.encode(input_string, return_tensors="pt")
-     res = model.generate(input_ids, max_length=max_length, num_return_sequences=num_return_sequences,
-                          num_beams=num_beams, temperature=temperature, top_k=top_k, top_p=top_p)
-     output = tokenizer.batch_decode(res, skip_special_tokens=True)
-     return output
-
- input_text = gr.inputs.Textbox(label="Enter some text:", default="Nicejob has increased our revenue 80% since signing up")
- max_length = gr.inputs.Slider(10, 150, 80, label="Max Length")
- temperature = gr.inputs.Slider(0.0, 1.0, 1.0, step=0.05, label="Temperature")
- num_return_sequences = gr.inputs.Slider(1, 10, 2, label="Num Return Sequences")
- num_beams = gr.inputs.Slider(1, 10, 4, label="Num Beams")
- top_k = gr.inputs.Slider(0, 100, 90, label="Top-k")
- top_p = gr.inputs.Slider(0.0, 1.0, 0.9, step=0.05, label="Top-p")
- output_text = gr.outputs.Textbox(label="Generated questions:")
-
- iface = gr.Interface(
-     generate_questions,
-     inputs=[input_text, max_length, temperature, num_return_sequences, num_beams, top_k, top_p],
-     outputs=output_text,
-     title="Question Generation",
-     description="Generate questions from text using the T5-SQuAD2 model.",
- )
-
- iface.launch()
+ gr.Interface.load("models/allenai/t5-small-squad2-question-generation").launch()
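The new one-line app.py hands inference to the hosted checkpoint through Gradio's Interface.load shortcut rather than loading the T5 model locally. For local experimentation, roughly the same behaviour can be reproduced with the transformers text2text-generation pipeline; the sketch below is illustrative only, and the example passage and generation settings are assumptions rather than part of this commit.

# Minimal local sketch (assumes the transformers package is installed).
from transformers import pipeline

# Wrap the same question-generation checkpoint in a text-to-text pipeline.
qgen = pipeline("text2text-generation",
                model="allenai/t5-small-squad2-question-generation")

# Illustrative passage and generation settings (hypothetical values).
passage = "Nicejob has increased our revenue 80% since signing up"
print(qgen(passage, max_length=80, num_beams=4, num_return_sequences=2))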