import re

import gradio as gr
import wikipediaapi
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and model
model_name = "mohamedemam/QA_GeneraToR"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model.eval()

# Create a Wikipedia API instance (wikipedia-api takes a user agent as the
# first argument) and fetch an example article
wiki_wiki = wikipediaapi.Wikipedia('MyProjectName ([email protected])', 'en')
page_py = wiki_wiki.page('Lionel Messi')

# Split the article into paragraphs, dropping empty lines
example_contexts = [p.strip() for p in page_py.text.split("\n") if p.strip()]
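# Optional quick check of the raw model (a sketch; the exact question/answer
# pair the model emits is not guaranteed, so this stays commented out):
# ids = tokenizer("what: The Nile is the longest river in Africa.", return_tensors="pt")
# print(tokenizer.decode(model.generate(**ids, max_length=60)[0], skip_special_tokens=True))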
# Recommended words for users to choose from
recommended_words = [
    "did",
    "what",
    "how",
    "what was",
    "was",
    "when",
    "who",
    "what did",
    "are",
    "where",
    "what is",
    "why",
    "",
    "were",
    "is",
    "what were",
    "which",
    "what are",
    "does",
    "what does",
    "has",
    "can",
    "do",
    "in what",
    "what can",
    "what do",
    "have",
    "what has",
    "had",
    "on what",
    "whom",
    "for what",
    "could",
    "what have",
    "what had",
    "if",
]
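# Each choice above becomes the task prefix in generate_qa below, e.g. the
# input "what: <context>" asks the model for a "what" question plus its answer;
# the empty string sends the context with no question-word prefix.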
# Generate questions and answers with configurable decoding parameters
def generate_qa(text, context, recommended_word, temperature, top_p, num_seq, l_p, num_b):
    # Prefix the combined text and context with the chosen question word
    input_text = f"{recommended_word}: {text + context}"
    input_text = input_text.replace("\n", " ").lower()
    inputs = tokenizer(input_text, return_tensors="pt")
    # Beam-sample with the user-supplied parameters;
    # num_return_sequences must not exceed num_beams
    output = model.generate(
        **inputs,
        temperature=temperature,
        top_p=top_p,
        num_return_sequences=num_seq,
        max_length=100,
        num_beams=num_b,
        length_penalty=l_p,
        do_sample=True,
    )
    # Decode all sequences and deduplicate before displaying
    generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)
    formatted_output = "\n\n".join(set(generated_text))
    return formatted_output
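# Example call outside the UI (illustrative parameter values; note
# num_seq <= num_b as required above):
# print(generate_qa("", example_contexts[0], "what",
#                   temperature=1.0, top_p=0.9, num_seq=2, l_p=1.0, num_b=4))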
iface = gr.Interface(
    fn=generate_qa,
    inputs=[
        gr.Textbox(label="Your Text"),
        gr.Dropdown([" "] + example_contexts, label="Choose an Example"),
        gr.Radio(recommended_words, label="Choose a Recommended Word"),
        gr.Slider(minimum=0.0, maximum=5, value=2.1, step=0.01, label="Temperature"),
        gr.Slider(minimum=0.0, maximum=1, value=0.5, step=0.01, label="Top-p"),
        gr.Slider(minimum=1, maximum=20, value=3, step=1, label="Number of sequences"),
        gr.Slider(minimum=0.01, maximum=5, value=3, step=0.01, label="Length penalty"),
        gr.Slider(minimum=1, maximum=20, value=3, step=1, label="Number of beams"),
    ],
    outputs=gr.Textbox(label="Generated Output"),
    title="Question Generation and Answering",
    description="Select an example context, choose a recommended word, and adjust the generation parameters. The model will generate questions and answers.",
)
# Launch the interface
iface.launch()
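# On Hugging Face Spaces, launch() with no arguments is sufficient; when
# running locally you can pass share=True for a temporary public URL:
# iface.launch(share=True)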