# QA_GeneraToR / app.py
# Author: mohamedemam — "Update app.py" (commit 574cab5)
import re

import gradio as gr
import wikipediaapi
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and model once at startup.
model_name = "mohamedemam/QA_GeneraToR"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model.eval()  # inference only: disables dropout / training-mode layers

# Fetch a sample Wikipedia article whose paragraphs serve as ready-made
# example contexts in the UI dropdown.
wiki_wiki = wikipediaapi.Wikipedia('MyProjectName (merlin@example.com)', 'en')
page_py = wiki_wiki.page('Leo messi')
# split("\n") already strips the newlines, but keep a defensive replace in
# case any embedded newline survives (mirrors the original behavior).
example_contexts = [p.replace("\n", " ") for p in page_py.text.split("\n")]
# Question-word prefixes the user can pick to steer generation.
# The empty string lets the model choose its own opening word.
recommended_words = [
    "did", "what", "how", "what was", "was", "when", "who", "what did",
    "are", "where", "what is", "why", "", "were", "is", "what were",
    "which", "what are", "does", "what does", "has", "can", "do",
    "in what", "what can", "what do", "have", "what has", "had",
    "on what", "whom", "for what", "could", "what have", "what had", "if",
]
# Function to generate questions and answers with configurable parameters
def generate_qa(text, context, recommended_word, temperature, top_p, num_seq, l_p, num_b):
    """Generate question/answer strings for ``text`` plus the chosen context.

    Parameters
    ----------
    text : str
        Free-form text typed by the user.
    context : str
        Example context selected from the dropdown (may be blank).
    recommended_word : str
        Question-word prefix used to steer the kind of question produced.
    temperature : float
        Sampling temperature for ``model.generate``.
    top_p : float
        Nucleus-sampling cutoff.
    num_seq : int
        Number of sequences to return.
    l_p : float
        Length penalty applied during beam search.
    num_b : int
        Number of beams.

    Returns
    -------
    str
        Unique generated sequences joined by blank lines.
    """
    import torch  # local import so this block is self-contained

    # The model expects lowercase "<question-word>: <passage>" prompts.
    prompt = f"{recommended_word}: {text + context}"
    prompt = re.sub(r"\n", " ", prompt).lower()

    # truncation=True keeps overly long contexts within the model's window
    # instead of raising / silently degrading on huge inputs.
    input_ids = tokenizer(prompt, return_tensors="pt", truncation=True)

    # Inference only — no gradient bookkeeping needed.
    with torch.no_grad():
        output = model.generate(
            **input_ids,
            temperature=temperature,
            top_p=top_p,
            num_return_sequences=num_seq,
            max_length=100,
            num_beams=num_b,
            length_penalty=l_p,
            do_sample=True,
        )

    generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)
    # De-duplicate identical sequences before displaying.
    return "\n\n".join(set(generated_text))
# Build the web UI. Uses the current Gradio component API: the old
# ``gr.inputs``/``gr.outputs`` namespaces and the ``default=`` kwarg were
# deprecated and removed in modern Gradio releases — components are now
# top-level (``gr.Slider``) and take ``value=`` for the initial setting.
iface = gr.Interface(
    fn=generate_qa,
    inputs=[
        "text",
        gr.Dropdown([" "] + example_contexts, label="Choose an Example"),
        gr.Radio(recommended_words, label="Choose a Recommended Word"),
        gr.Slider(minimum=0.0, maximum=5, value=2.1, step=0.01, label="Temperature"),
        gr.Slider(minimum=0.0, maximum=1, value=0.5, step=0.01, label="Top-p"),
        gr.Slider(minimum=1, maximum=20, value=3, step=1, label="Number of sequences"),
        gr.Slider(minimum=0.01, maximum=5, value=3, step=0.01, label="Length penalty"),
        gr.Slider(minimum=1, maximum=20, value=3, step=1, label="Number of beams"),
    ],
    outputs=gr.Textbox(label="Generated Output"),
    title="Question Generation and Answering",
    description=(
        "Select an example context, choose a recommended word, adjust "
        "temperature and top-p. The model will generate questions and answers."
    ),
)

# Launch the interface
iface.launch()