# Gradio demo: Persian extractive question answering with an XLM-RoBERTa model.
import gradio as gr
from transformers import AutoTokenizer
from transformers import BertForQuestionAnswering
from transformers import RobertaForQuestionAnswering
from transformers import pipeline
# Load the fine-tuned Persian XLM-RoBERTa QA model and its tokenizer from the
# Hugging Face Hub (downloads weights on first run), then wrap both in a
# question-answering pipeline used by Q_A below.
model1 = RobertaForQuestionAnswering.from_pretrained("pedramyazdipoor/persian_xlm_roberta_large")
tokenizer1 = AutoTokenizer.from_pretrained("pedramyazdipoor/persian_xlm_roberta_large")
roberta_large = pipeline(task='question-answering', model=model1, tokenizer=tokenizer1)
def Q_A(question, context):
    """Answer *question* from the given *context* passage.

    Gradio passes the two textbox values positionally; per their labels the
    first textbox is the question and the second the source text, so the
    parameters are ordered (question, context) to match. (The original
    signature was (contetx, question): the misspelled name raised a NameError
    on every call, and the order swapped the two inputs.)

    Returns the answer span extracted by the QA pipeline (str).
    """
    answer_pedram = roberta_large({"question": question, "context": context})['answer']
    return answer_pedram
# UI copy for the Gradio interface: an English title plus Persian
# description ("question-and-answer language processing system") and
# article ("trained with the RoBERTa language model").
article = "آموزش داده شده با مدل زبانی روبرتا"
description = "سیستم پردازش زبانی پرسش و پاسخ"
title = "Question and answer based on Roberta model"
# Build the web UI: two right-aligned Persian text inputs (question first,
# then source passage) mapped through Q_A to a single text output.
# NOTE: fixed the label typo "خوذ" -> "خود" ("your").
demo = gr.Interface(fn=Q_A,
                    inputs=[gr.Textbox(label='پرسش خود را وارد کنید:', show_label=True, text_align='right'),
                            gr.Textbox(label='متن منبع خود را وارد کنید', show_label=True, text_align='right')],
                    outputs=gr.Text(),  # single output: the extracted answer string
                    title=title,
                    description=description,
                    article=article)
# Launch the demo with a temporary public share link.
# (Removed the stray trailing "|" token, which was a syntax error.)
demo.launch(share=True)