|
import gradio as gr

from transformers import AutoTokenizer
from transformers import BertForQuestionAnswering
from transformers import RobertaForQuestionAnswering
from transformers import pipeline
|
|
|
# Load the Persian XLM-RoBERTa-large QA model and its matching tokenizer
# from the Hugging Face Hub (downloads weights on first run).
model1 = RobertaForQuestionAnswering.from_pretrained("pedramyazdipoor/persian_xlm_roberta_large")

tokenizer1 = AutoTokenizer.from_pretrained("pedramyazdipoor/persian_xlm_roberta_large")

# Wrap model + tokenizer in an extractive question-answering pipeline.
# NOTE(review): `pipeline` is not imported in this file's import block —
# it needs `from transformers import pipeline` or this line raises NameError.
roberta_large = pipeline(task='question-answering', model=model1, tokenizer=tokenizer1)
|
|
|
def Q_A(question, context):
    """Extract an answer to `question` from `context` via the QA pipeline.

    Args:
        question: The question text (first Gradio textbox).
        context: The source passage to search for the answer (second textbox).

    Returns:
        The answer string extracted by the `roberta_large` pipeline.
    """
    # Fixes two bugs in the original: the first parameter was misspelled
    # ("contetx") so the body's `context` raised NameError, and the
    # parameter order was (context, question) while the Gradio inputs list
    # passes the question textbox first — both corrected here.
    answer_pedram = roberta_large({"question": question, "context": context})['answer']
    return answer_pedram
|
|
|
|
|
|
|
|
|
# Static UI copy displayed by the Gradio interface.
title = "Question and answer based on Roberta model"

# Persian: "A question-and-answer language processing system."
description = "سیستم پردازش زبانی پرسش و پاسخ"

# Persian: "Trained with the RoBERTa language model."
article = "آموزش داده شده با مدل زبانی روبرتا"
|
|
|
|
|
# Build the Gradio UI: two right-aligned textboxes (question first, then the
# source context) feeding Q_A, with a plain-text answer output.
demo = gr.Interface(
    fn=Q_A,
    inputs=[
        # Question input; fixes the typo "خوذ" -> "خود" ("your") in the label.
        gr.Textbox(label='پرسش خود را وارد کنید:', show_label=True, text_align='right'),
        # Source-context input.
        gr.Textbox(label='متن منبع خود را وارد کنید', show_label=True, text_align='right'),
    ],
    outputs=gr.Text(),
    title=title,
    description=description,
    article=article,
)

# share=True additionally serves the demo through a public Gradio link.
demo.launch(share=True)