File size: 1,437 Bytes
0fdf9b7
 
 
 
 
 
d9b9e27
 
 
 
 
 
 
 
 
0fdf9b7
d9b9e27
0fdf9b7
d9b9e27
 
 
 
 
0fdf9b7
 
d9b9e27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e2172b4
d9b9e27
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
# Benchmarks: NT, Why is blood important?
# Model selection: exactly one model_name line is uncommented at a time.
# The trailing numbers look like rough per-question latency benchmarks for
# the question above — units not stated in this file; TODO confirm.
#model_name = "deepset/roberta-base-squad2"       # 180
#model_name = "deepset/deberta-v3-large-squad2"  # est. 4X
model_name = "deepset/tinyroberta-squad2"       # 86
#model_name = "deepset/minilm-uncased-squad2"    # 96
#model_name = "deepset/electra-base-squad2"      # 185 (nice wordy results)


# Install Dependences
# Use my Conda qna environment, then you're all set
# !pip install transformers
# !pip install ipywidgets 
# !pip install gradio  # see setup for installing gradio

import gradio as gr
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

# Build the Hugging Face question-answering pipeline once at module load.
# The same checkpoint name supplies both the model weights and the tokenizer.
nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)

def question_answer(context_filename, question):
    """Answer a natural-language question from the text of a local file.

    Parameters
    ----------
    context_filename : str
        Path to a plain-text file whose full contents are used as the
        question-answering context.
    question : str
        The question to answer from that context.

    Returns
    -------
    str
        The answer span extracted by the global ``nlp`` pipeline.
    """
    # Explicit encoding: the platform default may mis-decode non-ASCII
    # characters in the corpus files.
    with open(context_filename, encoding='utf-8') as f:
        context = f.read()
    # Run inference after the file is closed — the pipeline only needs
    # the in-memory text, so don't hold the handle during inference.
    result = nlp({'question': question, 'context': context})
    return result['answer']

# Gradio UI: choose a context file from the dropdown, type a question,
# and read the model's extracted answer in the output textbox.
context_choices = [
    'spiderman.txt',
    'world-john.txt',
    'world-romans.txt',
    'world-nt.txt',
    'world-ot.txt',
]  # 'lotr01.txt'

demo = gr.Interface(
    fn=question_answer,
    inputs=[gr.Dropdown(context_choices), "text"],
    outputs="textbox",
)

demo.launch(share=False)