# https://huggingface.co/transformers/main_classes/pipelines.html
# https://huggingface.co/models?filter=conversational
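#
# Extractive Q&A demo: pick a context text file from a dropdown, type a
# question, and a SQuAD-fine-tuned model returns the answer span via a
# Gradio UI.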



# Install Dependencies
# Use my Conda qna environment, then you're all set
# !pip install transformers
# !pip install ipywidgets
# !pip install gradio  # see setup for installing gradio

# Import Dependencies
from transformers import pipeline 
import gradio as gr

# Create the Q&A pipeline
nlp = pipeline('question-answering', model='deepset/roberta-base-squad2', tokenizer='deepset/roberta-base-squad2')
#nlp = pipeline('question-answering', model='bert-large-uncased-whole-word-masking-finetuned-squad', tokenizer='bert-large-uncased-whole-word-masking-finetuned-squad')
#nlp = pipeline("question-answering", model='distilbert-base-cased-distilled-squad')
#nlp = pipeline("question-answering", model='distilbert-base-uncased-distilled-squad')
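
# Quick sanity check (a sketch; the question/context strings below are
# illustrative, not from the context files):
# result = nlp({'question': 'Who walked on the moon?',
#               'context': 'Neil Armstrong walked on the moon in July 1969.'})
# print(result)  # dict with 'score', 'start', 'end', and 'answer' keys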

def question_answer(context_filename, question):
    """Return an NLP answer to the question, using the named text file as context."""
    with open(context_filename) as f:
        context = f.read()
    nlp_input = {'question': question, 'context': context}
    result = nlp(nlp_input)
    return result['answer']
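
# Example call (a sketch; assumes the text files listed in the dropdown below
# sit next to this script):
# print(question_answer('spiderman.txt', 'Who is Peter Parker?'))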

demo = gr.Interface(
    fn=question_answer,
    # inputs=gr.Textbox(lines=2, placeholder='Enter your question'),
    inputs=[
        gr.Dropdown([
            'spiderman.txt',
            'world-john.txt',
            'world-romans.txt',
            'world-nt.txt',
            'world-ot.txt']),  # 'lotr01.txt'
        "text"
    ],
    outputs="textbox")

demo.launch(share=True)
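
# Note: share=True also serves the app at a temporary public *.gradio.live
# URL in addition to the local one; omit it to keep the demo local-only.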