Evelyn18 committed on
Commit
bc3eb3d
·
1 Parent(s): 3246640

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -31
app.py CHANGED
@@ -1,33 +1,44 @@
1
- import gradio as gr
2
-
3
- title = "ELECTRA"
4
-
5
- description = "Gradio Demo for ELECTRA. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
6
-
7
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2003.10555' target='_blank'>ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators</a></p>"
8
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
- examples = [
11
- ["My name is Sarah and I live in London","electra_large_discriminator_squad2_512","Where do I live?"]
12
- ]
13
-
14
- io1 = gr.Interface.load("huggingface/ahotrod/electra_large_discriminator_squad2_512")
15
-
16
- io2 = gr.Interface.load("huggingface/deepset/electra-base-squad2")
17
-
18
- def inference(context, model,question):
19
- if model == "electra_large_discriminator_squad2_512":
20
- outlabel = io1(context,question)
21
- else:
22
- outlabel = io2(context,question)
23
- return outlabel
24
-
25
 
26
- gr.Interface(
27
- inference,
28
- [gr.inputs.Textbox(label="Context",lines=10),gr.inputs.Dropdown(choices=["electra_large_discriminator_squad2_512","electra-base-squad2"], type="value", default="electra_large_discriminator_squad2_512", label="model"),gr.inputs.Textbox(label="Question Answering")],
29
- [gr.outputs.Textbox(label="Output")],
30
- examples=examples,
31
- article=article,
32
- title=title,
33
- description=description).launch(enable_queue=True, cache_examples=True)
 
 
 
 
 
 
1
+ from transformers import AutoModelForCausalLM, AutoTokenizer
2
+ import torch
3
+
4
+ tokenizer = AutoTokenizer.from_pretrained("Evelyn18/distilbert-base-uncased-finetuned-squad")
5
+ model = AutoModelForCausalLM.from_pretrained("Evelyn18/distilbert-base-uncased-finetuned-squad")
6
+
7
+ def predict(input, history=[]):
8
+ # tokenize the new input sentence
9
+ new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
10
+
11
+ # append the new user input tokens to the chat history
12
+ bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
13
+
14
+ # generate a response
15
+ history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
16
+
17
+ # convert the tokens to text, and then split the responses into lines
18
+ response = tokenizer.decode(history[0]).split("<|endoftext|>")
19
+ response.remove("")
20
+
21
+ # write some HTML
22
+ html = "<div class='chatbot'>"
23
+ for m, msg in enumerate(response):
24
+ cls = "user" if m%2 == 0 else "bot"
25
+ html += "<div class='msg {}'> {}</div>".format(cls, msg)
26
+ html += "</div>"
27
+
28
+ return html, history
29
 
30
+ import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
+ css = """
33
+ .chatbox {display:flex;flex-direction:column}
34
+ .msg {padding:4px;margin-bottom:4px;border-radius:4px;width:80%}
35
+ .msg.user {background-color:cornflowerblue;color:white}
36
+ .msg.bot {background-color:lightgray;align-self:self-end}
37
+ .footer {display:none !important}
38
+ """
39
+
40
+ gr.Interface(fn=predict,
41
+ theme="default",
42
+ inputs=[gr.inputs.Textbox(placeholder="How are you?"), "state"],
43
+ outputs=["html", "state"],
44
+ css=css).launch()