import gradio as gr
import torch
from transformers import AutoTokenizer, AutoConfig, AutoModel, AutoModelForSequenceClassification
description_sentence = "<h3>Demo EmotioNL</h3>\nThis demo allows you to analyse the emotion in a sentence."
description2 = "<h3>Demo EmotioNL</h3>\nThis demo allows you to analyse the emotions in a dataset.\nThe data should be in TSV format with two named columns: the first column (id) should contain the sentence IDs, and the second column (text) should contain the actual texts. Optionally, there can be a third column named 'date', which specifies the date associated with the text (e.g., tweet date). This column is necessary when the options 'emotion distribution over time' and 'peaks' are selected."
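# Illustrative only (not part of the original app): a minimal sketch of the TSV
# layout that description2 asks for, assuming the column headers "id", "text"
# and the optional "date". The example sentences are made-up placeholders.
# example_rows = [
#     "id\ttext\tdate",
#     "1\tWat een prachtige dag!\t2021-06-01",
#     "2\tDit maakt me echt verdrietig.\t2021-06-02",
# ]
# with open("example_dataset.tsv", "w", encoding="utf-8") as f:
#     f.write("\n".join(example_rows) + "\n")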
inference_modelpath = "model/checkpoint-128"
def what_happened(text, file_object, option_list):
    # Placeholder handler for the combined demo: reports what was submitted.
    if file_object:
        output = "You uploaded a file."
        if len(option_list) > 0:
            output = output + "\nYou selected these options:\n- " + "\n- ".join(option_list)
    else:
        output = "Normally, this demo should analyse the emotions in this text:\n" + text
        if len(option_list) > 0:
            output = output + "\nYou can only select options when uploading a dataset."
    return output
def what_happened1(text):
    output = "Normally, this demo should analyse the emotions in this text:\n" + text
    return output
def what_happened2(file_object, option_list):
    # Placeholder handler for the dataset demo: reports the upload and which options were selected.
    if file_object:
        output1 = "You uploaded a file."
        if len(option_list) > 0:
            output1 = output1 + "\nYou selected these options:\n- " + "\n- ".join(option_list)
    else:
        output1 = "You should upload a file."
    output2 = output3 = output4 = output5 = "This option was not selected."
    if "emotion frequencies" in option_list:
        output2 = "This option was selected."
    if "emotion distribution over time" in option_list:
        output3 = "This option was selected."
    if "peaks" in option_list:
        output4 = "This option was selected."
    if "topics" in option_list:
        output5 = "This option was selected."
    return [output1, output2, output3, output4, output5]
def inference_sentence(text):
    # Load the fine-tuned tokenizer and model from the local checkpoint.
    tokenizer = AutoTokenizer.from_pretrained(inference_modelpath)
    model = AutoModelForSequenceClassification.from_pretrained(inference_modelpath)
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():  # run the model without tracking gradients
        logits = model(**inputs).logits
    predicted_class_id = logits.argmax().item()
    output = model.config.id2label[predicted_class_id]  # map the predicted class index to its emotion label
    return output
iface0 = gr.Interface(
    fn=what_happened,
    inputs=[
        gr.Textbox(
            label="Enter a sentence",
            lines=1,
            value="Your name"),
        gr.File(
            label="Or upload a dataset"),
        gr.CheckboxGroup(
            ["emotion frequencies", "emotion distribution over time", "peaks", "topics"],
            label="Select options")
    ],
    outputs="text")
iface_sentence = gr.Interface(
    fn=inference_sentence,
    description=description_sentence,
    inputs=gr.Textbox(
        label="Enter a sentence",
        lines=1),
    outputs="text")
iface2 = gr.Interface(
    fn=what_happened2,
    description=description2,
    inputs=[
        gr.File(
            label="Upload a dataset"),
        gr.CheckboxGroup(
            ["emotion frequencies", "emotion distribution over time", "peaks", "topics"],
            label="Select options")
    ],
    # outputs=["text", "text", "text", "text", "text"])
    outputs=[
        gr.Textbox(label="Output file"),
        gr.Textbox(label="Emotion frequencies"),
        gr.Textbox(label="Emotion distribution over time"),
        gr.Textbox(label="Peaks"),
        gr.Textbox(label="Topics")
    ])
# Combine the sentence-level demo and the dataset demo into one tabbed app
# (the original referenced an undefined iface1; iface_sentence is the sentence tab defined above).
iface = gr.TabbedInterface([iface_sentence, iface2], ["Sentence", "Dataset"])
iface.launch()