import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
description_sentence = "<h3>Demo EmotioNL</h3>\nThis demo allows you to analyse the emotion in a sentence."
description2 = "<h3>Demo EmotioNL</h3>\nThis demo allows you to analyse the emotions in a dataset.\nThe data should be in TSV format with two named columns: 'id', containing the sentence IDs, and 'text', containing the actual texts. Optionally, a third column named 'date' can specify the date associated with each text (e.g., the tweet date); this column is required when the options 'emotion distribution over time' or 'peaks' are selected."
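# Illustrative example of the expected TSV layout described above (column names
# taken from the description; values are made up, and the 'date' column is optional):
#
#   id <TAB> text <TAB> date
#   1  <TAB> Wat een prachtige dag! <TAB> 2022-01-01
#   2  <TAB> Dit is echt verschrikkelijk. <TAB> 2022-01-02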
inference_modelpath = "model/checkpoint-128"
def what_happened(text, file_object, option_list):
    """Placeholder that reports which inputs and options were provided."""
    if file_object:
        output = "You uploaded a file."
        if len(option_list) > 0:
            output = output + "\nYou selected these options:\n- " + "\n- ".join(option_list)
    else:
        output = "Normally, this demo should analyse the emotions in this text:\n" + text
        if len(option_list) > 0:
            output = output + "\nYou can only select options when uploading a dataset."
    return output
def what_happened1(text):
    output = "Normally, this demo should analyse the emotions in this text:\n" + text
    return output
def what_happened2(file_object, option_list):
    """Placeholder that reports the uploaded file and which analysis options were selected."""
    if file_object:
        output1 = "You uploaded a file."
        if len(option_list) > 0:
            output1 = output1 + "\nYou selected these options:\n- " + "\n- ".join(option_list)
    else:
        output1 = "You should upload a file."
    output2 = output3 = output4 = output5 = "This option was not selected."
    if "emotion frequencies" in option_list:
        output2 = "This option was selected."
    if "emotion distribution over time" in option_list:
        output3 = "This option was selected."
    if "peaks" in option_list:
        output4 = "This option was selected."
    if "topics" in option_list:
        output5 = "This option was selected."
    return [output1, output2, output3, output4, output5]
def inference_sentence(text):
    """Predict the emotion label for a single sentence with the fine-tuned model."""
    # Note: the tokenizer and model are (re)loaded on every call; loading them once
    # at module level would be faster for repeated requests.
    tokenizer = AutoTokenizer.from_pretrained(inference_modelpath)
    model = AutoModelForSequenceClassification.from_pretrained(inference_modelpath)
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():  # run the model without gradient tracking
        logits = model(**inputs).logits
    predicted_class_id = logits.argmax().item()
    output = model.config.id2label[predicted_class_id]
    return output
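# Illustrative usage (the returned label depends on the fine-tuned model's
# id2label mapping at inference_modelpath):
#   inference_sentence("Wat een heerlijke dag!")  # e.g. "joy"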
# Combined demo interface (kept for reference; not included in the tabbed app below)
iface0 = gr.Interface(
    fn=what_happened,
    inputs=[
        gr.Textbox(
            label="Enter a sentence",
            lines=1,
            value="Your name"),
        gr.File(
            label="Or upload a dataset"),
        gr.CheckboxGroup(
            ["emotion frequencies", "emotion distribution over time", "peaks", "topics"],
            label="Select options")
    ],
    outputs="text")
iface_sentence = gr.Interface(
    fn=inference_sentence,
    description=description_sentence,
    inputs=gr.Textbox(
        label="Enter a sentence",
        lines=1),
    outputs="text")
iface2 = gr.Interface(
    fn=what_happened2,
    description=description2,
    inputs=[
        gr.File(
            label="Upload a dataset"),
        gr.CheckboxGroup(
            ["emotion frequencies", "emotion distribution over time", "peaks", "topics"],
            label="Select options")
    ],
    outputs=[
        gr.Textbox(label="Output file"),
        gr.Textbox(label="Emotion frequencies"),
        gr.Textbox(label="Emotion distribution over time"),
        gr.Textbox(label="Peaks"),
        gr.Textbox(label="Topics")
    ])
iface = gr.TabbedInterface([iface_sentence, iface2], ["Sentence", "Dataset"])

iface.launch()