j-hartmann committed on
Commit
8980160
1 Parent(s): 8e113ea

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -0
app.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import pandas as pd
import numpy as np
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer

# Load tokenizer and model once at module import, and wrap the model in a
# bare Trainer (no TrainingArguments) — used below purely for batched
# inference via trainer.predict(), never for training.
model_name = "j-hartmann/emotion-english-distilroberta-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
trainer = Trainer(model=model)
# summary function - single gradio function interface
def bulk_function(filename):
    """Predict an emotion label and per-class scores for each row of a CSV.

    Parameters
    ----------
    filename : file object from gradio's File input; only its ``.name``
        attribute (the path of the uploaded temp file on disk) is used.

    Returns
    -------
    str
        Path of the CSV file the predictions were written to; picked up by
        the gradio "file" output component for download.
    """
    import csv
    import os

    # Minimal dataset wrapper over pre-tokenized texts so that
    # Trainer.predict() can batch over them.
    class SimpleDataset:
        def __init__(self, tokenized_texts):
            self.tokenized_texts = tokenized_texts

        def __len__(self):
            return len(self.tokenized_texts["input_ids"])

        def __getitem__(self, idx):
            return {k: v[idx] for k, v in self.tokenized_texts.items()}

    # Read the uploaded CSV. Expected layout: a header row, with the text in
    # the LAST column (e.g. "Unnamed: 0,text" or "index,text").
    # Using the csv module instead of a naive split(",")[-1] keeps texts
    # intact when they are quoted and contain commas themselves; for plain
    # "index,text" rows the result is identical to the old behavior.
    with open(filename.name, "r", newline="") as f:
        rows = list(csv.reader(f))
    lines_s = [row[-1] for row in rows[1:] if row]  # [1:] skips the header

    # Tokenize texts and create the prediction dataset.
    tokenized_texts = tokenizer(lines_s, truncation=True, padding=True)
    pred_dataset = SimpleDataset(tokenized_texts)

    # Run predictions over the whole dataset in one go.
    predictions = trainer.predict(pred_dataset)

    # Hard class ids and their human-readable labels.
    preds = predictions.predictions.argmax(-1)
    labels = pd.Series(preds).map(model.config.id2label)

    # Softmax over the raw logits -> per-class probabilities; the winning
    # class probability becomes the "score" column.
    logits = predictions.predictions
    probs = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)
    scores = probs.max(1)

    # One probability column per emotion, in the order defined by the model
    # config (replaces the previous hard-coded anger/disgust/... 0..6 index
    # mapping, which would silently break if label order or count changed).
    emotion_cols = [model.config.id2label[i] for i in range(probs.shape[1])]
    score_df = pd.DataFrame(probs, columns=emotion_cols)

    # Assemble the output frame: text, class id, label, winning score,
    # then the full per-emotion probability columns.
    df = pd.DataFrame({"text": lines_s, "pred": preds, "label": labels, "score": scores})
    df = pd.concat([df, score_df], axis=1)

    # Save results next to the input file. os.path.splitext only strips the
    # final extension; the old split(".")[0] truncated at the FIRST dot and
    # broke on paths like "/tmp/dir.v2/file.csv".
    out_name = os.path.splitext(filename.name)[0] + "_emotion_predictions" + ".csv"
    df.to_csv(out_name)

    # Return the output path for the gradio "file" output component.
    return out_name
# Wire the prediction function into a gradio UI: a single uploaded file in,
# the generated predictions CSV out. debug=True surfaces errors in the UI/log.
# NOTE(review): gr.inputs.* is the legacy gradio input API, and label="str"
# looks like a placeholder rather than an intended label — confirm against
# the gradio version pinned for this Space before modernizing.
gr.Interface(bulk_function, [gr.inputs.File(file_count="single", type="file", label="str", optional=False),],["file"],
    ).launch(debug=True)