ChrisLalk committed
Commit 3d3d808 · verified · 1 Parent(s): 2cc1a3a

Update README.md

Files changed (1)
  1. README.md +90 -0
README.md CHANGED
@@ -50,3 +50,93 @@ This is basically the German translation of arpanghoshal/EmoRoBERTa. We used the

 Use the code below to get started with the model.

+ ```python
+ # pip install transformers[torch]
+ # pip install pandas transformers numpy tqdm openpyxl
+ import pandas as pd
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer
+ import numpy as np
+ from tqdm import tqdm
+ import time
+ import os
+ from transformers import DataCollatorWithPadding
+ import json
+
+ # Base path plus the paths to the model folder and the data folder
+ base_path = "/share/users/staff/c/clalk/Emotionen"
+ model_path = os.path.join(base_path, 'Modell')
+ file_path = os.path.join(base_path, 'Datensatz')
+
+ # Tokenizer of the base model; classification head with 28 emotion labels
+ MODEL = "intfloat/multilingual-e5-large"
+ tokenizer = AutoTokenizer.from_pretrained(MODEL, do_lower_case=False)
+ model = AutoModelForSequenceClassification.from_pretrained(
+     model_path,
+     from_tf=False,
+     from_flax=False,
+     trust_remote_code=False,
+     num_labels=28,
+     ignore_mismatched_sizes=True
+ )
+ data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
+
+ # Load the data set with the patient speech turns
+ os.chdir(file_path)
+ df_full = pd.read_excel("speech_turns_pat.xlsx", index_col=None)
+
+ if 'Unnamed: 0' in df_full.columns:
+     df_full = df_full.drop(columns=['Unnamed: 0'])
+
+ df_full.reset_index(drop=True, inplace=True)
+
+ # Tokenization and inference function: returns one list of 28 probabilities per input text
+ def infer_texts(texts):
+     tokenized_texts = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)
+     class SimpleDataset:
+         def __init__(self, tokenized_texts):
+             self.tokenized_texts = tokenized_texts
+         def __len__(self):
+             return len(self.tokenized_texts["input_ids"])
+         def __getitem__(self, idx):
+             return {k: v[idx] for k, v in self.tokenized_texts.items()}
+     test_dataset = SimpleDataset(tokenized_texts)
+     trainer = Trainer(model=model, data_collator=data_collator)
+     predictions = trainer.predict(test_dataset)
+     sigmoid = torch.nn.Sigmoid()
+     probs = sigmoid(torch.Tensor(predictions.predictions))
+     return np.round(np.array(probs), 3).tolist()
+
+ start_time = time.time()
+ df = df_full
+
+ # Run inference row by row and collect the results in a list of dicts
+ results = []
+ for index, row in tqdm(df.iterrows(), total=df.shape[0]):
+     patient_texts = row['Patient']
+     prob_list = infer_texts(patient_texts)
+     results.append({
+         "File": row['Class'] + "_" + row['session'],
+         "Class": row['Class'],
+         "session": row['session'],
+         "short_id": row["short_id"],
+         "long_id": row["long_id"],
+         "Sentence": patient_texts,
+         "Prediction": prob_list[0],
+         "hscl-11": row["Gesamtscore_hscl"],
+         "srs": row["srs_ges"],
+     })
+
+ # Convert the results to a DataFrame and save them as JSON
+ df_results = pd.DataFrame(results)
+ df_results.to_json("emo_speech_turn_inference.json")
+
+ end_time = time.time()
+ elapsed_time = end_time - start_time
+ print(f"Elapsed time: {elapsed_time:.2f} seconds")
+ print(df_results)
+
+ # Expand the per-text probability lists into one column per emotion label
+ emo_df = pd.DataFrame(df_results['Prediction'].tolist(), index=df_results["Class"].index)
+ col_names = ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral']
+ emo_df.columns = col_names
+ print(emo_df)
+ ```
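
For a quick sanity check on a single utterance, the Excel/Trainer pipeline above is not required. Below is a minimal sketch that assumes the `tokenizer`, `model`, and `col_names` objects from the snippet above are already loaded; the German example sentence and the 0.5 cut-off are illustrative choices, not part of the model card.

```python
# Minimal single-text inference sketch.
# Assumes tokenizer, model, and col_names are already defined as in the snippet above;
# the example sentence and the 0.5 threshold are illustrative only.
import torch

text = "Ich bin heute sehr froh und dankbar."  # hypothetical example utterance
inputs = tokenizer(text, return_tensors="pt", truncation=True)

model.eval()
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 28): one logit per emotion label

probs = torch.sigmoid(logits)[0].tolist()  # per-label probabilities

# Pair each of the 28 GoEmotions labels with its probability
scores = dict(zip(col_names, [round(p, 3) for p in probs]))
print(scores)
print("Labels above 0.5:", [label for label, p in scores.items() if p > 0.5])
```

Since the snippet applies a sigmoid rather than a softmax, the 28 scores are independent probabilities and need not sum to 1; any per-label threshold is a downstream choice.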