Spaces:
Runtime error
Runtime error
Upload 8 files
Browse files- .gitattributes +1 -2
- README.md +5 -5
- app.py +178 -0
- config.json +51 -0
- pytorch_model.bin +3 -0
- requirements.txt +4 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
.gitattributes
CHANGED
@@ -1,6 +1,5 @@
|
|
1 |
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
*.ftz filter=lfs diff=lfs merge=lfs -text
|
@@ -25,7 +24,6 @@
|
|
25 |
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
*.wasm filter=lfs diff=lfs merge=lfs -text
|
@@ -33,3 +31,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
1 |
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
*.arrow filter=lfs diff=lfs merge=lfs -text
|
|
|
3 |
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
4 |
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
5 |
*.ftz filter=lfs diff=lfs merge=lfs -text
|
|
|
24 |
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
25 |
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
26 |
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
|
|
27 |
*.tflite filter=lfs diff=lfs merge=lfs -text
|
28 |
*.tgz filter=lfs diff=lfs merge=lfs -text
|
29 |
*.wasm filter=lfs diff=lfs merge=lfs -text
|
|
|
31 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
32 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
33 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
34 |
+
pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
|
README.md
CHANGED
@@ -1,10 +1,10 @@
|
|
1 |
---
|
2 |
title: Emotion Detection
|
3 |
-
emoji:
|
4 |
-
colorFrom:
|
5 |
-
colorTo:
|
6 |
-
sdk:
|
7 |
-
sdk_version:
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
---
|
|
|
1 |
---
|
2 |
title: Emotion Detection
|
3 |
+
emoji: π
|
4 |
+
colorFrom: yellow
|
5 |
+
colorTo: purple
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 3.44.1
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
---
|
app.py
ADDED
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import gc
|
3 |
+
import csv
|
4 |
+
import socket
|
5 |
+
import json
|
6 |
+
import huggingface_hub
|
7 |
+
import requests
|
8 |
+
|
9 |
+
import re as r
|
10 |
+
import gradio as gr
|
11 |
+
import pandas as pd
|
12 |
+
|
13 |
+
from huggingface_hub import Repository
|
14 |
+
from urllib.request import urlopen
|
15 |
+
from transformers import AutoTokenizer, AutoModelWithLMHead
|
16 |
+
|
## connection with HF datasets
HF_TOKEN = os.environ.get("HF_TOKEN")
DATASET_REPO_URL = "https://huggingface.co/datasets/pragnakalp/emotion_detection_dataset"
DATA_FILENAME = "emotion_detection_logs.csv"
# BUG FIX: DATA_DIRNAME was never defined but was passed as cache_dir below,
# raising NameError at import time. Use the same directory the repo is cloned into.
DATA_DIRNAME = "emotion_detection_logs"
DATA_FILE = os.path.join(DATA_DIRNAME, DATA_FILENAME)
DATASET_REPO_ID = "pragnakalp/emotion_detection_dataset"
print("is none?", HF_TOKEN is None)

# BUG FIX: hf_hub_download was called unqualified but only `import huggingface_hub`
# existed, so the call raised NameError (the likely cause of the Space's runtime error).
from huggingface_hub import hf_hub_download

try:
    # Warm the local cache with the existing log file, if the repo already has one.
    hf_hub_download(
        repo_id=DATASET_REPO_ID,
        repo_type="dataset",  # the repo is a dataset; the default repo_type targets a model repo
        filename=DATA_FILENAME,
        cache_dir=DATA_DIRNAME,
        force_filename=DATA_FILENAME  # NOTE(review): removed in newer huggingface_hub releases — confirm pinned version
    )
except Exception as e:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt are not swallowed
    print("file not found", e)

# Clone the dataset repo locally so new log rows can be committed and pushed back.
repo = Repository(
    local_dir="emotion_detection_logs", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
)
40 |
+
|
# Default demo sentences shown in the input textbox (one sentence per line).
SENTENCES_VALUE = """Raj loves Simran.\nLast year I lost my Dog.\nI bought a new phone!\nShe is scared of cockroaches.\nWow! I was not expecting that.\nShe got mad at him."""
## load model
cwd = os.getcwd()
# os.path.join with a single argument is a no-op; kept as a named path for readability.
model_path = os.path.join(cwd)
# Tokenizer comes from the hub; the fine-tuned weights (pytorch_model.bin + config.json)
# live in the Space's working directory.
tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion")
# FIX: AutoModelWithLMHead is deprecated. For this T5 config it resolves to
# T5ForConditionalGeneration, exactly what AutoModelForSeq2SeqLM loads, so
# behavior is unchanged while dropping the deprecation warning.
from transformers import AutoModelForSeq2SeqLM
model_base = AutoModelForSeq2SeqLM.from_pretrained(model_path)
def getIP():
    """Return this machine's public IPv4 address as a string, or '' on any failure."""
    try:
        page = str(urlopen('http://checkip.dyndns.com/').read())
        # A missing match returns None and the .group() call raises,
        # which the handler below converts into the empty-string fallback.
        match = r.search(r'Address: (\d+\.\d+\.\d+\.\d+)', page)
        return match.group(1)
    except Exception as e:
        print("Error while getting IP address -->",e)
        return ''
58 |
+
|
def get_location(ip_addr):
    """POST *ip_addr* to the geolocation endpoint and return the decoded JSON
    payload (a dict); returns {} if the request or decoding fails."""
    try:
        payload = {
            "ip": ip_addr,
            "token": "pkml123"
        }
        resp = requests.post(
            "https://demos.pragnakalp.com/get-ip-location",
            headers={'Content-Type': 'application/json'},
            data=json.dumps(payload),
        )
        body = resp.json()
        print("response======>>", body)
        return body
    except Exception as e:
        print("Error while getting location -->", e)
        return {}
81 |
+
|
82 |
+
|
"""
generate emotions of the sentences
"""
def get_emotion(text):
    """Run the fine-tuned T5 model on one sentence and return the decoded label text."""
    encoded = tokenizer.encode(text, return_tensors='pt')
    # max_length=2 leaves room for exactly one generated token after the
    # decoder start token: the emotion word itself.
    generated = model_base.generate(input_ids=encoded,
                                    max_length=2)
    decoded = [tokenizer.decode(ids) for ids in generated]
    gc.collect()
    return decoded[0]
97 |
+
|
def generate_emotion(article):
    """Detect an emotion for every non-blank line of *article*.

    Returns a pandas DataFrame with columns 'Input' and 'Detected Emotion'.
    Raises gr.Error when the input is empty or whitespace-only.
    Side effect: logs the request via save_data_and_sendmail().
    """
    if not article.strip():
        raise gr.Error("Please enter text in inputbox!!!!")

    # Same semantics as the original `while "" in list: remove("")`, without O(n^2).
    sen_list = [sen for sen in article.split('\n') if sen != ""]
    # BUG FIX: the original appended a result only for lines with non-whitespace
    # content, but put *all* of sen_list in the 'Input' column — a whitespace-only
    # line made the two columns different lengths and pd.DataFrame raised.
    # Filter both columns identically.
    sentences = [sen for sen in sen_list if sen.strip()]
    print(sentences)

    results = [get_emotion(sen) for sen in sentences]
    results_dict = [
        {
            'sentence': sen,
            'emotion': emo
        }
        for sen, emo in zip(sentences, results)
    ]

    table = {'Input': sentences, 'Detected Emotion': results}
    gc.collect()
    save_data_and_sendmail(article, results_dict, sen_list, results)
    return pd.DataFrame(table)
128 |
+
|
"""
Save generated details
"""
def save_data_and_sendmail(article, results_dict, sen_list, results):
    """Append one request row to the CSV log, push it to the HF dataset repo,
    and notify the external logging endpoint. Returns a status string and
    never raises — failures are reported in the returned message."""
    try:
        ip_address = getIP()
        print(ip_address)
        location = get_location(ip_address)
        print(location)

        # One CSV row per demo request: raw input, per-sentence results, requester info.
        row = [article, results_dict, ip_address, location]
        with open(DATA_FILE, "a") as log_file:
            csv.writer(log_file).writerow(row)
        commit_url = repo.push_to_hub()
        print("commit data :", commit_url)

        notify_url = 'https://pragnakalpdev33.pythonanywhere.com/HF_space_emotion_detection_demo'

        payload = {"sentences": sen_list, "gen_results": results, "ip_addr": ip_address, 'loc': location}
        response = requests.post(notify_url, json=payload)
        print("response=-----=", response.status_code)

    except Exception as e:
        return "Error while sending mail" + str(e)

    return "Successfully save data"
159 |
+
|
"""
UI design for demo using gradio app
"""
# NOTE(review): "#FB3D5" below is a 5-digit hex colour, which browsers treat as
# invalid and ignore — confirm the intended value (kept byte-identical here).
APP_CSS = ".gradio-container {background-color: lightgray} #inp_div {background-color: #FB3D5;}"
APP_ARTICLE = """<p style='text-align: center;'>Provide us your <a href="https://www.pragnakalp.com/contact/" target="_blank">feedback</a> on this demo and feel free
to contact us at <a href="mailto:letstalk@pragnakalp.com" target="_blank">letstalk@pragnakalp.com</a> if you want to have your own Emotion Detection system.
We will be happy to serve you for your requirement. And don't forget to check out more interesting
<a href="https://www.pragnakalp.com/services/natural-language-processing-services/" target="_blank">NLP services</a> we are offering.</p>
<p style='text-align: center;'>Developed by: <a href="https://www.pragnakalp.com" target="_blank">Pragnakalp Techlabs</a></p>"""

inputs = gr.Textbox(value=SENTENCES_VALUE, lines=3, label="Sentences", elem_id="inp_div")
outputs = [gr.Dataframe(row_count=(3, "dynamic"), col_count=(2, "fixed"), label="Here is the Result", headers=["Input", "Detected Emotion"], wrap=True)]

demo = gr.Interface(
    fn=generate_emotion,
    inputs=inputs,
    outputs=outputs,
    title="Emotion Detection",
    css=APP_CSS,
    article=APP_ARTICLE,
)
demo.launch()
config.json
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"architectures": [
|
3 |
+
"T5ForConditionalGeneration"
|
4 |
+
],
|
5 |
+
"d_ff": 3072,
|
6 |
+
"d_kv": 64,
|
7 |
+
"d_model": 768,
|
8 |
+
"decoder_start_token_id": 0,
|
9 |
+
"dropout_rate": 0.1,
|
10 |
+
"eos_token_id": 1,
|
11 |
+
"initializer_factor": 1.0,
|
12 |
+
"is_encoder_decoder": true,
|
13 |
+
"layer_norm_epsilon": 1e-06,
|
14 |
+
"model_type": "t5",
|
15 |
+
"n_positions": 512,
|
16 |
+
"num_heads": 12,
|
17 |
+
"num_layers": 12,
|
18 |
+
"output_past": true,
|
19 |
+
"pad_token_id": 0,
|
20 |
+
"relative_attention_num_buckets": 32,
|
21 |
+
"task_specific_params": {
|
22 |
+
"summarization": {
|
23 |
+
"early_stopping": true,
|
24 |
+
"length_penalty": 2.0,
|
25 |
+
"max_length": 200,
|
26 |
+
"min_length": 30,
|
27 |
+
"no_repeat_ngram_size": 3,
|
28 |
+
"num_beams": 4,
|
29 |
+
"prefix": "summarize: "
|
30 |
+
},
|
31 |
+
"translation_en_to_de": {
|
32 |
+
"early_stopping": true,
|
33 |
+
"max_length": 300,
|
34 |
+
"num_beams": 4,
|
35 |
+
"prefix": "translate English to German: "
|
36 |
+
},
|
37 |
+
"translation_en_to_fr": {
|
38 |
+
"early_stopping": true,
|
39 |
+
"max_length": 300,
|
40 |
+
"num_beams": 4,
|
41 |
+
"prefix": "translate English to French: "
|
42 |
+
},
|
43 |
+
"translation_en_to_ro": {
|
44 |
+
"early_stopping": true,
|
45 |
+
"max_length": 300,
|
46 |
+
"num_beams": 4,
|
47 |
+
"prefix": "translate English to Romanian: "
|
48 |
+
}
|
49 |
+
},
|
50 |
+
"vocab_size": 32128
|
51 |
+
}
|
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:707b2f7cae8cd8befb88d71f9b161e9297175f685c2c7bf91e151322d346a045
|
3 |
+
size 891692894
|
requirements.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
transformers==4.30.2
|
2 |
+
sentencepiece==0.1.99
|
3 |
+
torch==2.0.1
|
4 |
+
gradio== 3.44.1
|
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
|
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"model_max_length": 512}
|