Spaces: paragon-analytics (Sleeping)

paragon-analytics committed on
Commit b77e093 • 1 Parent(s): 9acf9a7
Update app.py

app.py CHANGED
@@ -1,11 +1,8 @@
-# Import packages
-
+# Import packages
 import numpy as np
 import pandas as pd
 import matplotlib.pyplot as plt
 import re
-
-# tensorflow imports:
 import tensorflow as tf
 import pickle
 import gradio as gr
@@ -14,105 +11,82 @@ import spacy
 from spacy import displacy
 import streamlit as st
 import spacy_streamlit
-nlp = spacy.load('en_core_web_sm')
 import torch
-import
-
-
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+# Load NLP model
+nlp = spacy.load('en_core_web_sm')
+
+# Load your custom tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained("paragon-analytics/bert_resil")
 model = AutoModelForSequenceClassification.from_pretrained("paragon-analytics/bert_resil")

-#
-# para_model = AutoModelForSeq2SeqLM.from_pretrained("paragon-analytics/t5_para")
-
+# Initialize YAKE keyword extractor
 kw_extractor = yake.KeywordExtractor()
 custom_kw_extractor = yake.KeywordExtractor(lan="en", n=2, dedupLim=0.2, top=10, features=None)

-
-max_len = 111
-
+# Initialize the transformers interpret explainer
 from transformers_interpret import SequenceClassificationExplainer
-cls_explainer = SequenceClassificationExplainer(
-    model,
-    tokenizer)
-
-# load the model from disk
-#filename = 'resil_lstm_model.sav'
-#lmodel = pickle.load(open(filename, 'rb'))
-
-# load the model from disk
-#filename = 'tokenizer.pickle'
-#tok = pickle.load(open(filename, 'rb'))
+cls_explainer = SequenceClassificationExplainer(model, tokenizer)

 def process_final_text(text):
     X_test = str(text).lower()

+    # Encode the input and get model output
     encoded_input = tokenizer(X_test, return_tensors='pt')
     output = model(**encoded_input)
     scores = output[0][0].detach().numpy()
     scores = tf.nn.softmax(scores)

-    # Get Keywords
+    # Get Keywords
     keywords = custom_kw_extractor.extract_keywords(X_test)
     letter = []
     score = []
     for i in keywords:
-        if i[1]>0.4:
+        if i[1] > 0.4:
             a = "+++"
-        elif (i[1]<=0.4) and (i[1]>0.1):
+        elif (i[1] <= 0.4) and (i[1] > 0.1):
             a = "++"
-        elif (i[1]<=0.1) and (i[1]>0.01):
+        elif (i[1] <= 0.1) and (i[1] > 0.01):
             a = "+"
         else:
             a = "NA"
-
         letter.append(i[0])
         score.append(a)

-    keywords = [(letter[i], score[i]) for i in range(len(letter))]
+    keywords = [(letter[i], score[i]) for i in range(len(letter))]

-    # Get NER
-    # NER:
+    # Get NER
     doc = nlp(text)
     sp_html = displacy.render(doc, style="ent", page=True, jupyter=False)
-    NER = (
-        ""
-        + sp_html
-        + ""
-    )
+    NER = sp_html

-    # Transformer Interpret
+    # Transformer Interpret
     word_attributions = cls_explainer(X_test)
     letter = []
     score = []
     for i in word_attributions:
-        if i[1]>0.5:
+        if i[1] > 0.5:
             a = "++"
-        elif (i[1]<=0.5) and (i[1]>0.1):
+        elif (i[1] <= 0.5) and (i[1] > 0.1):
             a = "+"
-        elif (i[1]>=-0.5) and (i[1]<-0.1):
+        elif (i[1] >= -0.5) and (i[1] < -0.1):
             a = "-"
-        elif i[1]<-0.5:
+        elif i[1] < -0.5:
             a = "--"
         else:
             a = "NA"
-
         letter.append(i[0])
         score.append(a)

-    word_attributions = [(letter[i], score[i]) for i in range(len(letter))]
+    word_attributions = [(letter[i], score[i]) for i in range(len(letter))]

-
-    # batch = para_tokenizer(X_test, return_tensors='pt')
-    # generated_ids = para_model.generate(batch['input_ids'])
-    # para_list = para_tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
-
-    return {"Inspirational": float(scores.numpy()[1]), "Uninspiring": float(scores.numpy()[0])},keywords,NER,word_attributions
+    return {"Inspirational": float(scores.numpy()[1]), "Uninspiring": float(scores.numpy()[0])}, keywords, NER, word_attributions

 def main(prob1):
     text = str(prob1)
     obj = process_final_text(text)
-    return obj[0],obj[1],obj[2],obj[3]
+    return obj[0], obj[1], obj[2], obj[3]

 title = "Welcome to **HopeHarbor**! ✨"
 description1 = """
@@ -122,29 +96,47 @@ with gr.Blocks(title=title) as demo:
     gr.Markdown(f"## {title}")
     gr.Markdown(description1)
     gr.Markdown("""---""")
-    prob1 = gr.Textbox(label="Enter Your Text Here:",lines=2, placeholder="Type it here ...")
+    prob1 = gr.Textbox(label="Enter Your Text Here:", lines=2, placeholder="Type it here ...")
     submit_btn = gr.Button("Analyze")
-    #text = gr.Textbox(label="Text:",lines=2, placeholder="Please enter text here ...")
-    #submit_btn2 = gr.Button("Analyze")

     with gr.Column(visible=True) as output_col:
-        label = gr.Label(label
-        impplot = gr.HighlightedText(
+        label = gr.Label(label="Predicted Label")
+        impplot = gr.HighlightedText(
+            label="Important Words",
+            combine_adjacent=False,
+            color_map={"+++": "royalblue", "++": "cornflowerblue", "+": "lightsteelblue", "NA": "white"}
+        )
+        NER = gr.HTML(label='NER:')
+        intp = gr.HighlightedText(
+            label="Word Scores",
+            combine_adjacent=False,
+            color_map={"++": "darkgreen", "+": "green", "--": "darkred", "-": "red", "NA": "white"}
+        )

     submit_btn.click(
         main,
         [prob1],
-        [label,impplot,NER,intp],
+        [label, impplot, NER, intp],
+        api_name="ResText"
     )

     gr.Markdown("### Click on any of the examples below to see to what extent they contain resilience messaging:")
-    gr.Examples(
+    gr.Examples(
+        examples=[
+            ["Please stay at home and avoid unnecessary trips."],
+            ["Please stay at home and avoid unnecessary trips. We will survive this."],
+            ["We will survive this."],
+            ["Watch today’s news briefing with the latest updates on COVID-19 in Connecticut."],
+            ["So let's keep doing what we know works. Let's stay strong, and let's beat this virus. I know we can, and I know we can come out stronger on the other side."],
+            ["It is really wonderful how much resilience there is in human nature. Let any obstructing cause, no matter what, be removed in any way, even by death, and we fly back to first principles of hope and enjoyment."],
+            ["Resilience is accepting your new reality, even if it’s less good than the one you had before. You can fight it, you can do nothing but scream about what you’ve lost, or you can accept that and try to put together something that’s good."],
+            ["You survived all of the days you thought you couldn't, never underestimate your resilience."],
+            ["Like tiny seeds with potent power to push through tough ground and become mighty trees, we hold innate reserves of unimaginable strength. We are resilient."]
+        ],
+        inputs=[prob1],
+        outputs=[label, impplot, NER, intp],
+        fn=main,
+        cache_examples=True
+    )

-demo.launch()
+demo.launch()
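Note: because this commit names the click handler's endpoint (api_name="ResText"), the updated Space can also be queried programmatically. Below is a minimal sketch using gradio_client; the Space id is a hypothetical placeholder (only the org name paragon-analytics appears in this commit), and the returned tuple mirrors the outputs wired to [label, impplot, NER, intp].

# Minimal sketch of calling the named endpoint added in this commit.
# Assumption: gradio_client is installed and the Space id below is a placeholder.
from gradio_client import Client

client = Client("paragon-analytics/<space-name>")  # hypothetical Space id
result = client.predict(
    "We will survive this.",   # prob1: the input textbox
    api_name="/ResText",       # endpoint name registered on submit_btn.click
)
# result is a tuple: (label scores, keyword highlights, NER HTML, word-score highlights)
print(result)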