paragon-analytics committed
Commit 6585483 · 1 Parent(s): b2ddb1b
Update app.py
app.py CHANGED
@@ -28,6 +28,14 @@ from spacy import displacy
import streamlit as st
import spacy_streamlit
nlp = spacy.load('en_core_web_sm')
+import torch
+import tensorflow as tf
+from transformers import RobertaTokenizer, RobertaModel
+from transformers import AutoModelForSequenceClassification
+from transformers import TFAutoModelForSequenceClassification
+from transformers import AutoTokenizer
+tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
+model = AutoModelForSequenceClassification.from_pretrained("paragon-analytics/bert_resil")

kw_extractor = yake.KeywordExtractor()
custom_kw_extractor = yake.KeywordExtractor(lan="en", n=2, dedupLim=0.2, top=10, features=None)
@@ -52,6 +60,11 @@ def process_final_text(text):
    lstm_prob = lmodel.predict(test_sequences_matrix.tolist()).flatten()
    lstm_pred = np.where(lstm_prob>=0.5,1,0)

+    encoded_input = tokenizer(X_test, return_tensors='pt')
+    output = model(**encoded_input)
+    scores = output[0][0].detach().numpy()
+    scores = tf.nn.softmax(scores)
+
    # Get Keywords:
    keywords = custom_kw_extractor.extract_keywords(X_test)
    letter = []
@@ -80,7 +93,7 @@ def process_final_text(text):
        + sp_html
        + ""
    )
-    return {"Resilience": float(
+    return {"Resilience": float(scores.numpy()[1]), "Non-Resilience": 1-float(scores.numpy()[0])},keywords,NER

def main(prob1):
    text = str(prob1)
@@ -117,6 +130,6 @@ with gr.Blocks(title=title) as demo:


    gr.Markdown("### Click on any of the examples below to see how it works:")
-    gr.Examples([["
+    gr.Examples([["Please stay at home and avoid unnecessary trips."],["Please stay at home and avoid unnecessary trips. We will survive this."],["We will survive this."]], [prob1], [label,impplot,NER], main, cache_examples=True)

demo.launch()
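The core of the change is the added inference path: tokenize the input, run the paragon-analytics/bert_resil sequence classifier, and softmax the logits into the Resilience / Non-Resilience pair that process_final_text now returns. A minimal standalone sketch of that path, lifted from the diff (the sample text reuses one of the new gr.Examples prompts, and the final print is illustrative only, not part of the commit):

import tensorflow as tf
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Checkpoints exactly as loaded in the commit.
tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model = AutoModelForSequenceClassification.from_pretrained("paragon-analytics/bert_resil")

text = "We will survive this."                        # one of the example prompts
encoded_input = tokenizer(text, return_tensors='pt')  # PyTorch tensors for the model
output = model(**encoded_input)                       # output[0] holds the logits
scores = output[0][0].detach().numpy()                # logits for the single input
scores = tf.nn.softmax(scores)                        # logits -> probabilities (TF eager tensor)

print({"Resilience": float(scores.numpy()[1]),
       "Non-Resilience": 1 - float(scores.numpy()[0])})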
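The other functional change fills in three example prompts and switches gr.Examples to cache_examples=True, so Gradio runs main() on each example at startup and serves the cached outputs on click. A minimal sketch of that wiring under stated assumptions: the Textbox/Label components and the stub main() below are placeholders standing in for the app's real prob1, label, impplot and NER objects; only the gr.Examples call itself comes from the commit.

import gradio as gr

def main(prob1):
    # Stub for the app's real pipeline (classifier + keywords + NER).
    return {"Resilience": 0.5, "Non-Resilience": 0.5}

with gr.Blocks() as demo:   # the app passes title=title here
    prob1 = gr.Textbox(label="Input text")   # assumed input component
    label = gr.Label()                       # assumed output component
    gr.Markdown("### Click on any of the examples below to see how it works:")
    gr.Examples(
        [["Please stay at home and avoid unnecessary trips."],
         ["Please stay at home and avoid unnecessary trips. We will survive this."],
         ["We will survive this."]],
        [prob1],              # inputs
        [label],              # outputs (the app also wires impplot and NER)
        main,                 # fn is required when cache_examples=True
        cache_examples=True,  # precompute the example outputs at launch
    )

demo.launch()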