Shredder committed on
Commit
c98525f
1 Parent(s): 2c330bd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -7
app.py CHANGED
@@ -11,8 +11,8 @@ from nltk.tokenize import sent_tokenize
11
  from fin_readability_sustainability import BERTClass, do_predict
12
  import pandas as pd
13
  import en_core_web_sm
14
- from fincat_utils import extract_context_words
15
- from fincat_utils import bert_embedding_extract
16
  from score_fincat import score_fincat
17
  import pickle
18
  #lr_clf = pickle.load(open("lr_clf_FiNCAT.pickle",'rb'))
@@ -41,9 +41,6 @@ def get_sustainability(text):
41
  return highlight
42
  #SUSTAINABILITY ENDS
43
 
44
- #CLAIM STARTS
45
-
46
-
47
 
48
  ##Summarization
49
  summarizer = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")
@@ -52,7 +49,7 @@ def summarize_text(text):
52
  stext = resp[0]['summary_text']
53
  return stext
54
 
55
-
56
def split_in_sentences(text):
    """Split *text* into sentences with the spaCy pipeline.

    Returns a list of sentence strings, each stripped of leading and
    trailing whitespace.
    """
    parsed = nlp(text)
    sentences = []
    for sentence in parsed.sents:
        sentences.append(str(sentence).strip())
    return sentences
@@ -63,7 +60,7 @@ def make_spans(text,results):
63
  facts_spans = []
64
  facts_spans = list(zip(split_in_sentences(text),results_list))
65
  return facts_spans
66
- ##Forward Looking Statement
67
  fls_model = pipeline("text-classification", model="yiyanghkust/finbert-fls", tokenizer="yiyanghkust/finbert-fls")
68
  def fls(text):
69
  results = fls_model(split_in_sentences(text))
 
11
  from fin_readability_sustainability import BERTClass, do_predict
12
  import pandas as pd
13
  import en_core_web_sm
14
+ #from fincat_utils import extract_context_words
15
+ #from fincat_utils import bert_embedding_extract
16
  from score_fincat import score_fincat
17
  import pickle
18
  #lr_clf = pickle.load(open("lr_clf_FiNCAT.pickle",'rb'))
 
41
  return highlight
42
  #SUSTAINABILITY ENDS
43
 
 
 
 
44
 
45
  ##Summarization
46
  summarizer = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")
 
49
  stext = resp[0]['summary_text']
50
  return stext
51
 
52
+ ##Forward Looking Statement
53
def split_in_sentences(text):
    """Run the spaCy model over *text* and return one trimmed string per sentence."""
    return [str(s).strip() for s in nlp(text).sents]
 
60
  facts_spans = []
61
  facts_spans = list(zip(split_in_sentences(text),results_list))
62
  return facts_spans
63
+
64
  fls_model = pipeline("text-classification", model="yiyanghkust/finbert-fls", tokenizer="yiyanghkust/finbert-fls")
65
  def fls(text):
66
  results = fls_model(split_in_sentences(text))