import base64
import itertools
import os
import re
import time

import numpy as np
import pandas as pd
import plotly_express as px
import plotly.graph_objects as go
import streamlit as st
import validators
import whisper
import en_core_web_lg
import nltk
from bs4 import BeautifulSoup
from nltk import sent_tokenize
from optimum.onnxruntime import ORTModelForSequenceClassification
from pytube import YouTube
from sentence_transformers import SentenceTransformer, CrossEncoder, util
from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification

nltk.download('punkt')
st.set_page_config(
    page_title="Home",
    page_icon="π",
)
st.sidebar.header("Home")
st.markdown("## Earnings Call Analysis Whisperer")
st.markdown(
    """
    This app assists finance analysts with transcribing and analysing earnings calls by carrying out the following tasks:
    - Transcribing earnings calls using OpenAI's [Whisper](https://github.com/openai/whisper).
    - Analysing the sentiment of the transcribed text with a quantized version of [FinBert-Tone](https://huggingface.co/nickmuchi/quantized-optimum-finbert-tone).
    - Summarizing the call with the [FaceBook-Bart-Large-CNN](https://huggingface.co/facebook/bart-large-cnn) model, with entity extraction.
    - Semantic search over the transcript with [Sentence-Transformers](https://huggingface.co/sentence-transformers/all-mpnet-base-v2), reranking results with a Cross-Encoder.

    **π Enter a YouTube earnings call URL below and navigate to the sidebar tabs**
    """
)
url_input = st.text_input(
    label='Enter YouTube URL, e.g. "https://www.youtube.com/watch?v=8pmbScvyfeY"', key="url")
st.markdown(
    "<h3 style='text-align: center; color: red;'>OR</h3>",
    unsafe_allow_html=True
)
upload_wav = st.file_uploader("Upload a .wav sound file", key="upload")
auth_token = os.environ.get("auth_token")
progress_bar = st.sidebar.progress(0)
@st.experimental_singleton(suppress_st_warning=True)
def load_models():
    '''Load and cache all models used by the app'''
    asr_model = whisper.load_model("small")
    q_model = ORTModelForSequenceClassification.from_pretrained("nickmuchi/quantized-optimum-finbert-tone")
    q_tokenizer = AutoTokenizer.from_pretrained("nickmuchi/quantized-optimum-finbert-tone")
    ner_model = AutoModelForTokenClassification.from_pretrained("xlm-roberta-large-finetuned-conll03-english")
    ner_tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large-finetuned-conll03-english")
    sent_pipe = pipeline("text-classification", model=q_model, tokenizer=q_tokenizer)
    sum_pipe = pipeline("summarization", model="facebook/bart-large-cnn", tokenizer="facebook/bart-large-cnn")
    ner_pipe = pipeline("ner", model=ner_model, tokenizer=ner_tokenizer, grouped_entities=True)
    sbert = SentenceTransformer("all-mpnet-base-v2")
    cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-12-v2')
    return asr_model, sent_pipe, sum_pipe, ner_pipe, sbert, cross_encoder
@st.experimental_singleton(suppress_st_warning=True)
def get_spacy():
    nlp = en_core_web_lg.load()
    return nlp
nlp = get_spacy()
asr_model, sent_pipe, sum_pipe, ner_pipe, sbert, cross_encoder = load_models()
@st.experimental_memo(suppress_st_warning=True)
def inference(link, upload):
    '''Convert a YouTube video or an uploaded audio file to text'''
    if validators.url(link):
        yt = YouTube(link)
        # Download the audio-only stream and transcribe it with Whisper
        path = yt.streams.filter(only_audio=True)[0].download(filename="audio.mp4")
        results = asr_model.transcribe(path)
        return results, yt.title
    elif upload:
        # Whisper's transcribe() expects a file path, so persist the uploaded bytes first
        with open("audio.wav", "wb") as f:
            f.write(upload.read())
        results = asr_model.transcribe("audio.wav")
        return results, "Transcribed Earnings Audio"
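# Illustrative usage (a sketch, not executed at import time): Whisper's transcribe()
# returns a dict whose "text" key holds the full transcript, e.g.
#   results, title = inference(url_input, upload_wav)
#   transcript = results["text"]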
@st.experimental_memo(suppress_st_warning=True)
def sentiment_pipe(earnings_text):
    '''Determine the sentiment of the text'''
    earnings_sentences = sent_tokenize(earnings_text)
    earnings_sentiment = sent_pipe(earnings_sentences)
    return earnings_sentiment, earnings_sentences
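# Illustrative output shape of the HF text-classification pipeline: one dict per
# sentence, e.g. [{"label": "Positive", "score": 0.98}, ...] (assuming the quantized
# model keeps finbert-tone's Positive/Neutral/Negative label set).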
@st.experimental_memo(suppress_st_warning=True)
def preprocess_plain_text(text, window_size=3):
    '''Preprocess text for semantic search'''
    text = text.encode("ascii", "ignore").decode()  # strip non-ascii characters
    text = re.sub(r"https*\S+", " ", text)  # urls
    text = re.sub(r"@\S+", " ", text)  # mentions
    text = re.sub(r"#\S+", " ", text)  # hashtags
    text = re.sub(r"\s{2,}", " ", text)  # extra whitespace
    #text = re.sub("[^.,!?%$A-Za-z0-9]+", " ", text) # special characters except .,!?

    # break into lines and remove leading/trailing space on each
    lines = [line.strip() for line in text.splitlines()]
    # break multi-headlines into a line each (split on double spaces)
    chunks = [phrase.strip() for line in lines for phrase in line.split("  ")]
    # drop blank lines
    text = '\n'.join(chunk for chunk in chunks if chunk)

    # Split the text into paragraphs, then every paragraph into sentences
    paragraphs = []
    for paragraph in text.split("\n\n"):
        if len(paragraph.strip()) > 0:
            paragraphs.append(sent_tokenize(paragraph.strip()))

    # Combine up to window_size sentences into a passage.
    # Smaller values: context from neighbouring sentences may get lost.
    # Larger values: more context from the paragraph is kept, but passages get longer.
    passages = []
    for paragraph in paragraphs:
        for start_idx in range(0, len(paragraph), window_size):
            end_idx = min(start_idx + window_size, len(paragraph))
            passages.append(" ".join(paragraph[start_idx:end_idx]))

    print(f"Sentences: {sum([len(p) for p in paragraphs])}")
    print(f"Passages: {len(passages)}")

    return passages
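# Illustrative windowing on a hypothetical three-sentence transcript with
# window_size=2: ["A.", "B.", "C."] -> passages ["A. B.", "C."], i.e.
# non-overlapping groups of up to window_size sentences.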
@st.experimental_memo(suppress_st_warning=True)
def chunk_clean_text(text):
    """Chunk text into pieces of at most 500 words for summarization"""
    article = nlp(text)
    sentences = [i.text for i in list(article.sents)]

    current_chunk = 0
    chunks = []

    for sentence in sentences:
        if len(chunks) == current_chunk + 1:
            # Extend the current chunk while it stays under the 500-word budget
            if len(chunks[current_chunk]) + len(sentence.split(" ")) <= 500:
                chunks[current_chunk].extend(sentence.split(" "))
            else:
                current_chunk += 1
                chunks.append(sentence.split(" "))
        else:
            chunks.append(sentence.split(" "))

    for chunk_id in range(len(chunks)):
        chunks[chunk_id] = " ".join(chunks[chunk_id])

    return chunks
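# Illustrative: a ~1,200-word transcript yields three chunks of at most 500 words
# each, with sentence boundaries preserved so bart-large-cnn never receives a
# sentence cut in half.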
def summary_downloader(raw_text):
    '''Render a download link for the summary text'''
    time_str = time.strftime("%Y%m%d-%H%M%S")  # timestamp for a unique filename
    b64 = base64.b64encode(raw_text.encode()).decode()
    new_filename = "new_text_file_{}_.txt".format(time_str)
    st.markdown("#### Download Summary as a File")
    href = f'<a href="data:file/txt;base64,{b64}" download="{new_filename}">Click to Download!!</a>'
    st.markdown(href, unsafe_allow_html=True)
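# Usage sketch (summary_text is a hypothetical variable): summary_downloader(summary_text)
# renders a base64 data-URI link in the page, so the file downloads without a
# server round-trip.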
def get_all_entities_per_sentence(text):
    '''Collect spaCy and XLM-RoBERTa entities for each sentence'''
    doc = nlp(''.join(text))
    sentences = list(doc.sents)
    entities_all_sentences = []
    for sentence in sentences:
        entities_this_sentence = []

        # SPACY ENTITIES
        for entity in sentence.ents:
            entities_this_sentence.append(str(entity))

        # FLAIR ENTITIES (CURRENTLY NOT USED)
        # sentence_entities = Sentence(str(sentence))
        # tagger.predict(sentence_entities)
        # for entity in sentence_entities.get_spans('ner'):
        #     entities_this_sentence.append(entity.text)

        # XLM ENTITIES
        entities_xlm = [entity["word"] for entity in ner_pipe(str(sentence))]
        for entity in entities_xlm:
            entities_this_sentence.append(str(entity))

        entities_all_sentences.append(entities_this_sentence)

    return entities_all_sentences
def get_all_entities(text):
    '''Flatten per-sentence entities into a single list'''
    all_entities_per_sentence = get_all_entities_per_sentence(text)
    return list(itertools.chain.from_iterable(all_entities_per_sentence))
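# Illustrative flattening: [["Apple", "Tim Cook"], ["Q3"]] -> ["Apple", "Tim Cook", "Q3"]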
def get_and_compare_entities(article_content, summary_output):
    '''Compare entities in the summary against entities in the source text'''
    all_entities_per_sentence = get_all_entities_per_sentence(article_content)
    entities_article = list(itertools.chain.from_iterable(all_entities_per_sentence))

    all_entities_per_sentence = get_all_entities_per_sentence(summary_output)
    entities_summary = list(itertools.chain.from_iterable(all_entities_per_sentence))

    matched_entities = []
    unmatched_entities = []
    for entity in entities_summary:
        if any(entity.lower() in substring_entity.lower() for substring_entity in entities_article):
            matched_entities.append(entity)
        elif any(
                np.inner(sbert.encode(entity, show_progress_bar=False),
                         sbert.encode(art_entity, show_progress_bar=False)) > 0.9 for
                art_entity in entities_article):
            matched_entities.append(entity)
        else:
            unmatched_entities.append(entity)

    matched_entities = list(dict.fromkeys(matched_entities))
    unmatched_entities = list(dict.fromkeys(unmatched_entities))

    # Drop entities that are substrings of another entity in the same list
    matched_entities_to_remove = []
    unmatched_entities_to_remove = []

    for entity in matched_entities:
        for substring_entity in matched_entities:
            if entity != substring_entity and entity.lower() in substring_entity.lower():
                matched_entities_to_remove.append(entity)

    for entity in unmatched_entities:
        for substring_entity in unmatched_entities:
            if entity != substring_entity and entity.lower() in substring_entity.lower():
                unmatched_entities_to_remove.append(entity)

    matched_entities_to_remove = list(dict.fromkeys(matched_entities_to_remove))
    unmatched_entities_to_remove = list(dict.fromkeys(unmatched_entities_to_remove))

    for entity in matched_entities_to_remove:
        matched_entities.remove(entity)
    for entity in unmatched_entities_to_remove:
        unmatched_entities.remove(entity)

    return matched_entities, unmatched_entities
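# Illustrative matching on hypothetical entities: "Apple" in the summary matches
# "Apple Inc." in the article via the substring test; a paraphrase such as
# "fourth quarter" vs "Q4" can still match when the inner product of the two
# sbert embeddings exceeds 0.9 (all-mpnet-base-v2 embeddings are normalized, so
# this behaves like a cosine-similarity threshold).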
# Wrapper for rendering the highlighted summary; a plain scrollable <div>
# (assumption: the original constant was defined elsewhere in the app)
HTML_WRAPPER = """<div style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem">{}</div>"""

def highlight_entities(article_content, summary_output):
    '''Highlight summary entities: green if found in the source text, red otherwise'''
    markdown_start_red = "<mark class=\"entity\" style=\"background: rgb(238, 135, 135);\">"
    markdown_start_green = "<mark class=\"entity\" style=\"background: rgb(121, 236, 121);\">"
    markdown_end = "</mark>"

    matched_entities, unmatched_entities = get_and_compare_entities(article_content, summary_output)

    # The negative lookahead avoids re-matching entities already wrapped in a <mark> tag
    for entity in matched_entities:
        summary_output = re.sub(rf'({entity})(?![^rgb\(]*\))', markdown_start_green + entity + markdown_end, summary_output)

    for entity in unmatched_entities:
        summary_output = re.sub(rf'({entity})(?![^rgb\(]*\))', markdown_start_red + entity + markdown_end, summary_output)

    soup = BeautifulSoup(summary_output, features="html.parser")
    return HTML_WRAPPER.format(soup)
def display_df_as_table(model, top_k, score='score'):
    '''Display the df with text and scores as a table'''
    # Note: `passages` is the module-level list produced by preprocess_plain_text
    df = pd.DataFrame([(hit[score], passages[hit['corpus_id']]) for hit in model[0:top_k]], columns=['Score', 'Text'])
    df['Score'] = round(df['Score'], 2)
    return df
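# Illustrative input (assumption: hits shaped like sentence-transformers
# util.semantic_search output): display_df_as_table([{"corpus_id": 3, "score": 0.71},
# {"corpus_id": 7, "score": 0.64}], top_k=2) tabulates the two passages with
# their scores rounded to 2 dp.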
def make_spans(text, results):
    '''Pair each sentence with its predicted sentiment label'''
    results_list = []
    for i in range(len(results)):
        results_list.append(results[i]['label'])
    facts_spans = list(zip(sent_tokenize(text), results_list))
    return facts_spans

## Fiscal sentiment, sentence by sentence
def fin_ext(text):
    results = sent_pipe(sent_tokenize(text))
    return make_spans(text, results)
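# Illustrative spans (hypothetical sentences and labels): fin_ext("Revenue grew. Costs rose.")
# -> [("Revenue grew.", "Positive"), ("Costs rose.", "Negative")], ready for a
# sentence-level sentiment highlighter.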
progress_bar.empty()