Spaces:
Runtime error
Runtime error
File size: 5,001 Bytes
64ab470 c00a75f 64ab470 c00a75f 64ab470 c00a75f 64ab470 c00a75f 64ab470 c00a75f 64ab470 c00a75f 64ab470 2cc38ad 64ab470 2cc38ad 64ab470 2cc38ad 64ab470 e45808b 6cfc464 e45808b 64ab470 c00a75f 64ab470 2cc38ad c00a75f 64ab470 c00a75f 2a728c7 2cc38ad 64ab470 b06fb6c c00a75f f746c89 e45808b f746c89 c00a75f 64ab470 2cc38ad |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 |
import gradio as gr
import numpy as np
import pandas as pd
from datetime import datetime
import os
import re
from huggingface_hub import hf_hub_url, cached_download
from gensim.models.fasttext import load_facebook_model
ACCESS_KEY = os.environ.get('ACCESS_KEY')
# Setup model: fetch the fastText binary from the Hugging Face Hub once and
# load it via Gensim. The original code called cached_download(url) twice,
# discarding the first result; download/resolve once and reuse the local path.
# NOTE(review): cached_download() is deprecated in newer huggingface_hub
# releases in favor of hf_hub_download() — confirm the pinned version.
url = hf_hub_url(repo_id="simonschoe/call2vec", filename="model.bin")
model_path = cached_download(url)
model = load_facebook_model(model_path)
def semantic_search(_input, n):
    """Perform semantic search over the fastText embedding space.

    The raw query is split on comma, semicolon, or newline and each term is
    normalized (lowercased, spaces replaced by underscores). A single term is
    looked up directly; multiple terms are averaged into one query vector
    before the nearest-neighbour search. Entering the secret ``ACCESS_KEY``
    as the first term returns the prompt log instead of search results.

    Args:
        _input: Raw query string from the textbox.
        n: Number of nearest neighbours to retrieve.

    Returns:
        Tuple of (result DataFrame, path to its CSV export, normalized query
        terms joined by newlines — echoed back into the textbox).
    """
    _input = re.split(r'[,;\n]', _input)
    _input = [s.strip().lower().replace(' ', '_') for s in _input if s]
    if not _input:
        # Guard: an empty/whitespace-only query would otherwise raise
        # IndexError on _input[0] below. Return an empty result instead.
        result = pd.DataFrame(columns=['Token', 'Cosine Similarity', 'Corpus Frequency'])
        result.to_csv('result.csv')
        return result, 'result.csv', ''
    if _input[0] != ACCESS_KEY:
        # Log each (non-admin) prompt with a timestamp; '+++' separates the
        # timestamp from the terms, '___' separates individual terms.
        with open('log.txt', 'a') as f:
            f.write(str(datetime.now()) + '+++' + '___'.join(_input) + '\n')
        if len(_input) > 1:
            # Multi search: average the term vectors into one query vector.
            query = np.stack([model.wv[w] for w in _input], axis=0).mean(axis=0)
        else:
            # Single search: gensim accepts the raw token directly.
            query = _input[0]
        nearest_neighbours = model.wv.most_similar(positive=query, topn=n)
        # Corpus frequency of each neighbour token (computed once here; the
        # original duplicated this comprehension in both branches).
        frequencies = [model.wv.get_vecattr(nn[0], 'count') for nn in nearest_neighbours]
        result = pd.DataFrame(
            [(token, sim, freq) for (token, sim), freq in zip(nearest_neighbours, frequencies)],
            columns=['Token', 'Cosine Similarity', 'Corpus Frequency'])
    else:
        # Admin path: surface the accumulated prompt log as a DataFrame.
        with open('log.txt', 'r') as f:
            prompts = f.readlines()
        prompts = [p.strip().split('+++') for p in prompts]
        result = pd.DataFrame(prompts,
                              columns=['Time', 'Prompt'])
    result.to_csv('result.csv')
    return result, 'result.csv', '\n'.join(_input)
# Build the Gradio Blocks UI: search controls on the left, project
# description on the right, plus cached examples and a footer badge.
app = gr.Blocks()
with app:
    gr.Markdown("# Call2Vec")
    gr.Markdown("## Semantic Search in Quarterly Earnings Conference Calls")
    with gr.Row():
        with gr.Column():
            # Query input, neighbour-count slider, and result outputs.
            text_in = gr.Textbox(lines=1, placeholder="Insert text", label="Search Query")
            with gr.Row():
                n = gr.Slider(value=50, minimum=5, maximum=500, step=5, label="Number of Neighbours")
                compute_bt = gr.Button("Search")
            df_out = gr.Dataframe(interactive=False)
            f_out = gr.File(interactive=False, label="Download")
        with gr.Column():
            gr.Markdown(
                """
                #### Project Description
                Call2Vec is a [fastText](https://fasttext.cc/) word embedding model trained via [Gensim](https://radimrehurek.com/gensim/). It maps each token in the vocabulary into a dense, 300-dimensional vector space, designed for performing semantic search.
                The model is trained on a large sample of quarterly earnings conference calls, held by U.S. firms during the 2006-2022 period. In particular, the training data is restricted to the (rather spontaneous) executives' remarks of the Q&A section of the call. The data has been preprocessed prior to model training via stop word removal, lemmatization, named entity masking, and co-occurrence modeling.
                """
            )
            gr.Markdown(
                """
                #### App usage
                The model is intended to be used for **semantic search**: It encodes the search query (entered in the textbox on the right) in a dense vector space and finds semantic neighbours, i.e., tokens which frequently occur within similar contexts in the underlying training data.
                The model allows for two use cases:
                1. *Single Search:* The input query consists of a single word. When provided a bi-, tri-, or even fourgram, the quality of the model output depends on the presence of the query token in the model's vocabulary. N-grams should be concated by an underscore (e.g., "machine_learning" or "artificial_intelligence").
                2. *Multi Search:* The input query may consist of several words or n-grams, separated by comma, semi-colon or newline. It then computes the average vector over all inputs and performs semantic search based on the average input token.
                """
            )
    # Pre-computed example queries (cache_examples=True runs them at startup).
    gr.Examples(
        examples = [
            ["transformation", 20],
            ["climate_change", 50],
            ["risk, political_risk, uncertainty", 250],
        ],
        inputs = [text_in, n],
        outputs = [df_out, f_out, text_in],
        fn = semantic_search,
        cache_examples=True
    )
    # Footer with visitor badge. The original tag was malformed:
    # `<img 'id=...` (stray apostrophe) broke the HTML attribute list.
    gr.Markdown(
        """
        <p style="text-align: center;">
        Call2Vec by X and Y
        <br>
        <img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.glitch.me/badge?page_id=simonschoe.call2vec&left_color=green&right_color=blue" />
        </p>
        """
    )
    # Wire the search button to the handler; text_in is also an output so the
    # normalized query is echoed back into the textbox.
    compute_bt.click(semantic_search, inputs=[text_in, n], outputs=[df_out, f_out, text_in])
app.launch()
|