EC2 Default User committed
Commit: 478a501
Parent: eaed8ab

update .gitattributes so git lfs will track .pkl files

Files changed (3)
  1. .gitattributes +1 -0
  2. app.py +45 -0
  3. requirements.txt +3 -0
.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
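The added pattern is the line that "git lfs track '*.pkl'" appends to .gitattributes, so pickle files such as corpus.pkl and corpus_embeddings_cpu.pkl (both loaded by app.py below) are stored through Git LFS instead of plain git. For orientation, a minimal sketch of how those two artifacts could be produced ahead of time; the passage list is a placeholder and this precompute step is not part of this commit:

from sentence_transformers import SentenceTransformer
import pandas as pd

bi_encoder = SentenceTransformer("multi-qa-MiniLM-L6-cos-v1")

# Assumed: a list of Wikipedia passage strings (placeholder data here).
passages = [
    "Alan Turing was an English mathematician and computer scientist ...",
    "Coldplay are a British rock band formed in London ...",
]

# Encode the whole corpus once with the bi-encoder.
corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True)

# Move the embeddings to CPU so they can be unpickled without a GPU, then
# write both artifacts; the new .gitattributes rule routes them through LFS.
pd.to_pickle(corpus_embeddings.cpu(), "corpus_embeddings_cpu.pkl")
pd.to_pickle(passages, "corpus.pkl")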
app.py ADDED
@@ -0,0 +1,45 @@
+ from sentence_transformers import SentenceTransformer, CrossEncoder, util
+ import torch
+ import pickle
+ import pandas as pd
+ import gradio as gr
+
+ bi_encoder = SentenceTransformer("multi-qa-MiniLM-L6-cos-v1")
+ cross_encoder = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
+ corpus_embeddings = pd.read_pickle("corpus_embeddings_cpu.pkl")
+ corpus = pd.read_pickle("corpus.pkl")
+
+ def search(query, top_k=100):
+     print("Top 5 answers by the NSE:")
+     print()
+     ans = []
+     ##### Semantic Search #####
+     # Encode the query using the bi-encoder and find potentially relevant passages
+     question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
+     hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)
+     hits = hits[0]  # Get the hits for the first query
+
+     ##### Re-Ranking #####
+     # Now, score all retrieved passages with the cross-encoder
+     cross_inp = [[query, corpus[hit['corpus_id']]] for hit in hits]
+     cross_scores = cross_encoder.predict(cross_inp)
+
+     # Sort results by the cross-encoder scores
+     for idx in range(len(cross_scores)):
+         hits[idx]['cross-score'] = cross_scores[idx]
+
+     hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)
+
+     for idx, hit in enumerate(hits[0:5]):
+         ans.append(corpus[hit['corpus_id']])
+     return ans[0], ans[1], ans[2], ans[3], ans[4]
+
+ exp = ["Who is Steve Jobs?", "What is Coldplay?", "What is a Turing test?", "What is the most interesting thing about our universe?", "What are the most beautiful places on earth?"]
+
+ desc = "This is a semantic search engine powered by SentenceTransformers (Nils_Reimers) with a retrieval and re-ranking system over a Wikipedia corpus. It returns the top 5 results. So quest on with Transformers."
+
+ inp = gr.inputs.Textbox(lines=1, placeholder=None, default="", label="Search your query here")
+ out = gr.outputs.Textbox(type="auto", label="Search results")
+
+ iface = gr.Interface(fn=search, inputs=inp, outputs=[out, out, out, out, out], examples=exp, article=desc, title="Neural Search Engine", theme="huggingface", layout='vertical')
+ iface.launch(share=True)
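app.py implements a two-stage retrieve-and-rerank pipeline: the bi-encoder fetches the top_k=100 candidate passages via util.semantic_search, and the cross-encoder re-scores each (query, passage) pair so the five passages with the highest cross-score are returned. As a quick smoke test, the search function can be called directly before wiring up the Gradio interface; a sketch assuming the two .pkl files are present locally, not part of the committed file:

# Call search() from app.py directly, bypassing the Gradio UI.
results = search("What is a Turing test?")  # a 5-tuple of passage strings
for rank, passage in enumerate(results, start=1):
    print(f"{rank}. {passage[:120]}")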
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ sentence-transformers==2.1.0
+ torch==1.10.0
+ pandas==1.1.5