Update app.py
app.py CHANGED
@@ -1,7 +1,24 @@
+# from flask import Flask, render_template, request
+from sentence_transformers import util
+import torch
+from semantic import load_corpus_and_model
+# app = Flask(__name__)
+
+query_prefix = "query: "
+
+# # Load the pre-encoded answers from the file
+answers_emb = torch.load('encoded_answers.pt')
+test_queries, test_doc, model = load_corpus_and_model()
+
 import gradio as gr
 
-def
-
+def query(q):
+    user_query = q
+    query_emb = model.encode([query_prefix + user_query], convert_to_tensor=True, show_progress_bar=False)
+    best_answer_index = util.cos_sim(query_emb, answers_emb).argmax().item()
+    best_answer_key = list(test_doc.keys())[best_answer_index]
+    best_answer = test_doc[best_answer_key]
+    return best_answer
 
-iface = gr.Interface(fn=
+iface = gr.Interface(fn=query, inputs="text", outputs="text")
 iface.launch()
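The updated app.py expects encoded_answers.pt to hold one embedding per entry of test_doc, in the same order as list(test_doc.keys()), since the argmax over cosine similarities is mapped back to a key by index. A minimal sketch of how that file could be produced offline is below; the "passage: " prefix and the exact return values of semantic.load_corpus_and_model() are assumptions not confirmed by this commit, which only shows the "query: " prefix on the query side.

# Hypothetical offline pre-encoding step (not part of this commit): builds the
# 'encoded_answers.pt' tensor that app.py loads at startup.
import torch
from semantic import load_corpus_and_model  # same helper app.py imports

# Assumption: the model is an E5-style SentenceTransformer that pairs a
# "passage: " prefix on documents with the "query: " prefix used in app.py.
passage_prefix = "passage: "

test_queries, test_doc, model = load_corpus_and_model()

# Encode the answers in dict order so that util.cos_sim(...).argmax() in app.py
# maps back to the right key via list(test_doc.keys())[best_answer_index].
answers = [passage_prefix + answer for answer in test_doc.values()]
answers_emb = model.encode(answers, convert_to_tensor=True, show_progress_bar=True)

torch.save(answers_emb, 'encoded_answers.pt')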