Update app.py
app.py
CHANGED
@@ -40,7 +40,7 @@ def initialize_vector_store_index(data_path, service_context):
     return index
 
 # Configure and initialize components
-
+
 llm = configure_llama_model()
 embed_model = configure_embeddings()
 service_context = configure_service_context(llm, embed_model)
@@ -51,11 +51,7 @@ query_engine = index.as_query_engine()
 def get_response(text, username):
     # For simplicity, we are only using the 'text' argument
     response = str(query_engine.query(text))
-
-    dit[text]=response
-    return response
-    else:
-        return dit[text]
+    return response
 
 
 gr.ChatInterface(get_response).launch(debug=True,share=True)
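The removed lines look like an unfinished attempt to cache answers in a dict named dit: the else: has no matching if, which would raise a SyntaxError, so this commit drops the caching and simply returns the fresh response. For reference, a minimal sketch of what that caching idea could look like, assuming dit is a module-level in-memory cache keyed by the prompt (the membership check and its placement are assumptions, not part of this commit):

dit = {}  # hypothetical in-memory cache: prompt text -> response string

def get_response(text, username):
    # Serve a cached answer if this exact prompt has been seen before.
    if text in dit:
        return dit[text]
    # Otherwise query the index and remember the result.
    response = str(query_engine.query(text))
    dit[text] = response
    return response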
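As a usage note, gr.ChatInterface calls its handler as fn(message, history), so the second parameter of get_response (named username here) actually receives the running chat history and is ignored; only the latest message is passed to the query engine.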