Update app.py

app.py CHANGED
@@ -25,7 +25,7 @@ from langchain.chains import RetrievalQA
 #tasks such as sentiment analysis, entity extraction, and content creation. The types of content that the PaLM 2 for
 #Text models can create include document summaries, answers to questions, and labels that classify content.
 
-llm = HuggingFaceEndpoint(repo_id="mistralai/Mistral-7B-Instruct-v0.2", Temperature=0.
+llm = HuggingFaceEndpoint(repo_id="mistralai/Mistral-7B-Instruct-v0.2", Temperature=0.3)
 #model = SentenceTransformer("all-MiniLM-L6-v2")
 
 #llm = VertexAI(model_name="text-bison@001",max_output_tokens=256,temperature=0.1,top_p=0.8,top_k=40,verbose=True,)
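The new line keeps the capitalized keyword Temperature. In langchain_community's HuggingFaceEndpoint the declared sampling field is lowercase temperature, and an unrecognized keyword is typically transferred into model_kwargs with a warning rather than controlling sampling. A minimal standalone sketch of the endpoint setup, assuming HUGGINGFACEHUB_API_TOKEN is set in the environment; max_new_tokens is an illustrative value not taken from the commit:

from langchain_community.llms import HuggingFaceEndpoint

# Lowercase `temperature` is the declared field; a capitalized `Temperature`
# would likely be shunted into model_kwargs instead of setting the field.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",
    temperature=0.3,
    max_new_tokens=256,  # illustrative, not in the commit
)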
@@ -51,7 +51,7 @@ def get_text(url):
     for paragraph in paragraphs:
         file.write(paragraph.get_text() + "\n")
 
-
+@st.cache_resource
 def create_langchain_index(input_text):
     print("--indexing---")
     get_text(input_text)
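The hunk above adds @st.cache_resource to the index builder. That decorator caches a single shared instance across reruns and sessions without pickling it, which suits handles like a vector store or model. A minimal sketch of the pattern, with a hypothetical stand-in for the real scrape-and-index work:

import streamlit as st

@st.cache_resource  # one shared, unpickled instance per process
def build_index(url: str) -> dict:
    # hypothetical stand-in for get_text(url) plus Chroma indexing
    return {"indexed": url}

db = build_index("https://example.com")  # cheap on every rerun after the first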
@@ -65,8 +65,8 @@ def create_langchain_index(input_text):
     # load it into Chroma
     db = Chroma.from_documents(docs, embeddings)
     persist_directory = "chroma_db"
-
-
+    vectordb = Chroma.from_documents(documents=docs, embedding=embeddings, persist_directory=persist_directory)
+    db = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
     return db
 
 # @st.cache_resource
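These added lines persist the Chroma collection to disk and immediately reopen it, so later runs can load the index instead of re-embedding; note that the in-memory db = Chroma.from_documents(docs, embeddings) two lines earlier becomes redundant once db is rebound to the persisted copy. A sketch of the round trip under assumed imports (the commit does not show which embedding class the app uses; HuggingFaceEmbeddings is a guess consistent with the commented-out all-MiniLM-L6-v2 line):

from langchain_core.documents import Document
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma

docs = [Document(page_content="a scraped paragraph")]  # placeholder input
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Write the collection to ./chroma_db ...
Chroma.from_documents(documents=docs, embedding=embeddings, persist_directory="chroma_db")
# ... and reopen it from disk with the same embedding function.
db = Chroma(persist_directory="chroma_db", embedding_function=embeddings)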
@@ -79,7 +79,7 @@ def create_langchain_index(input_text):
     # return summary_response,tweet_response,ln_response
 
 
-
+@st.cache_data
 def get_response(input_text,query,db):
     print(f"--querying---{query}")
     retrieval_chain = RetrievalQA.from_chain_type(llm, chain_type="stuff", retriever=db.as_retriever())
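The last hunk decorates get_response with @st.cache_data, which hashes the function's arguments to build a cache key. Streamlit cannot hash a Chroma vector store, so passing db as-is would normally raise an UnhashableParamError; the documented workaround is a leading underscore on that parameter, which excludes it from hashing. A sketch of that adjusted signature (the underscore is a suggested fix, not part of the commit):

import streamlit as st
from langchain.chains import RetrievalQA

@st.cache_data
def get_response(input_text, query, _db):  # _db is skipped when hashing the cache key
    retrieval_chain = RetrievalQA.from_chain_type(llm, chain_type="stuff", retriever=_db.as_retriever())
    return retrieval_chain.run(query)  # assumes the module-level llm from the diff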