Update app.py
app.py CHANGED
@@ -1,11 +1,11 @@
 import streamlit as st
 import os
-from together import Together
 from PyPDF2 import PdfReader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import FAISS
-from langchain_openai import OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings  # This might need to be adjusted if `Together` has its own embeddings module
 from langchain.prompts import ChatPromptTemplate
+from together import Together
 
 # Set up API client
 client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
@@ -27,7 +27,7 @@ def get_text_chunks(text):
 
 # Function to create and save a FAISS vector store from text chunks
 def get_vector_store(text_chunks):
-    embeddings = OpenAIEmbeddings(client=client)
+    embeddings = OpenAIEmbeddings(client=client)  # Adjust this if `Together` has its own method for embeddings
     vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
     vector_store.save_local("faiss_index")
 
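A note on the embedding comments above: both sides keep `OpenAIEmbeddings(client=client)`, and the inline comments flag this as provisional, since `OpenAIEmbeddings` is an OpenAI integration and is unlikely to work when handed a Together client. One option, sketched below on the assumption that the Together SDK's `client.embeddings.create(...)` endpoint is available (the wrapper class and the model name are illustrative choices, not part of this commit), is a small adapter implementing LangChain's `Embeddings` interface:

from langchain_core.embeddings import Embeddings
from together import Together

class TogetherEmbeddingsWrapper(Embeddings):
    """Minimal adapter so FAISS.from_texts can embed via Together."""

    def __init__(self, client: Together, model: str = "togethercomputer/m2-bert-80M-8k-retrieval"):
        self.client = client
        self.model = model  # illustrative pick; any Together embedding model would do

    def embed_documents(self, texts):
        # The endpoint mirrors OpenAI's: one vector comes back per input string
        response = self.client.embeddings.create(model=self.model, input=texts)
        return [item.embedding for item in response.data]

    def embed_query(self, text):
        return self.embed_documents([text])[0]

An instance of this wrapper could replace `OpenAIEmbeddings(client=client)` in both `get_vector_store` and `user_input`; alternatively, the `langchain-together` package ships a ready-made `TogetherEmbeddings` class if adding a dependency is acceptable.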
@@ -36,13 +36,12 @@ def get_conversational_chain():
     prompt_template = """Answer the question concisely, focusing on the most relevant and important details from the PDF context.
     If the answer is not found within the PDF, please state 'answer is not available in the context.'"""
     prompt = ChatPromptTemplate.from_template(prompt_template)
-
-    chain = load_qa_chain(model, chain_type="conversational", prompt=prompt)
+    chain = load_qa_chain(client, "mistralai/Mixtral-8x7B-Instruct-v0.1", prompt=prompt)  # Assuming load_qa_chain can accept Together client and model ID
     return chain
 
 # Function to process user question and provide a response
 def user_input(user_question):
-    embeddings = OpenAIEmbeddings(client=client)
+    embeddings = OpenAIEmbeddings(client=client)  # Adjust this if `Together` has its own method for embeddings
     new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
     docs = new_db.similarity_search(user_question)
     chain = get_conversational_chain()
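The comment on the new `load_qa_chain` call is worth taking seriously: LangChain's `load_qa_chain` expects a LangChain language model plus a `chain_type` of "stuff", "map_reduce", "refine", or "map_rerank", so neither the old call (`chain_type="conversational"`) nor the new one (a raw Together client and a model ID string) matches its actual signature, and the function is never imported in either version. A hypothetical alternative that skips the chain entirely and calls Together's chat completions endpoint directly (the function name and message layout here are illustrative, not the commit's code):

import os
from together import Together

client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))

SYSTEM_PROMPT = (
    "Answer the question concisely, focusing on the most relevant and important "
    "details from the PDF context. If the answer is not found within the PDF, "
    "please state 'answer is not available in the context.'"
)

def answer_question(docs, user_question):
    # docs: the Document objects returned by FAISS similarity_search in user_input()
    context = "\n\n".join(doc.page_content for doc in docs)
    response = client.chat.completions.create(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": f"Context:\n{context}\n\nQuestion: {user_question}"},
        ],
    )
    return response.choices[0].message.content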
@@ -77,4 +76,4 @@ def main():
         st.error(str(e))
 
 if __name__ == "__main__":
-    main()
+    main()
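If keeping the LangChain chain API is preferred over calling the SDK directly, the `langchain-together` integration package offers a middle ground. A minimal sketch, assuming that package is installed (`load_qa_chain` is deprecated in recent LangChain releases, but it is what this commit uses) and noting that a "stuff" QA prompt needs `{context}` and `{question}` placeholders, which the commit's template does not yet include:

from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import ChatPromptTemplate
from langchain_together import ChatTogether

prompt = ChatPromptTemplate.from_template(
    "Answer the question concisely, focusing on the most relevant and important details "
    "from the PDF context. If the answer is not found within the PDF, please state "
    "'answer is not available in the context.'\n\nContext:\n{context}\n\nQuestion: {question}"
)

llm = ChatTogether(model="mistralai/Mixtral-8x7B-Instruct-v0.1")  # reads TOGETHER_API_KEY from the environment
chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt)

# Usage inside user_input(), with docs from similarity_search:
# answer = chain.invoke({"input_documents": docs, "question": user_question})["output_text"]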