tony346 committed on
Commit
9b4aeb4
1 Parent(s): e19856d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -6
app.py CHANGED
@@ -43,13 +43,14 @@ def get_text_chunks(text):
43
 
44
  def get_vectorstore(text_chunks):
45
  # Load the desired embeddings model.
46
- # embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
47
- # model_kwargs={'device': 'cpu'})
 
48
  # embeddings = OpenAIEmbeddings()sentence-transformers/all-MiniLM-L6-v2
49
- embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl",
50
- model_kwargs={'device':'cpu'})
51
- # vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
52
- vectorstore = Chroma.from_texts(texts=text_chunks, embedding=embeddings)
53
 
54
  return vectorstore
55
 
@@ -58,6 +59,8 @@ def get_conversation_chain(vectorstore):
58
  # llm = ChatOpenAI()
59
  # llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
60
  config = {'max_new_tokens': 2048}
 
 
61
  llm = CTransformers(model="llama-2-7b-chat.ggmlv3.q2_K.bin", model_type="llama", config=config)
62
  memory = ConversationBufferMemory(
63
  memory_key='chat_history', return_messages=True)
 
43
 
44
  def get_vectorstore(text_chunks):
45
  # Load the desired embeddings model.
46
+ embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
47
+ model_kwargs={'device': 'cpu'})
48
+ print('embeddings = ', embeddings)
49
  # embeddings = OpenAIEmbeddings()sentence-transformers/all-MiniLM-L6-v2
50
+ # embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl",
51
+ # model_kwargs={'device':'cpu'})
52
+ vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
53
+ # vectorstore = Chroma.from_texts(texts=text_chunks, embedding=embeddings)
54
 
55
  return vectorstore
56
 
 
59
  # llm = ChatOpenAI()
60
  # llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
61
  config = {'max_new_tokens': 2048}
62
+
63
+
64
  llm = CTransformers(model="llama-2-7b-chat.ggmlv3.q2_K.bin", model_type="llama", config=config)
65
  memory = ConversationBufferMemory(
66
  memory_key='chat_history', return_messages=True)