SiraH committed on
Commit
37593b3
1 Parent(s): b31680c

Update app.py

Browse files

change to ver1

Files changed (1) hide show
  1. app.py +12 -13
app.py CHANGED
@@ -6,28 +6,27 @@ import pathlib
6
  from tempfile import NamedTemporaryFile
7
 
8
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
9
- from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
10
- from langchain_community.llms import LlamaCpp
11
  from langchain import PromptTemplate, LLMChain
12
  from langchain.callbacks.manager import CallbackManager
13
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
14
- from langchain_community.embeddings import HuggingFaceEmbeddings
15
  from langchain.chains import RetrievalQA
16
- from langchain_community.vectorstores import FAISS
17
  from PyPDF2 import PdfReader
18
  import os
19
  import time
20
  from langchain.chains.question_answering import load_qa_chain
21
  from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
22
 
23
- from langchain_community.document_loaders import TextLoader
24
- from langchain_community.document_loaders import PyPDFLoader
25
- # from langchain.document_loaders import PyPDFLoader
26
- # from langchain.document_loaders import Docx2txtLoader
27
- # from langchain.document_loaders.image import UnstructuredImageLoader
28
- # from langchain.document_loaders import UnstructuredHTMLLoader
29
- # from langchain.document_loaders import UnstructuredPowerPointLoader
30
- # from langchain.document_loaders import TextLoader
31
  from langchain.memory import ConversationBufferWindowMemory
32
 
33
  from langchain.memory import ConversationBufferMemory
@@ -168,7 +167,7 @@ def split_docs(documents,chunk_size=1000):
168
 
169
  @st.cache_resource
170
  def load_llama2_llamaCpp():
171
- core_model_name = "phi-2.Q4_K_M.gguf"
172
  #n_gpu_layers = 32
173
  n_batch = 512
174
  callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
 
6
  from tempfile import NamedTemporaryFile
7
 
8
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
9
+ from langchain.llms import HuggingFacePipeline
10
+ from langchain.llms import LlamaCpp
11
  from langchain import PromptTemplate, LLMChain
12
  from langchain.callbacks.manager import CallbackManager
13
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
14
+ from langchain.embeddings import HuggingFaceEmbeddings
15
  from langchain.chains import RetrievalQA
16
+ from langchain.vectorstores import FAISS
17
  from PyPDF2 import PdfReader
18
  import os
19
  import time
20
  from langchain.chains.question_answering import load_qa_chain
21
  from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
22
 
23
+ from langchain.document_loaders import TextLoader
24
+ from langchain.document_loaders import PyPDFLoader
25
+ from langchain.document_loaders import Docx2txtLoader
26
+ from langchain.document_loaders.image import UnstructuredImageLoader
27
+ from langchain.document_loaders import UnstructuredHTMLLoader
28
+ from langchain.document_loaders import UnstructuredPowerPointLoader
29
+ from langchain.document_loaders import TextLoader
 
30
  from langchain.memory import ConversationBufferWindowMemory
31
 
32
  from langchain.memory import ConversationBufferMemory
 
167
 
168
  @st.cache_resource
169
  def load_llama2_llamaCpp():
170
+ core_model_name = "llama-2-7b-chat.Q4_0.gguf"
171
  #n_gpu_layers = 32
172
  n_batch = 512
173
  callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])