Darka001 committed
Commit 53204dd · verified · 1 Parent(s): 75c35f1

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -21,11 +21,10 @@ from langchain.chains import LLMChain
 from langchain_core.runnables import RunnablePassthrough, RunnableParallel
 
 
-instructor_embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-large",
-                                              model_kwargs={"device": "cuda"})
 
+model_name= 'mistralai/Mistral-7B-v0.1'
 
-model_name='SherlockAssistant/Mistral-7B-Instruct-Ukrainian'
+#model_name='SherlockAssistant/Mistral-7B-Instruct-Ukrainian'
 
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 #tokenizer.pad_token = tokenizer.unk_token
@@ -94,6 +93,7 @@ text_generation_pipeline = pipeline(
 )
 mistral_llm = HuggingFacePipeline(pipeline=text_generation_pipeline)
 # # # load chroma from disk
+instructor_embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-large")
 db3 = Chroma(persist_directory="/chroma/", embedding_function=instructor_embeddings)
 
 
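
For context, a minimal sketch of how the lines touched by this commit fit together after the change. Only the model_name switch, the HuggingFacePipeline wrapper, the HuggingFaceEmbeddings call without model_kwargs={"device": "cuda"}, and the Chroma load come from the diff; the import paths, the AutoModelForCausalLM loading step, and the pipeline arguments are assumptions added for illustration and may differ from app.py.

# Sketch only: imports and generation settings are assumed, not taken from app.py.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFacePipeline
from langchain_community.vectorstores import Chroma

# This commit switches the checkpoint from the Ukrainian fine-tune to the base model.
model_name = 'mistralai/Mistral-7B-v0.1'
#model_name = 'SherlockAssistant/Mistral-7B-Instruct-Ukrainian'

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)  # assumed loading step

text_generation_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,  # assumed value
)
mistral_llm = HuggingFacePipeline(pipeline=text_generation_pipeline)

# The embeddings are now created without model_kwargs={"device": "cuda"} and only
# after the LLM pipeline, right before the Chroma index is loaded from disk.
instructor_embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-large")
db3 = Chroma(persist_directory="/chroma/", embedding_function=instructor_embeddings)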