switched back to langchain
backend2.py  +2 -1

backend2.py  CHANGED
@@ -53,6 +53,7 @@ def load_documents(directory):
     logger.debug("Loaded %d documents in %.2f seconds.", len(documents), end_time - start_time)
     return documents
 
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 def prepare_documents(documents):
     logger.debug("Preparing documents for embedding.")
@@ -67,7 +68,7 @@ def prepare_documents(documents):
     return None
 
 modelPath = "sentence-transformers/all-MiniLM-l6-v2"
-model_kwargs = {'device':
+model_kwargs = {'device': device}
 encode_kwargs = {'normalize_embeddings': False}
 embeddings = HuggingFaceEmbeddings(model_name=modelPath, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs)
 
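For context, here is a minimal sketch of how the changed lines fit together after this commit: the device is chosen once at module level and forwarded to langchain's HuggingFaceEmbeddings wrapper. The import path and the embed_query usage at the end are assumptions for illustration, not taken from backend2.py.

# Sketch only: the import path is an assumption, not copied from backend2.py.
import torch
from langchain_community.embeddings import HuggingFaceEmbeddings

# Use the first GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

modelPath = "sentence-transformers/all-MiniLM-l6-v2"
model_kwargs = {'device': device}                # forwarded to the underlying SentenceTransformer
encode_kwargs = {'normalize_embeddings': False}  # keep raw, unnormalized vectors

embeddings = HuggingFaceEmbeddings(
    model_name=modelPath,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
)

# Illustrative usage: all-MiniLM-L6-v2 produces 384-dimensional embeddings.
vector = embeddings.embed_query("test query")
print(len(vector))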