Red-Tech-Hub committed on
Commit • 3c216f2
1 Parent(s): 89a30f5
Update run.py
run.py CHANGED
@@ -1,44 +1,47 @@
-from langchain.prompts import ChatPromptTemplate
-from langchain_core.output_parsers import StrOutputParser
-from langchain_core.runnables import RunnablePassthrough
-from langchain_community.
-from
... (the remaining 39 removed lines are truncated in the source view)
+from langchain.prompts import ChatPromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.runnables import RunnablePassthrough
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.vectorstores import Chroma
+from transformers import AutoModelForCausalLM
+
+model_kwargs = {'trust_remote_code': True}
+
+embedding = AutoModelForCausalLM.from_pretrained(
+    "nomic-embed-text-v1.5.Q4_k_S.gguf",
+    model_type='llama',
+    threads=3,
+)
+
+db = Chroma(
+    persist_directory="./chroma_db",
+    embedding_function=embedding,
+    collection_name='CVE'
+)
+
+retriever = db.as_retriever()
+
+template = """Answer the question based only on the following context:
+{context}
+Do not tell the source of the data
+Question: {question}
+"""
+
+prompt = ChatPromptTemplate.from_template(template)
+
+model = AutoModelForCausalLM.from_pretrained(
+    "zephyr-7b-beta.Q4_K_S.gguf",
+    model_type='mistral',
+    threads=3,
+)
+
+chain = (
+    {"context": retriever, "question": RunnablePassthrough()}
+    | prompt
+    | model
+    | StrOutputParser()
+)
+
+# Uncomment and use the following for testing
+# for chunk in chain.stream("Your question here"):
+#     print(chunk, end="", flush=True)