Red-Tech-Hub committed on
Commit
3c216f2
1 Parent(s): 89a30f5

Update run.py

Browse files
Files changed (1) hide show
  1. run.py +47 -44
run.py CHANGED
@@ -1,44 +1,47 @@
1
- from langchain.prompts import ChatPromptTemplate
2
- from langchain_core.output_parsers import StrOutputParser
3
- from langchain_core.runnables import RunnablePassthrough
4
- from langchain_community.vectorstores import Chroma
5
- from transformers import AutoModelForCausalLM
6
-
7
- embedding = AutoModelForCausalLM.from_pretrained(
8
- "deployllm/nomic-embed-text-v1.5-GGUF",
9
- model_type='llama',
10
- threads=3,
11
- )
12
-
13
- db = Chroma(
14
- persist_directory="./chroma_db",
15
- embedding_function=embedding,
16
- collection_name='CVE'
17
- )
18
-
19
- retriever = db.as_retriever()
20
-
21
- template = """Answer the question based only on the following context:
22
- {context}
23
- Do not tell the source of the data
24
- Question: {question}
25
- """
26
-
27
- prompt = ChatPromptTemplate.from_template(template)
28
-
29
- model = AutoModelForCausalLM.from_pretrained(
30
- "zephyr-7b-beta.Q4_K_S.gguf",
31
- model_type='mistral',
32
- threads=3,
33
- )
34
-
35
- chain = (
36
- {"context": retriever, "question": RunnablePassthrough()}
37
- | prompt
38
- | model
39
- | StrOutputParser()
40
- )
41
-
42
- # Uncomment and use the following for testing
43
- # for chunk in chain.stream("Your question here"):
44
- # print(chunk, end="", flush=True)
 
 
 
 
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from transformers import AutoModelForCausalLM

# nomic-embed models ship custom modeling code, so remote code must be trusted.
model_kwargs = {'trust_remote_code': True}

# Chroma's embedding_function must be an Embeddings object exposing
# embed_documents/embed_query. The previous code passed an
# AutoModelForCausalLM here, which has neither method and fails at
# retrieval time; use the HuggingFaceEmbeddings wrapper (already
# imported above, together with the otherwise-unused model_kwargs)
# pointed at the same nomic embedding model.
embedding = HuggingFaceEmbeddings(
    model_name="nomic-ai/nomic-embed-text-v1.5",
    model_kwargs=model_kwargs,
)

# Persistent vector store holding the CVE collection built elsewhere.
db = Chroma(
    persist_directory="./chroma_db",
    embedding_function=embedding,
    collection_name='CVE',
)

retriever = db.as_retriever()

template = """Answer the question based only on the following context:
{context}
Do not tell the source of the data
Question: {question}
"""

prompt = ChatPromptTemplate.from_template(template)

# NOTE(review): `model_type=` and `threads=` are ctransformers kwargs, not
# HuggingFace transformers kwargs, and transformers cannot load a bare
# .gguf file this way — this import presumably should come from
# `ctransformers`; confirm which library is installed in the deployment.
model = AutoModelForCausalLM.from_pretrained(
    "zephyr-7b-beta.Q4_K_S.gguf",
    model_type='mistral',
    threads=3,
)

# RAG chain: retrieve context for the question, fill the prompt template,
# run the LLM, and parse the output down to a plain string.
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)

# Uncomment and use the following for testing
# for chunk in chain.stream("Your question here"):
#     print(chunk, end="", flush=True)