NEXAS committed
Commit 57e380d · verified · 1 Parent(s): 0ddb493

Update utils/qa_text.py

Files changed (1)
  1. utils/qa_text.py +83 -0
utils/qa_text.py CHANGED
@@ -0,0 +1,83 @@
+ import os
+ from typing import List
+ from langchain_groq import ChatGroq
+ from langchain.prompts import PromptTemplate
+ from langchain_community.vectorstores import Qdrant
+ from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
+ from qdrant_client import QdrantClient
+ #from langchain_community.chat_models import ChatOllama
+
+
+ #import chainlit as cl
+ from langchain.chains import RetrievalQA
+
+ # bring in our GROQ_API_KEY and Qdrant credentials from .env
+ from dotenv import load_dotenv
+ load_dotenv()
+
+ groq_api_key = os.getenv("GROQ_API_KEY")
+ qdrant_url = os.getenv("QDRANT_URL")
+ qdrant_api_key = os.getenv("QDRANT_API_KEY")
+
+ custom_prompt_template = """Use the following pieces of information to answer the user's question.
+ If you don't know the answer, just say that you don't know. If the question is out of context, say so, but still try to provide a helpful answer, and don't be rude.
+ Context: {context}
+ Question: {question}
+ Only return the helpful answer below and nothing else.
+ Helpful answer:
+ """
+
+ def set_custom_prompt():
+     """
+     Prompt template for QA retrieval for each vectorstore
+     """
+     prompt = PromptTemplate(template=custom_prompt_template,
+                             input_variables=['context', 'question'])
+     return prompt
+
+
+ chat_model = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768")
+ #chat_model = ChatGroq(temperature=0, model_name="Llama2-70b-4096")
+ #chat_model = ChatOllama(model="llama2", request_timeout=30.0)
+
+ client = QdrantClient(api_key=qdrant_api_key, url=qdrant_url)
+
+
+ def retrieval_qa_chain(llm, prompt, vectorstore):
+     # "Stuff" the top-3 retrieved chunks into the custom prompt and return sources
+     qa_chain = RetrievalQA.from_chain_type(
+         llm=llm,
+         chain_type="stuff",
+         retriever=vectorstore.as_retriever(search_kwargs={'k': 3}),
+         return_source_documents=True,
+         chain_type_kwargs={'prompt': prompt}
+     )
+     return qa_chain
+
+
+ def qa_bot():
+     # Connect embeddings, the existing "rag" Qdrant collection, and the LLM
+     embeddings = FastEmbedEmbeddings()
+     vectorstore = Qdrant(client=client, embeddings=embeddings, collection_name="rag")
+     llm = chat_model
+     qa_prompt = set_custom_prompt()
+     qa = retrieval_qa_chain(llm, qa_prompt, vectorstore)
+     return qa
+
+ #---------------------------------------------------------------------#
+
+ # Commented-out one-time ingestion: upload documents to a Qdrant Cloud collection
+ #qdrant_cloud_api_key = "your_qdrant_cloud_api_key"
+ #qdrant_url = "your_qdrant_url"
+
+ #qdrant_cloud = Qdrant.from_documents(
+ #    docs,
+ #    embeddings,
+ #    url=qdrant_url,
+ #    prefer_grpc=True,
+ #    api_key=qdrant_cloud_api_key,
+ #    collection_name="qdrant_cloud_documents",
+ #)
+
+ #---------------------------------------------------------------------#
+ query = "how to make coffee"
+ print(query)
+
+ chain = qa_bot()
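
Note that the committed file builds the chain and prints the query but never actually runs one through the other. A minimal driver sketch, not part of the commit, assuming GROQ_API_KEY, QDRANT_URL, and QDRANT_API_KEY are set in .env and the "rag" collection is already populated with FastEmbed vectors:

# Hypothetical usage, not part of this commit: feed the query to the chain.
# RetrievalQA expects its input under the "query" key and, because the chain
# was built with return_source_documents=True, returns a dict with "result"
# (the model's answer) and "source_documents" (the retrieved chunks).
response = chain.invoke({"query": query})
print(response["result"])
for doc in response["source_documents"]:
    print(doc.metadata)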