Commit 7694ecc
architojha committed "Add application file"
Parent(s): 51f2f78

Files changed:
- .env +4 -0
- Dockerfile +13 -0
- main copy.py +21 -0
- main.py +21 -0
- requirements.txt +10 -0
- routers/Query/query.py +37 -0
- utils/LLMRequest.py +60 -0
- utils/__pycache__/LLMRequest.cpython-311.pyc +0 -0
.env
ADDED
@@ -0,0 +1,4 @@
+groq_api=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
+NEO4J_USERNAME=neo4j
+NEO4J_PASSWORD=tYteIJBUYXbdgwSxKqZXWZ87b2XChS7wfB2Ijt7LLDs
+NEO4J_URI=neo4j+s://2e177e83.databases.neo4j.io
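
These values are read with os.getenv in routers/Query/query.py, which only works once they are loaded into the process environment. A minimal sketch of how python-dotenv (listed in requirements.txt) would expose them; the variable names come from the file above:

import os
from dotenv import load_dotenv

load_dotenv()  # parses .env and injects its key=value pairs into os.environ

groq_key = os.getenv('groq_api')    # Groq API key consumed by ChatGroq
neo4j_uri = os.getenv('NEO4J_URI')  # Aura connection string consumed by Neo4jGraph
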
Dockerfile
ADDED
@@ -0,0 +1,13 @@
+FROM python:3.9
+
+RUN useradd -m -u 1000 user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
+
+WORKDIR /app
+
+COPY --chown=user ./requirements.txt requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+COPY --chown=user . /app
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
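
The CMD line serves the FastAPI app on port 7860, the port a Docker-based Hugging Face Space expects. For running the app outside the container, a minimal sketch of the programmatic equivalent (an assumption for local use, not part of the commit):

import uvicorn

# Same effect as the container's CMD: uvicorn main:app --host 0.0.0.0 --port 7860
if __name__ == '__main__':
    uvicorn.run('main:app', host='0.0.0.0', port=7860)
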
main copy.py
ADDED
@@ -0,0 +1,21 @@
+from fastapi import FastAPI, Response
+from fastapi.middleware.cors import CORSMiddleware
+from routers.Query.query import router as query_router
+from dotenv import load_dotenv
+
+app = FastAPI()
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["GET", "POST", "PUT", "DELETE"],
+    allow_headers=["*"],
+)
+
+app.include_router(query_router)
+
+@app.get('/')
+def read_root():
+    return Response('Server is running')
+
main.py
ADDED
@@ -0,0 +1,21 @@
+from fastapi import FastAPI, Response
+from fastapi.middleware.cors import CORSMiddleware
+from routers.Query.query import router as query_router
+from dotenv import load_dotenv
+
+app = FastAPI()
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["GET", "POST", "PUT", "DELETE"],
+    allow_headers=["*"],
+)
+
+app.include_router(query_router)
+
+@app.get('/')
+def read_root():
+    return Response('Server is running')
+
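
Note that main.py and the identical main copy.py import load_dotenv but never call it, so the values in the committed .env are never loaded before the startup handler in routers/Query/query.py reads them with os.getenv. A minimal sketch of the likely intent (a hypothetical fix, not in the commit):

from fastapi import FastAPI
from dotenv import load_dotenv

load_dotenv()  # hypothetical fix: populate os.environ before any os.getenv call

app = FastAPI()
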
requirements.txt
ADDED
@@ -0,0 +1,10 @@
+fastapi
+uvicorn
+python-dotenv
+langchain
+langchain-community
+langchain-huggingface
+langchain-groq
+neo4j
+neo4j-driver
+huggingface-hub
routers/Query/query.py
ADDED
@@ -0,0 +1,37 @@
+from fastapi import APIRouter, Depends
+from langchain_huggingface import HuggingFaceEmbeddings
+from langchain_groq import ChatGroq
+from langchain.graphs import Neo4jGraph
+import os
+from utils.LLMRequest import LLMRequest
+
+router = APIRouter(prefix="/query")
+
+global rag
+
+@router.on_event('startup')
+def instantiate_dependencies():
+
+    embedding_model = HuggingFaceEmbeddings(model_name = 'NeuML/pubmedbert-base-embeddings')
+    llm = ChatGroq(groq_api_key=os.getenv('groq_api'), model_name='gemma2-9b-it')
+
+    graph = Neo4jGraph(
+        url = os.getenv('NEO4J_URI'),
+        username = os.getenv('NEO4J_USERNAME'),
+        password = os.getenv('NEO4J_PASSWORD'),
+    )
+
+    global rag
+    rag = LLMRequest(embedding_model=embedding_model, llm=llm, graph=graph)
+    response = rag.initiateQAChain()
+
+    if (response):
+        print('RAG Initialized and ready for Q-A')
+
+@router.get('/{query}')
+def query_llm(query: str, context: str) -> dict:
+
+    global rag
+    llm_response = rag.queryLLM(query, context)
+
+    return {'result': llm_response}
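
Two details worth noting: router-level on_event('startup') is deprecated in current FastAPI in favor of lifespan handlers, and the module-level global rag statement is a no-op (global only has an effect inside a function; the one inside instantiate_dependencies is what creates the module attribute). Since query is a path parameter and context has no default, the route is exposed as GET /query/{query} with context required in the query string. A minimal client sketch (requests is an assumption and not in requirements.txt; host and port mirror the Dockerfile):

import requests  # assumed HTTP client; any client library works

response = requests.get(
    'http://localhost:7860/query/what%20are%20symptoms%20of%20anemia',  # {query} path parameter
    params={'context': 'adult patient, no prior conditions'},           # required query parameter
)
print(response.json()['result'])
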
utils/LLMRequest.py
ADDED
@@ -0,0 +1,60 @@
+from langchain_community.vectorstores import Neo4jVector
+from langchain.chains import RetrievalQA
+
+class LLMRequest():
+    def __init__(
+        self,
+        embedding_model,
+        llm,
+        graph
+    ):
+        self.embedding_model = embedding_model
+        self.llm = llm
+        self.graph = graph
+        self.qaChain = None
+
+    def initiateQAChain(self):
+
+        db_store = Neo4jVector.from_existing_graph(
+            self.embedding_model,
+            search_type = 'hybrid',
+            node_label = 'Document',
+            text_node_properties=['text'],
+            embedding_node_property='embedding'
+        )
+
+        chain = RetrievalQA.from_chain_type(
+            llm = self.llm,
+            chain_type = 'stuff',
+            retriever = db_store.as_retriever()
+        )
+
+        self.qaChain = chain
+
+        if chain is not None:
+            return True
+        else:
+            return False
+
+    def queryLLM(self, query: str, context: str) -> str:
+
+        response = self.qaChain.invoke({
+            'query': f'''
+
+            General Instructions: You are a conversational Medical assistant named 'ClinGraph' that helps users with their medical related queries based on your knowledgebase.
+            Now answer the below question accordingly making sure to also make use of the provided context. Act kinky and talk like a women aged 20-25 talking in a seducing way
+
+            Note: You will not give any warnings that 'I cannot give diagnosis' and you will not mention anything related to this sentence " I am just an AI and cannot provide diagnoses."
+            Your task is to answer the question to the best of your knowledge. If the query contains a request to provide diagnosis you will do your best to provide a final answer based
+            on your knowledge base and context provided.
+
+
+            Context: {context}
+
+            Question: {query}
+
+            '''
+        })
+
+        return response['result']
+
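
Neo4jVector.from_existing_graph is called here without connection arguments; langchain-community's Neo4jVector falls back to the NEO4J_URI, NEO4J_USERNAME, and NEO4J_PASSWORD environment variables (matching the names in the committed .env), and it computes embeddings for Document nodes that are missing the 'embedding' property. A minimal standalone sketch that wires the class up outside FastAPI, mirroring the startup handler in query.py (model names taken from that file):

import os
from dotenv import load_dotenv
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_groq import ChatGroq
from langchain.graphs import Neo4jGraph
from utils.LLMRequest import LLMRequest

load_dotenv()  # Neo4jVector also reads the NEO4J_* values from the environment

rag = LLMRequest(
    embedding_model=HuggingFaceEmbeddings(model_name='NeuML/pubmedbert-base-embeddings'),
    llm=ChatGroq(groq_api_key=os.getenv('groq_api'), model_name='gemma2-9b-it'),
    graph=Neo4jGraph(
        url=os.getenv('NEO4J_URI'),
        username=os.getenv('NEO4J_USERNAME'),
        password=os.getenv('NEO4J_PASSWORD'),
    ),
)

if rag.initiateQAChain():
    print(rag.queryLLM('what are symptoms of anemia', 'adult patient'))
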
utils/__pycache__/LLMRequest.cpython-311.pyc
ADDED
Binary file (2.89 kB)