yahrkapa commited on
Commit
021b3f8
1 Parent(s): e4e26a9
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .env
chat.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from langchain_nvidia_ai_endpoints import ChatNVIDIA
3
+ from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
4
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
5
+ from llama_index.core import Settings
6
+ from langchain_core.prompts import ChatPromptTemplate
7
+ from dotenv import load_dotenv
8
+
9
+ load_dotenv()
10
+
11
# Module-level RAG state, populated by load_model(); all None until it runs.
index = None            # VectorStoreIndex built from the documents in pdf/
query_engine = None     # query engine derived from `index`
prompt_template = None  # ChatPromptTemplate applied to every incoming question
16
def load_model(data_dir='pdf/', embed_model_name="sentence-transformers/all-MiniLM-L6-v2"):
    """Build the global RAG pipeline: LLM, embeddings, document index, prompt.

    Args:
        data_dir: Directory whose documents are indexed (default 'pdf/',
            matching the original hard-coded path).
        embed_model_name: HuggingFace sentence-embedding model used for the
            vector index (local model, so indexing makes no API calls).

    Side effects:
        Populates the module-level ``index``, ``query_engine`` and
        ``prompt_template`` globals consumed by ``query_chatbot_with_prompt``.

    Raises:
        RuntimeError: if the NVIDIA_API_KEY environment variable is not set
            (fail fast instead of an opaque auth error on first query).
    """
    global index, query_engine, prompt_template

    api_key = os.getenv("NVIDIA_API_KEY")
    if not api_key:
        raise RuntimeError("NVIDIA_API_KEY is not set; add it to your .env file")

    # LLM served through NVIDIA AI endpoints.
    llm = ChatNVIDIA(model="mixtral_8x7b", api_key=api_key)

    Settings.embed_model = HuggingFaceEmbedding(model_name=embed_model_name)
    Settings.llm = llm

    # Index every document found in data_dir and expose a query engine over it.
    documents = SimpleDirectoryReader(data_dir).load_data()
    index = VectorStoreIndex.from_documents(documents)
    query_engine = index.as_query_engine()

    # System persona + user question template applied to every query.
    prompt_template = ChatPromptTemplate.from_messages(
        [
            ("system", "Act as my personal assistant and handle inquiries from individuals interested in my professional journey. I am actively seeking job opportunities in the field of AI and Machine Learning. Respond to all questions about me smartly, highlighting my relevant experience. Ensure that your answers are precise and engaging."),
            ("user", "Question: {question}")
        ]
    )
42
+
43
def query_chatbot_with_prompt(question):
    """Answer *question* against the indexed documents.

    Requires ``load_model()`` to have run first (it populates the
    module-level ``prompt_template`` and ``query_engine``).

    Args:
        question: The user's question as a plain string.

    Returns:
        The query engine's response on success, or an error string beginning
        with "An error occurred:" on any failure (contract relied on by the
        API layer in main.py).
    """
    try:
        # Explicit guard: without it a None prompt_template surfaces as an
        # unhelpful AttributeError message.
        if prompt_template is None or query_engine is None:
            raise RuntimeError("Model not loaded; call load_model() first")

        # format() renders the chat template to a single prompt string.
        # The previous format_messages() + str() approach stringified a list
        # of message objects, embedding their Python repr into the query.
        formatted_prompt = prompt_template.format(question=question)

        return query_engine.query(formatted_prompt)
    except Exception as e:
        return f"An error occurred: {str(e)}"
main.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException
2
+ from pydantic import BaseModel
3
+ from chat import query_chatbot_with_prompt, load_model
4
+
5
app = FastAPI()  # ASGI application; the route and event handlers below register on it at import time.
6
+
7
class QueryRequest(BaseModel):
    """Request body for POST /query/: the question forwarded to the chatbot."""
    question: str
9
+
10
+ @app.on_event("startup")
11
+ async def startup_event():
12
+ load_model()
13
+
14
+ @app.post("/query/")
15
+ async def query_chatbot(request: QueryRequest):
16
+ response = query_chatbot_with_prompt(request.question)
17
+ if not response:
18
+ raise HTTPException(status_code=500, detail="No response from the chatbot")
19
+ return {"response": response}
20
+
pdf/Yatharth-Kapadia-FlowCV-Resume-20240418.pdf ADDED
Binary file (165 kB). View file
 
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ langchain
2
+ transformers
3
+ langchain_nvidia_ai_endpoints
4
+ llama_index
5
+ langchain_core
6
+ python-dotenv
7
+ fastapi
8
+ pydantic
9
+ llama-index-embeddings-huggingface
10
+ llama-index-llms-langchain