Upload 2 files
- backend/analysis.py +51 -0
- backend/vector_store.py +13 -0
backend/analysis.py
ADDED
@@ -0,0 +1,51 @@
+import os
+from dotenv import load_dotenv
+from langchain_groq import ChatGroq
+from langchain_core.prompts import PromptTemplate
+
+# Set up Groq API key
+load_dotenv()  # Load environment variables from .env file
+os.environ["GROQ_API_KEY"] = os.getenv("GROQ_API_KEY")  # Set the Groq API key from environment variables
+
+# Initialize the ChatGroq model with the specified model name
+llm = ChatGroq(model_name="mixtral-8x7b-32768")
+
+def analyze_resume(full_resume, job_description):
+    # Template for analyzing the resume against the job description
+    template = """
+    You are an AI assistant specialized in resume analysis and recruitment. Analyze the given resume and compare it with the job description.
+
+    Example Response Structure:
+
+    **OVERVIEW**:
+    - **Match Percentage**: [Calculate overall match percentage between the resume and job description]
+    - **Matched Skills**: [List the skills in the job description that match the resume]
+    - **Unmatched Skills**: [List the skills in the job description that are missing in the resume]
+
+    **DETAILED ANALYSIS**:
+    Provide a detailed analysis about:
+    1. Overall match percentage between the resume and job description
+    2. List of skills from the job description that match the resume
+    3. List of skills from the job description that are missing in the resume
+
+    **Additional Comments**:
+    Additional comments about the resume and suggestions for the recruiter or HR manager.
+
+    Resume: {resume}
+    Job Description: {job_description}
+
+    Analysis:
+    """
+    prompt = PromptTemplate(  # Create a prompt template with input variables
+        input_variables=["resume", "job_description"],
+        template=template
+    )
+
+    # Create a chain combining the prompt and the language model
+    chain = prompt | llm
+
+    # Invoke the chain with input data
+    response = chain.invoke({"resume": full_resume, "job_description": job_description})
+
+    # Return the content of the response
+    return response.content
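For context, a minimal usage sketch of how analyze_resume might be called (not part of this commit): the sample resume and job description strings are assumptions for illustration, and a .env file providing GROQ_API_KEY is assumed to exist.

from backend.analysis import analyze_resume  # assumes the repo root is on PYTHONPATH

# Hypothetical inputs, used only to illustrate the call
full_resume = "Data analyst with 3 years of experience in Python, SQL, and Tableau."
job_description = "Hiring a data analyst skilled in Python, SQL, and Power BI."

# Returns the model's analysis text (match percentage, matched/unmatched skills, comments)
report = analyze_resume(full_resume, job_description)
print(report)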
backend/vector_store.py
ADDED
@@ -0,0 +1,13 @@
+from langchain_huggingface import HuggingFaceEmbeddings
+from langchain_community.vectorstores import FAISS
+
+# Initialize the Hugging Face embedding model
+embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
+
+def create_vector_store(chunks):
+    # Store embeddings into the vector store
+    vector_store = FAISS.from_documents(
+        documents=chunks,  # Input chunks to the vector store
+        embedding=embeddings  # Use the initialized embeddings model
+    )
+    return vector_store
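Similarly, a minimal sketch of how create_vector_store might be used (not part of this commit): the sample Document chunks and the similarity_search query are assumptions for illustration.

from langchain_core.documents import Document
from backend.vector_store import create_vector_store  # assumes the repo root is on PYTHONPATH

# Hypothetical chunks; in the app these would come from splitting an uploaded resume
chunks = [
    Document(page_content="Experienced Python developer with LangChain and FAISS exposure."),
    Document(page_content="Built retrieval pipelines using sentence-transformer embeddings."),
]

vector_store = create_vector_store(chunks)

# Retrieve the chunk most similar to a query
results = vector_store.similarity_search("Python developer", k=1)
print(results[0].page_content)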