import gradio as gr
import torch  # required for the PyTorch backend used by the pipeline
from transformers import pipeline
import pandas as pd
import re


# NER pipeline used to pull entities out of the resume text.
# aggregation_strategy="simple" merges word-piece tokens into whole entities
# (e.g. "Py" + "##thon" -> "Python") and exposes the label under 'entity_group'.
nlp = pipeline(
    "ner",
    model="dbmdz/bert-large-cased-finetuned-conll03-english",
    framework="pt",
    aggregation_strategy="simple",
)

def parse_resume(resume_text):
    """Parse a resume and extract details like name, email, phone, and skills."""
    # Simple regex patterns for US-style phone numbers and email addresses.
    phone_pattern = r'\(?\d{3}\)?[-.\s]?\d{3}[-.\s]?\d{4}'
    email_pattern = r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}'

    phone = re.findall(phone_pattern, resume_text)
    email = re.findall(email_pattern, resume_text)

    # Run NER over the resume text. With aggregation_strategy="simple" each
    # result carries an 'entity_group' label: PER, ORG, LOC, or MISC.
    entities = nlp(resume_text)
    names = [entity['word'] for entity in entities if entity['entity_group'] == 'PER']
    skills = [entity['word'] for entity in entities if entity['entity_group'] == 'MISC']

    skills = ", ".join(skills) if skills else "No skills found"

    parsed_data = {
        "Name": names[0] if names else "Not found",
        "Phone": phone[0] if phone else "Not found",
        "Email": email[0] if email else "Not found",
        "Skills": skills,
    }

    return parsed_data


def process_resumes(csv_file):
    """Process a CSV file of resumes and write the parsed results to a single Excel file."""
    # gr.File may hand back either a path string or a tempfile-like object
    # depending on the Gradio version, so accept both.
    csv_path = csv_file.name if hasattr(csv_file, "name") else csv_file
    df = pd.read_csv(csv_path)

    if 'Resume' not in df.columns:
        # Raising gr.Error shows the message in the UI; returning a plain string
        # would be misinterpreted as a file path by the File output component.
        raise gr.Error("The CSV file must contain a 'Resume' column.")

    all_parsed_data = []
    for _, row in df.iterrows():
        resume_text = str(row['Resume'])
        parsed_info = parse_resume(resume_text)
        all_parsed_data.append(parsed_info)

    parsed_df = pd.DataFrame(all_parsed_data)

    # Writing .xlsx requires an Excel engine such as openpyxl to be installed.
    output_file = "parsed_resumes.xlsx"
    parsed_df.to_excel(output_file, index=False)

    return output_file


gr.Interface(
    fn=process_resumes,
    inputs=gr.File(file_count="single", label="Upload Resume CSV"),
    outputs=gr.File(label="Download Parsed Data (Excel)"),
    title="AI Resume Parser",
    description="Upload a CSV file containing resume texts to extract details like Name, Email, Phone, and Skills. The results will be saved in an Excel file."
).launch()
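
# --- Optional local smoke test (an assumption, not part of the original app) ---
# A minimal way to produce an input file with the required 'Resume' column for
# trying the interface; the file name "sample_resumes.csv" is purely illustrative.
#
#   import pandas as pd
#   pd.DataFrame({"Resume": [
#       "Jane Doe, jane.doe@example.com, (555) 123-4567. Experienced in Python and SQL."
#   ]}).to_csv("sample_resumes.csv", index=False)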