import json
import logging
from cv_prompt import (
    ResumeQualityEvaluation,
    get_section_detection_prompt,
    get_content_quality_prompt,
    calculate_section_detection_score,
    calculate_overall_score
)
from openai_utils import get_ai_response
from langchain.output_parsers import PydanticOutputParser
from spelling_grammar_checker import evaluate_cv_text
from cv_quality import CV

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def analyze_cv(file_path):
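    """Run the full CV analysis pipeline on a single file.

    Extracts the CV text, analyzes personal information, checks spelling and
    grammar, detects resume sections, evaluates content quality, and combines
    everything into a single result dictionary. Returns a dict containing an
    "error" key if any step fails.
    """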
    try:
        # Imported lazily inside the function (likely to avoid a circular
        # import at module load time).
        from personal_information import analyze_personal_info
        # Extract text from CV
        cv = CV(file_path)
        text = cv.get_cv_text()

        # Personal Information Analysis
        personal_info = analyze_personal_info(file_path)

        # Spelling and Grammar Check
        error_percentage, spelling_grammar_score = evaluate_cv_text(text)

        # Section Detection
        sections_prompt = get_section_detection_prompt(text)
        sections_response = get_ai_response([{"role": "user", "content": sections_prompt}])
        if sections_response is None:
            return {"error": "Failed to get AI response for sections"}

        try:
            sections_data = json.loads(sections_response)
        except json.JSONDecodeError:
            return {"error": "Failed to parse AI response for sections as JSON"}
        detected_sections = sections_data.get('present_sections', [])
        section_detection_score = calculate_section_detection_score(detected_sections)
        logging.info(f"Detected sections: {detected_sections}")
        logging.info(f"Section detection score: {section_detection_score}")

        # Content Quality Analysis
        quality_prompt = get_content_quality_prompt(text)
        quality_response = get_ai_response([{"role": "user", "content": quality_prompt}])
        
        if quality_response is None:
            return {"error": "Failed to get AI response for content quality"}

        parser = PydanticOutputParser(pydantic_object=ResumeQualityEvaluation)
        evaluation_result = parser.parse(quality_response)
        
        overall_score = calculate_overall_score(evaluation_result)

        logging.info("All analyses completed")
        logging.info(f"Overall score: {overall_score}")

        return {
            "extracted_text": text,
            "personal_info": personal_info,
            "spelling_grammar_error_percentage": error_percentage,
            "spelling_grammar_score": spelling_grammar_score,
            "detected_sections": detected_sections,
            "section_detection_score": section_detection_score,
            "content_analysis": evaluation_result.dict(),
            "overall_score": overall_score
        }
    except Exception as e:
        logging.error(f"Error in CV analysis: {str(e)}", exc_info=True)
        return {"error": str(e)}