import os

from dotenv import load_dotenv
from transformers import AutoTokenizer, pipeline

load_dotenv()

# Load a larger Hugging Face model
model_name = "EleutherAI/gpt-neo-2.7B"
generator = pipeline("text-generation", model=model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)


def modelFeedback(ats_score, resume_data):
    input_prompt = f"""
    You are now an ATS Score analyzer, and the given ATS Score is {int(ats_score * 100)}%.
    Your task is to provide feedback to the user based on the ATS score.
    Print the ATS score first. Mention where the resume is good and where it lacks.
    Go through each section of the user's resume and discuss its good and bad points.

    Resume Data: {resume_data}
    """

    # Tokenize the input to check its length
    input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
    input_length = input_ids.shape[1]
    print(f"Input length: {input_length}")

    # Generate a response; max_new_tokens bounds the length of the generated feedback
    # so the pipeline does not stop at its small default max_length.
    response = generator(
        input_prompt,
        max_new_tokens=500,
        num_return_sequences=1,
    )

    # Check that the pipeline returned a non-empty result
    if response and len(response) > 0:
        generated_text = response[0]["generated_text"]
    else:
        generated_text = "No response generated."

    return generated_text
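

# Minimal usage sketch (illustrative only): the score and resume text below are
# placeholder values, assumed here to come from an earlier scoring/parsing step
# elsewhere in the project.
if __name__ == "__main__":
    sample_score = 0.72  # hypothetical ATS match score in the range [0, 1]
    sample_resume = "John Doe\nSoftware Engineer\nSkills: Python, SQL, Docker"

    feedback = modelFeedback(sample_score, sample_resume)
    print(feedback)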