Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -2,7 +2,7 @@ import os
|
|
2 |
import gradio as gr
|
3 |
import numpy as np
|
4 |
import tensorflow as tf
|
5 |
-
from transformers import
|
6 |
from dotenv import load_dotenv
|
7 |
import nltk
|
8 |
|
@@ -10,9 +10,9 @@ import nltk
|
|
10 |
load_dotenv()
|
11 |
hf_api_key = os.getenv("HUGGING_FACE_KEY")
|
12 |
|
13 |
-
# Initialize Hugging Face
|
14 |
llama_model = "meta-llama/Meta-Llama-3.1-8B-Instruct"
|
15 |
-
|
16 |
|
17 |
# Load the TensorFlow model
|
18 |
model = tf.keras.models.load_model("resume_generator_model.h5")
|
@@ -21,13 +21,12 @@ model = tf.keras.models.load_model("resume_generator_model.h5")
|
|
21 |
def enhance_with_huggingface(resume_text, job_title):
|
22 |
"""Generate enhanced resume content using Llama."""
|
23 |
prompt = f"Enhance the following resume for the job title '{job_title}': {resume_text}"
|
24 |
-
response =
|
25 |
-
return response['generated_text']
|
26 |
|
27 |
def enhance_with_local_model(resume_text, job_title):
|
28 |
"""Generate enhancements using local TensorFlow model."""
|
29 |
# Placeholder example: Use some custom logic for enhancement based on the local model.
|
30 |
-
# This is a simplified example assuming model returns some text modification.
|
31 |
sample_input = np.array([[len(resume_text.split()), len(job_title.split())]])
|
32 |
enhancement_score = model.predict(sample_input)
|
33 |
return f"Enhanced (Local Model) - Score: {enhancement_score[0][0]:.2f}"
|
|
|
2 |
import gradio as gr
import numpy as np
import tensorflow as tf
from transformers import pipeline
from dotenv import load_dotenv
import nltk

# Load environment variables; expects HUGGING_FACE_KEY in a local .env file.
load_dotenv()
hf_api_key = os.getenv("HUGGING_FACE_KEY")

# Initialize Hugging Face Text Generation Pipeline.
# NOTE: `use_auth_token` is deprecated in recent transformers releases;
# `token` is the supported replacement for passing an access token
# (required for gated models such as Llama 3.1).
llama_model = "meta-llama/Meta-Llama-3.1-8B-Instruct"
text_generator = pipeline("text-generation", model=llama_model, token=hf_api_key)

# Load the TensorFlow model used for the local (offline) enhancement path.
model = tf.keras.models.load_model("resume_generator_model.h5")
|
|
|
21 |
def enhance_with_huggingface(resume_text, job_title):
    """Generate enhanced resume content using the Llama pipeline.

    Args:
        resume_text: Raw resume text to improve.
        job_title: Target job title used to steer the enhancement.

    Returns:
        The generated enhancement text, with the echoed prompt removed.
    """
    prompt = f"Enhance the following resume for the job title '{job_title}': {resume_text}"
    # `max_new_tokens` bounds only the generated continuation. The previous
    # `max_length=500` counted prompt tokens too, so a long resume could leave
    # no room for output (or raise an error).
    response = text_generator(prompt, max_new_tokens=500, num_return_sequences=1)
    generated = response[0]['generated_text']
    # text-generation pipelines echo the prompt at the start of the output;
    # strip it so callers receive only the enhancement itself.
    if generated.startswith(prompt):
        generated = generated[len(prompt):].lstrip()
    return generated
26 |
|
27 |
def enhance_with_local_model(resume_text, job_title):
    """Generate enhancements using local TensorFlow model.

    Placeholder logic: builds a tiny feature vector from the word counts of
    the resume and job title, scores it with the locally loaded Keras model,
    and reports the resulting score.
    """
    resume_words = len(resume_text.split())
    title_words = len(job_title.split())
    features = np.array([[resume_words, title_words]])
    score = model.predict(features)[0][0]
    return f"Enhanced (Local Model) - Score: {score:.2f}"
|