Deepak Pant committed
Commit · c2e4879 · Parent(s): a5883c9

Updated poetry setup
Files changed:

- .devcontainer/devcontainer.json +9 -3
- .vscode/settings.json +2 -1
- Makefile +4 -56
- README.md +1 -1
- poetry.lock +0 -0
- pyproject.toml +78 -61
- resume_maker_ai_agent/app2.py +64 -0
- resume_maker_ai_agent/config/agents.yaml +36 -6
- resume_maker_ai_agent/config/tasks.yaml +43 -5
- resume_maker_ai_agent/crew.py +77 -14
- resume_maker_ai_agent/services/app_service.py +37 -28
- resume_maker_ai_agent/services/web_scarapper_service.py +0 -226
- resume_maker_ai_agent/tools/custom_tool.py +2 -7
.devcontainer/devcontainer.json CHANGED

@@ -15,7 +15,10 @@
     },
     // Use 'postCreateCommand' to run commands after the container is created.
     "postCreateCommand": "./.devcontainer/postCreateCommand.sh",
-    "forwardPorts": [
+    "forwardPorts": [
+        7860,
+        8000
+    ],
     // Configure tool-specific properties.
     "customizations": {
         "vscode": {
@@ -30,10 +33,13 @@
             "ms-python.python", // Python
             "ms-python.black-formatter", // Black
             "ms-python.debugpy", // Debugger for Python
-            "redhat.vscode-yaml" // YAML
+            "redhat.vscode-yaml", // YAML
+            "tamasfe.even-better-toml" // TOML
         ],
         "settings": {
-            "python.testing.pytestArgs": [
+            "python.testing.pytestArgs": [
+                "tests"
+            ],
             "python.testing.unittestEnabled": false,
             "python.testing.pytestEnabled": true,
             "python.defaultInterpreterPath": "/workspaces/resume-maker-ai-agent/.venv/bin/python",
.vscode/settings.json CHANGED

@@ -12,5 +12,6 @@
     "**/coverage.xml": true,
     "**/build": true,
     "**/dist": true
-    }
+    },
+    "makefile.makefilePath": "/workspaces/resume-maker-ai-agent/Makefile"
 }
Makefile CHANGED

@@ -39,7 +39,7 @@ help: ## Display this help message
 .PHONY: bake-env
 bake-env: clean-env ## Install the poetry environment and set up pre-commit hooks
 	@echo "🚀 Creating virtual environment using pyenv and poetry"
-	@poetry install
+	@poetry install --all-extras
 	@poetry run pre-commit install || true
 	@max_retries=3; count=0; \
 	while ! make lint; do \
@@ -73,58 +73,6 @@ init-repo: ## Initialize git repository
 	@echo "🚀 Pushing initial commit"
 	@git push -u origin main
 
-.PHONY: setup-cloud-env
-setup-cloud-env: ## Create resource group, container app environment, and service principal
-	@echo "🚀 Creating resource group: $(RESOURCE_GROUP)"
-	@az group create --name $(RESOURCE_GROUP) --location $(CLOUD_REGION)
-
-	@echo "🚀 Creating container app environment: $(APP_ENV_NAME)"
-	@az containerapp env create --name $(APP_ENV_NAME) --resource-group $(RESOURCE_GROUP) --location $(CLOUD_REGION)
-
-	@echo "🚀 Fetching subscription ID"
-	@subscription_id=$$(az account show --query "id" -o tsv) && \
-	echo "Subscription ID: $$subscription_id" && \
-	echo "🚀 Creating service principal for: $(APP_NAME)" && \
-	az ad sp create-for-rbac --name "$(APP_NAME)-service-principal" --role contributor --scopes /subscriptions/$$subscription_id --sdk-auth
-
-	@echo "🚀 Creating container app: $(APP_NAME)"
-	@az containerapp create --name $(APP_NAME) --resource-group $(RESOURCE_GROUP) --environment $(APP_ENV_NAME) --image 'nginx:latest' --target-port 80 --ingress 'external' --query "properties.configuration.ingress.fqdn"
-
-.PHONY: clean-cloud-env
-clean-cloud-env: ## Delete resource group, container app environment, and service principal
-	@echo "🚀 Deleting service principal for: $(APP_NAME)-service-principal"
-	@sp_object_id=$$(az ad sp list --display-name "$(APP_NAME)-service-principal" --query "[0].id" -o tsv) && \
-	if [ -n "$$sp_object_id" ]; then \
-		az ad sp delete --id $$sp_object_id; \
-		echo "Service principal deleted"; \
-	else \
-		echo "Service principal not found, skipping deletion"; \
-	fi
-
-	@echo "🚀 Deleting container app: $(APP_NAME)"
-	@az containerapp delete --name $(APP_NAME) --resource-group $(RESOURCE_GROUP) --yes --no-wait || echo "Container app not found, skipping deletion"
-
-	@echo "🚀 Deleting container app environment: $(APP_ENV_NAME)"
-	@az containerapp env delete --name $(APP_ENV_NAME) --resource-group $(RESOURCE_GROUP) --yes --no-wait || echo "Container app environment not found, skipping deletion"
-
-	@echo "🚀 Deleting resource group: $(RESOURCE_GROUP)"
-	@az group delete --name $(RESOURCE_GROUP) --yes --no-wait || echo "Resource group not found, skipping deletion"
-
-.PHONY: install-prerequisites
-install-prerequisites: ## Install system prerequisites
-	@echo "Updating package lists..."
-	@sudo apt-get update
-	@echo "Installing system prerequisites..."
-	@sudo apt-get install -y \
-		chromium \
-		chromium-driver \
-		gnupg \
-		libgconf-2-4 \
-		libnss3 \
-		unzip \
-		wget \
-		sqlite3
-
 
 # =============================
 # Code Quality and Testing
@@ -184,7 +132,7 @@ bake-and-publish: bake publish ## Build and publish to PyPI
 .PHONY: update
 update: ## Update project dependencies
 	@echo "🚀 Updating project dependencies"
-	@poetry update
+	@poetry update --all-extras
 	@poetry run pre-commit install --overwrite
 	@echo "Dependencies updated successfully"
 
@@ -194,7 +142,7 @@ update: ## Update project dependencies
 .PHONY: run
 run: ## Run the project's main application
 	@echo "🚀 Running the project"
-	@poetry run streamlit run $(PROJECT_SLUG)/
+	@poetry run streamlit run $(PROJECT_SLUG)/app2.py --server.port 7860
 
 .PHONY: docs-test
 docs-test: ## Test if documentation can be built without warnings or errors
@@ -263,4 +211,4 @@ teardown: clean-bake clean-container ## Clean up temporary files and directories
 	@echo "🚀 Clean up completed."
 
 .PHONY: teardown-all
-teardown-all: teardown
+teardown-all: teardown ## Clean up temporary files and directories and destroy the virtual environment, Docker image, and Cloud resources
README.md CHANGED

@@ -1,6 +1,6 @@
 ---
 title: Resume Maker AI
-emoji:
+emoji: 📄
 colorFrom: blue
 colorTo: purple
 sdk: docker
poetry.lock DELETED

(The diff for this file is too large to render; see the raw diff.)
pyproject.toml CHANGED

@@ -1,68 +1,65 @@
-[tool.poetry]
+[project]
 name = "resume_maker_ai_agent"
 version = "0.0.1"
 description = "This app will download Jio-Savan music."
-
+readme = "README.md"
+authors = [{ name = "Deepak Pant", email = "deepak.93p@gmail.com" }]
+requires-python = ">=3.10,<=3.13"
+dependencies = [
+    "crewai[tools]>=0.86.0,<1.0.0",
+    "streamlit >=1.41.1",
+    # "PyPDF2 >=3.0.1",
+    # "python-docx >=1.1.2",
+]
+
+[project.urls]
 repository = "https://github.com/DeepakPant93/resume-maker-ai-agent"
 documentation = "https://DeepakPant93.github.io/resume-maker-ai-agent/"
-
-
-
+
+
+[project.optional-dependencies]
+dev = [
+    "deptry==0.16.2",
+    "mypy==1.5.1",
+    "pre-commit==3.4.0",
+    "tox==4.11.1",
+    "ipykernel==6.25.0",
+    "black==23.9.0",
+    "build==1.2.2",
+    "bump-my-version==0.28.2",
+    "codespell==2.2.5",
+    "wheel==0.41.0",
+    "twine==4.0.0",
+    "bandit==1.8.0",
+    "pylint==3.0.0",
+    "pydocstyle==6.3.0",
+]
+docs = [
+    "mkdocs==1.5.0",
+    "sphinx==7.2.0",
+    "mkdocs-git-revision-date-plugin==0.3.2",
+    "mkdocs-git-revision-date-localized-plugin==1.3.0",
+    "mkdocs-jupyter>=0.25.1",
+    "mkdocs-pdf-export-plugin==0.5.10",
+    "mkdocs-material>=9.1.3",
+    "mkdocstrings-crystal==0.3.7",
+    "pygments==2.16.0",
+    "pymdown-extensions==10.0",
+    "nbconvert==7.16.5",
+    "nbformat==5.9.0",
+    "livereload==2.6.3",
+    "watchdog==3.0.0",
+    "mkdocstrings[python]==0.27.0",
+    "mkdocs-minify-plugin==0.8.0",
 ]
 
-[tool.poetry.scripts]
+[project.scripts]
 resume_maker_ai_agent = "resume_maker_ai_agent.__main__:main"
 
-[tool.poetry.dependencies]
-python = ">=3.11,<3.13"
-crewai = {version = ">=0.86.0,<1.0.0", extras = ["tools"]}
-streamlit = "^1.41.1"
-selenium = "^4.27.1"
-# chromedriver_autoinstaller = "^0.6.4"
-# webdriver-manager = "*"
-bs4 = "*"
-
-[tool.poetry.group.dev.dependencies]
-deptry = "^0.16.2" # For dependency management
-mypy = "^1.5.1" # Static type checking
-pre-commit = "^3.4.0" # Pre-commit hooks
-tox = "^4.11.1" # Testing in multiple environments
-ipykernel = "^6.25.0" # Jupyter kernel
-black = "^23.9.0" # Code formatter
-build = "^1.0.0" # Build management
-bump-my-version = "^0.28.2" # Bump versions automatically
-codespell = "^2.2.5" # Spell checking in code
-wheel = "^0.41.0" # Build wheels
-twine = "^4.0.0" # Publish packages
-bandit = "^1.8.0" # Security check
-pylint = "^3.0.0" # Powerful linter
-pydocstyle = "^6.3.0" # Enforce PEP 257 docstring conventions
-# isort = "^5.12.0" # Sort imports
-# ruff = "^0.8.6" # Linting tool
-
-[tool.poetry.group.docs.dependencies]
-mkdocs = "^1.5.0" # Documentation site generator
-sphinx = "^7.2.0" # Documentation tool
-mkdocs-git-revision-date-plugin = "^0.3.2" # Show revision dates
-mkdocs-git-revision-date-localized-plugin = "^1.3.0" # Localized dates
-mkdocs-jupyter = ">=0.25.1" # For Jupyter Notebook integration
-mkdocs-pdf-export-plugin = "^0.5.10" # PDF export
-mkdocs-material = ">=9.1.3" # MkDocs Material theme
-mkdocstrings-crystal = "^0.3.0" # Mkdocstrings for Crystal
-pygments = "^2.16.0" # Syntax highlighting
-pymdown-extensions = "^10.0" # Markdown extensions
-nbconvert = "^7.7.0" # Convert notebooks to other formats
-nbformat = "^5.9.0" # Notebook format support
-livereload = "^2.6.3" # Live reload for MkDocs
-watchdog = "^3.0.0" # File monitoring
-mkdocstrings = {extras = ["python"], version = "^0.27.0"} # Auto-generate documentation from docstrings
-mkdocs-minify-plugin = "^0.8.0" # Minify HTML
-
 [build-system]
-requires = ["poetry-core>=
+requires = ["poetry-core>=2.0.0"]
 build-backend = "poetry.core.masonry.api"
 
-
 [tool.mypy]
 files = ["resume_maker_ai_agent"]
 disallow_untyped_defs = true
@@ -85,7 +82,7 @@ module = [
     "crewai.*",
     "crewai_tools.*",
    "bs4.*",
-    "resume_maker_ai_agent.crew"
+    "resume_maker_ai_agent.crew",
 ]
 ignore_missing_imports = true
 ignore_errors = true
@@ -99,7 +96,7 @@ python_classes = ["Test*"]
 python_functions = ["test_*"]
 filterwarnings = [
     "ignore:.*general_plain_validator_function.*:DeprecationWarning",
-    "ignore:.*with_info_plain_validator_function.*:DeprecationWarning"
+    "ignore:.*with_info_plain_validator_function.*:DeprecationWarning",
 ]
@@ -127,7 +124,8 @@ select = [
     # mccabe
     "C90",
     # pycodestyle
-    "E",
+    "E",
+    "W",
     # pyflakes
     "F",
     # pygrep-hooks
@@ -145,7 +143,7 @@ ignore = [
     # DoNotAssignLambda
     "E731",
     # Possible binding to all interfaces - Require for Docker container
-    "S104"
+    "S104",
 ]
 
 [tool.ruff.format]
@@ -161,10 +159,10 @@ source = ["resume_maker_ai_agent"]
 # parallel = true
 # concurrency = ["thread"]
 omit = [
-    "**/__init__.py",
+    "**/__init__.py", # Exclude all init files
     "resume_maker_ai_agent/crew.py", # Exclude crew.py file
-    "resume_maker_ai_agent/config/*",
-    "resume_maker_ai_agent/models/*",
+    "resume_maker_ai_agent/config/*", # Exclude all files in config folder
+    "resume_maker_ai_agent/models/*", # Exclude all files in model folder
 ]
 
 [tool.ruff.per-file-ignores]
@@ -181,7 +179,26 @@ search = 'version = "{current_version}"'
 replace = 'version = "{new_version}"'
 
 [tool.deptry]
-exclude = [
+exclude = [
+    "research",
+    "artifacts",
+    "notebooks",
+    "tests",
+    "docs",
+    ".venv",
+    "venv",
+    "__pycache__",
+    ".ruff_cache",
+    ".pytest_cache",
+    ".mypy_cache",
+    ".coverage",
+    ".git",
+    "build",
+    "dist",
+    ".github",
+    "site",
+    "config",
+]
 ignore = ["DEP003"]
 
 [tool.pydocstyle]
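A quick way to sanity-check the migrated PEP 621 metadata is to parse it back. The small sketch below uses tomllib from the standard library (Python 3.11+) and reads only fields that appear in the diff above:

# Sketch: verify the migrated [project] table parses and exposes the new
# optional-dependency groups.
import tomllib

with open("pyproject.toml", "rb") as f:
    meta = tomllib.load(f)

project = meta["project"]
print(project["name"], project["version"])       # resume_maker_ai_agent 0.0.1
print(project["requires-python"])                # >=3.10,<=3.13
print(sorted(project["optional-dependencies"]))  # ['dev', 'docs']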
resume_maker_ai_agent/app2.py ADDED

@@ -0,0 +1,64 @@
+import streamlit as st
+# import PyPDF2
+import io
+# from docx import Document
+# from docx.shared import Inches
+import openai
+# import tempfile
+
+from resume_maker_ai_agent.services.app_service import run, create_docx
+
+
+def main():
+    print("main......")
+    st.set_page_config(page_title="Resume Maker AI", page_icon="📄")
+
+    st.title("Resume Maker AI")
+    st.write("Customize your resume for specific job descriptions using AI")
+
+    # File upload
+    uploaded_file = st.file_uploader("Upload your resume (PDF)", type="pdf")
+
+    # Job description input
+    job_description = st.text_area("Enter the job description:", height=200)
+
+    if st.button("Customize Resume") and uploaded_file is not None and job_description:
+        with st.spinner("Customizing your resume..."):
+            try:
+                # Customize resume
+                customized_resume = run(uploaded_file, job_description)
+
+                # Display customized resume
+                st.subheader("Customized Resume")
+                st.write(customized_resume)
+
+                # Create download button
+                doc_buffer = create_docx(customized_resume)
+                st.download_button(
+                    label="Download Customized Resume",
+                    data=doc_buffer,
+                    file_name="customized_resume.docx",
+                    mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+                )
+
+            except Exception as e:
+                st.error(f"An error occurred: {str(e)}")
+
+    # Add instructions and tips
+    with st.expander("How to use"):
+        st.write("""
+        1. Upload your current resume in PDF format
+        2. Paste the job description you're targeting
+        3. Click 'Customize Resume' to generate a tailored version
+        4. Review the customized resume
+        5. Download the result as a Word document
+        """)
+
+    # Footer
+    st.markdown("---")
+    st.markdown("Built with Streamlit and Crew AI")
+
+
+if __name__ == "__main__":
+    main()
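The service functions behind this UI can also be exercised without Streamlit. Below is a minimal smoke-test sketch; the file path and job text are placeholders, and it relies on PyPDF2's PdfReader accepting a plain file path just as it accepts the UploadedFile object passed by the UI:

# Quick local check of the service layer behind app2.py (a sketch, not part
# of the commit). The path and job description are placeholder values.
from resume_maker_ai_agent.services.app_service import run

sample_resume = "samples/resume.pdf"  # placeholder: any local PDF resume
job_description = "Backend engineer: Python, AWS, 5+ years of experience"

# run() extracts the PDF text and kicks off the crew, returning plain text
updated_resume = run(sample_resume, job_description)
print(updated_resume[:500])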
resume_maker_ai_agent/config/agents.yaml CHANGED

@@ -1,9 +1,39 @@
-
+job_researcher:
   role: >
-
+    Job Researcher
   goal: >
-
+    Make sure to do amazing analysis on
+    job posting to help job applicants
   backstory: >
-
-
-    information
+    As a Job Researcher, your prowess in
+    navigating and extracting critical
+    information from job postings is unmatched.
+    Your skills help pinpoint the necessary
+    qualifications and skills sought
+    by employers, forming the foundation for
+    effective application tailoring.
+
+profiler:
+  role: >
+    Personal Profiler for Engineers
+  goal: >
+    Make sure to do amazing analysis on
+    job posting to help job applicants
+  backstory: >
+    Equipped with analytical prowess, you dissect
+    and synthesize information
+    from diverse sources to craft comprehensive
+    personal and professional profiles, laying the
+    groundwork for personalized resume enhancements.
+
+resume_strategist:
+  role: >
+    Resume Strategist
+  goal: >
+    Find all the best ways to make a
+    resume stand out in the job market.
+  backstory: >
+    With a strategic mind and an eye for detail, you
+    excel at refining resumes to highlight the most
+    relevant skills and experiences, ensuring they
+    resonate perfectly with the job's requirements.
resume_maker_ai_agent/config/tasks.yaml CHANGED

@@ -1,7 +1,45 @@
-
+research_task:
   description: >
-
-
+    Analyze the job description to extract key skills, experiences, and qualifications
+    required. Use the tools to gather content and identify
+    and categorize the requirements.
+
+    Job Description:
+    ---
+    ({job_description})
+    ---
   expected_output: >
-    A list of
-
+    A structured list of job requirements, including necessary
+    skills, qualifications, and experiences.
+  agent: job_researcher
+
+profile_task:
+  description: >
+    Compile a detailed personal and professional profile
+    using the provided resume:
+
+    Resume:
+    ---
+    ({resume_text})
+    ---
+    Utilize tools to extract and synthesize information from these sources.
+  expected_output: >
+    A comprehensive profile document that includes skills,
+    project experiences, contributions, interests, and
+    communication style.
+  agent: profiler
+
+resume_strategy_task:
+  description: >
+    Using the profile and job requirements obtained from
+    previous tasks, tailor the resume to highlight the most
+    relevant areas. Employ tools to adjust and enhance the
+    resume content. Make sure this is the best resume ever, but
+    don't make up any information. Update every section,
+    including the initial summary, work experience, skills,
+    and education, all to better reflect the candidate's
+    abilities and how they match the job posting.
+  expected_output: >
+    An updated resume that effectively highlights the candidate's
+    qualifications and experiences relevant to the job.
+  agent: resume_strategist
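The ({job_description}) and ({resume_text}) placeholders are not filled by the YAML itself: crewAI formats each task description with the inputs dict passed to kickoff(). A minimal sketch of that wiring, mirroring what app_service.py does below:

# Sketch: how the tasks.yaml placeholders receive their values. The keys in
# `inputs` must match the placeholder names used in the task descriptions.
from resume_maker_ai_agent.crew import ResumeMakerAIAgent

inputs = {
    "resume_text": "Senior Python developer, 8 years of experience...",  # placeholder text
    "job_description": "We are hiring a backend engineer to build...",   # placeholder text
}

result = ResumeMakerAIAgent().crew().kickoff(inputs=inputs)
print(result.raw)  # the tailored resume produced by resume_strategy_task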
resume_maker_ai_agent/crew.py CHANGED

@@ -3,42 +3,105 @@ from crewai.project import CrewBase, agent, crew, task
 
 from resume_maker_ai_agent.models.response_models import MusicDetails
 from resume_maker_ai_agent.tools.custom_tool import search_tool
+import warnings
+
+warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
 
 
 @CrewBase
-class
+class ResumeMakerAIAgent:
+    """ResumeMakerAIAgent crew"""
 
     agents_config = "config/agents.yaml"
     tasks_config = "config/tasks.yaml"
 
     @agent
-    def
+    def job_researcher(self) -> Agent:
+        """
+        Creates the job researcher agent.
+
+        This agent is responsible for searching for job openings.
+
+        :return: An instance of the Agent class
+        """
+
+        return Agent(config=self.agents_config["job_researcher"],
+                     tools=[search_tool],
+                     verbose=True)
+
+    @agent
+    def profiler(self) -> Agent:
         """
-        Creates
+        Creates the profiler agent.
 
-        This agent is responsible for
-        and returning the results in a structured format.
+        This agent is responsible for building the candidate's profile.
 
         :return: An instance of the Agent class
         """
-
+
+        return Agent(config=self.agents_config["profiler"],
+                     tools=[search_tool],
+                     verbose=True)
+
+    @agent
+    def resume_strategist(self) -> Agent:
+        """
+        Creates the resume strategist agent.
+
+        This agent is responsible for customizing resumes based on job
+        descriptions.
+
+        :return: An instance of the Agent class
+        """
+
+        return Agent(config=self.agents_config["resume_strategist"],
+                     tools=[search_tool],
+                     verbose=True)
+
+    @task
+    def research_task(self) -> Task:
+        """
+        Creates the research task.
+
+        This task is responsible for analyzing the job posting.
+
+        :return: An instance of the Task class
+        """
+
+        return Task(
+            config=self.tasks_config["research_task"],
+            async_execution=True
+        )
 
     @task
-    def
+    def profile_task(self) -> Task:
         """
-        Creates the
+        Creates the profile task.
 
-        This task is responsible for
+        This task is responsible for compiling the candidate's profile.
 
         :return: An instance of the Task class
         """
 
         return Task(
-            config=self.tasks_config["
-            output_json=MusicDetails,
+            config=self.tasks_config["profile_task"],
+            async_execution=True
         )
+
+    @task
+    def resume_strategy_task(self) -> Task:
+        """
+        Creates the resume strategy task.
+
+        This task is responsible for tailoring the resume based on the
+        output of the research and profile tasks.
+
+        :return: An instance of the Task class
+        """
+
+        return Task(
+            config=self.tasks_config["resume_strategy_task"],
+            context=[self.research_task(), self.profile_task()],
+        )
 
     @crew
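The hunk ends at the @crew decorator, so the crew assembly itself falls outside the visible diff. Under the standard @CrewBase pattern it would look roughly like the sketch below; this is an assumption based on that pattern, not the commit's actual code, and self.agents / self.tasks are populated by the decorators:

# Sketch of the @crew method that typically closes a @CrewBase class
# (assumes `from crewai import Crew, Process` at the top of crew.py).
@crew
def crew(self) -> Crew:
    """Assemble the agents and tasks defined above into a crew."""
    return Crew(
        agents=self.agents,  # collected from the @agent-decorated methods
        tasks=self.tasks,    # collected from the @task-decorated methods
        process=Process.sequential,  # research -> profile -> strategy, in order
        verbose=True,
    )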
resume_maker_ai_agent/services/app_service.py CHANGED

@@ -1,41 +1,50 @@
 import warnings
+import PyPDF2
+# from docx import Document
+import io
 
-from resume_maker_ai_agent.crew import
-from
+from resume_maker_ai_agent.crew import ResumeMakerAIAgent
+from streamlit.runtime.uploaded_file_manager import UploadedFile
 
-warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
 
+def _extract_text_from_pdf(pdf_file_path):
+    """Extract text content from uploaded PDF file."""
+    pdf_reader = PyPDF2.PdfReader(pdf_file_path)
+    text = ""
+    for page in pdf_reader.pages:
+        text += page.extract_text()
+    return text
 
-def search_music(query: str) -> list[dict]:
-    music_details = []
-    try:
-        # Search the internet for music
-        print(f"Searching for music: {query}")
-        search_results = search_internet(query)
-        print(f"Found {len(search_results)} results")
-
-        print(f"Music details: {music_details}")
-        print("Done")
-    except Exception as e:
-        print(f"An error occurred: {e!s}")
 
+def run(pdf_file_path: UploadedFile, job_description: str) -> str:
+    """
+    Processes a PDF resume file, customizes it based on the job description,
+    and returns the updated resume text.
+
+    :param pdf_file_path: Path to the PDF file containing the resume.
+    :param job_description: Description of the job for which the resume needs to be customized.
+    :return: A string representing the updated resume content.
+    """
 
-def search_internet(query: str) -> list[dict]:
+    print("Extracting text from PDF")
+    resume_text = _extract_text_from_pdf(pdf_file_path)
+
     # Run the crew
-    return links if isinstance(links, list) else []
+    print("Running the crew")
+    inputs = {"resume_text": resume_text, "job_description": job_description}
+    result = ResumeMakerAIAgent().crew().kickoff(inputs=inputs)
 
+    return result.raw
 
-def get_music_details(songs: list[dict]) -> list[dict]:
-    # Get music details by scrapping the pages
+
+def create_docx(content):
+    """Create a Word document with the content."""
+    # doc = Document()
+    # doc.add_paragraph(content)
+
+    # # Save to bytes buffer
+    # buffer = io.BytesIO()
+    # doc.save(buffer)
+    # buffer.seek(0)
+    # return buffer
+    return None
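create_docx is left stubbed in this commit (it returns None, so the download button receives no data). Re-enabling the commented-out python-docx code would look like the sketch below; it assumes the python-docx dependency, which this commit leaves commented out in pyproject.toml, is installed:

# Sketch of a working create_docx, following the commented-out lines above.
import io
from docx import Document  # assumption: python-docx is installed

def create_docx(content: str) -> io.BytesIO:
    """Create a Word document with the content and return it as a buffer."""
    doc = Document()
    doc.add_paragraph(content)

    # Save to an in-memory buffer and rewind it so st.download_button
    # reads the document from the start.
    buffer = io.BytesIO()
    doc.save(buffer)
    buffer.seek(0)
    return buffer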
resume_maker_ai_agent/services/web_scarapper_service.py DELETED

@@ -1,226 +0,0 @@
-import time
-from datetime import datetime
-from typing import Any
-from urllib.parse import urlparse
-
-import requests
-from bs4 import BeautifulSoup
-from selenium import webdriver
-from selenium.common.exceptions import TimeoutException
-from selenium.webdriver.chrome.service import Service
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-
-
-def _setup_driver() -> webdriver.Chrome:
-    # Setup Chrome WebDriver (or other driver)
-    options = webdriver.ChromeOptions()
-
-    # Essential container arguments
-    options.add_argument("--headless=new")
-    options.add_argument("--disable-gpu")
-    options.add_argument("--no-sandbox")
-    options.add_argument("--disable-dev-shm-usage")
-
-    # JavaScript-specific configurations
-    options.add_argument("--enable-javascript")
-    options.add_argument("--disable-web-security")
-    options.add_argument("--allow-running-insecure-content")
-
-    # Performance optimizations
-    options.add_argument("--window-size=1920,1080")
-    options.add_argument("--disable-extensions")
-    options.add_argument("--disable-setuid-sandbox")
-
-    # Memory management
-    options.add_argument("--disable-dev-tools")
-    options.add_argument("--no-zygote")
-    options.add_argument("--single-process")
-
-    # Handle Chrome Driver installation
-    try:
-        # For container environments, specify the Chrome version
-        print("Setting up Chrome WebDriver")
-        chrome_service = Service("/usr/bin/chromedriver")
-        driver = webdriver.Chrome(service=chrome_service, options=options)
-        driver = webdriver.Chrome(options=options)
-    except Exception as e:
-        # Fallback to direct path if ChromeDriverManager fails
-        print(f"An error occurred: {e!s}")
-        print("Falling back to direct path")
-
-        driver = webdriver.Chrome(options=options)
-    return driver
-
-
-def _get_downloadable_audio_link(url: str) -> str:
-    if not url:
-        return ""
-
-    # Extract the album ID and file ID from the URL
-    parsed_url = urlparse(url)
-    path_parts = parsed_url.path.split("/")
-    album_id = path_parts[-2]
-    file_id = path_parts[-1].split(".")[0]
-
-    # Construct the downloadable audio link
-    return f"https://aac.saavncdn.com/{album_id}/{file_id}.mp4"
-
-
-def _extract_musician_name(url: str) -> str:
-    return url.split("/")[-2].replace("-songs", "").replace("-", " ").title()
-
-
-def scrape_dynamic_page(url: str, wait_time: int = 5) -> dict[str, Any]:
-    """
-    Scrape a webpage including content loaded by JavaScript
-
-    Parameters:
-    url (str): The URL to scrape
-    wait_time (int): Maximum time to wait for dynamic content to load
-
-    Returns:
-    dict: Dictionary containing various elements from the page
-    """
-    driver = _setup_driver()
-
-    try:
-        # Load the page
-        driver.get(url)
-
-        # Wait for the button to be present
-        button = WebDriverWait(driver, wait_time).until(
-            EC.presence_of_element_located((By.CSS_SELECTOR, 'a.c-btn.c-btn--primary[data-btn-icon="q"]'))
-        )
-
-        # Check visibility and enablement
-        is_displayed = button.is_displayed()
-        is_enabled = button.is_enabled()
-        print(f"Button displayed: {is_displayed}, Button enabled: {is_enabled}")
-
-        if is_displayed and is_enabled:
-            # Click the button
-            driver.execute_script("arguments[0].scrollIntoView(true);", button)
-            driver.execute_script("arguments[0].click();", button)
-        else:
-            print("Button is not interactable!")
-
-        # Wait a moment for any JavaScript updates
-        time.sleep(5)
-
-        # Get the updated HTML
-        html_content = driver.page_source
-        soup = BeautifulSoup(html_content, "html.parser")
-
-        # Extract elements
-        details = {
-            "album_title": soup.title.text if soup.title else "",
-            "description": soup.find("meta", {"name": "description"})["content"]
-            if soup.find("meta", {"name": "description"})
-            else "",
-            "album_description": soup.find("meta", {"property": "og:description"})["content"]
-            if soup.find("meta", {"property": "og:description"})
-            else "",
-            "album_url": soup.find("meta", {"property": "music:album"})["content"]
-            if soup.find("meta", {"property": "music:album"})
-            else "",
-            "album_image_url": soup.find("meta", {"property": "twitter:image"})["content"]
-            if soup.find("meta", {"property": "twitter:image"})
-            else "",
-            "song_info": {
-                "name": soup.title.text if soup.title else "",
-                "title": soup.find("meta", {"property": "twitter:title"})["content"]
-                if soup.find("meta", {"property": "twitter:title"})
-                else "",
-                "musician": [
-                    _extract_musician_name(musician["content"])
-                    for musician in soup.find_all("meta", {"property": "music:musician"})
-                ],
-                "release_date": datetime.strptime(
-                    soup.find("meta", {"property": "music:release_date"})["content"],
-                    "%Y-%m-%d",
-                ).strftime("%B %d, %Y")
-                if soup.find("meta", {"property": "music:release_date"})
-                else "",
-                "song_url": soup.find("meta", {"property": "twitter:url"})["content"]
-                if soup.find("meta", {"property": "twitter:url"})
-                else "",
-                "description": soup.find("meta", {"property": "twitter:description"})["content"]
-                if soup.find("meta", {"property": "twitter:description"})
-                else "",
-                "downloadable_url": _get_downloadable_audio_link(
-                    soup.find("audio").find("source")["src"] if soup.find("audio").find("source") else ""
-                ),
-                "song_lyrics_url": "https://www.jiosaavn.com" + soup.find("a", title="Song Lyrics")["href"]
-                if soup.find("a", title="Song Lyrics")
-                else "",
-            },
-        }
-    except TimeoutException:
-        print(f"Timeout waiting for page to load: {url}")
-        return {}
-    except Exception as e:
-        print(f"An error occurred: {e!s}")
-        return {}
-    else:
-        return details
-    finally:
-        driver.quit()
-
-
-def scrape_pages(urls: list[str]) -> list[dict]:
-    """
-    Scrape multiple webpages and return a list of elements
-
-    Parameters:
-    urls (list of str): List of URLs to scrape
-    wait_time (int): Maximum time to wait for dynamic content to load
-
-    Returns:
-    list of dict: List of dictionaries containing various elements from each page
-    """
-    results = []
-    for url in urls:
-        details = scrape_dynamic_page(url)
-        if details:
-            results.append(details)
-    return results
-
-
-def download_file(url: str) -> None:
-    """
-    Download a file from a URL and save it to a local file
-
-    Parameters:
-    url (str): URL of the file to be downloaded
-
-    Returns:
-    None
-    """
-    try:
-        response = requests.get(url, stream=True, timeout=10)
-
-        # Check if the request was successful
-        if response.status_code == 200:
-            # Open a local file with the specified filename in binary write mode
-            filename = _get_filename_name(url)
-            filename = f"downloads/{filename}.mp4"
-
-            with open(filename, "wb") as file:
-                # Write the content of the response to the file in chunks
-                for chunk in response.iter_content(chunk_size=8192):
-                    file.write(chunk)
-            print(f"File downloaded successfully as '{filename}'")
-        else:
-            print(f"Failed to download file. HTTP Status Code: {response.status_code}")
-    except requests.exceptions.Timeout:
-        print(f"Request to {url} timed out.")
-    except requests.exceptions.RequestException as e:
-        print(f"Request to {url} failed: {e}")
-
-
-def _get_filename_name(url: str) -> str:
-    parsed_url = urlparse(url)
-    path_parts = parsed_url.path.split("/")
-    return path_parts[2]
resume_maker_ai_agent/tools/custom_tool.py CHANGED

@@ -1,9 +1,4 @@
-from crewai_tools import FileWriterTool, ScrapeWebsiteTool, SerperDevTool
+from crewai_tools import FileWriterTool, SerperDevTool
 
-jio_savan_scapper_tool = ScrapeWebsiteTool(website_url="https://www.jiosaavn.com")
 file_writer_tool = FileWriterTool()
-search_tool = SerperDevTool(
-    country="in",  # Set to 'in' for India
-    locale="en",  # Set locale to English
-    n_results=5,  # You can adjust the number of results as needed
-)
+search_tool = SerperDevTool()
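Both before and after this change, SerperDevTool needs a Serper API key at runtime; with the custom country, locale, and result-count arguments removed, it falls back to the tool's defaults. A minimal usage sketch (the key value is a placeholder):

# Sketch: exercising the simplified search tool. SerperDevTool reads its
# API key from the SERPER_API_KEY environment variable.
import os

os.environ.setdefault("SERPER_API_KEY", "<your-serper-api-key>")  # placeholder key

from resume_maker_ai_agent.tools.custom_tool import search_tool

# With the custom arguments removed by this commit, country, locale, and
# result count are left to the tool's own defaults.
print(search_tool.run(search_query="machine learning engineer job description"))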