Rauhan committed on
Commit 2350624 • 1 Parent(s): 820c4cb

UPLOAD: code upload
.gitignore ADDED
@@ -0,0 +1,162 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Rauhan Ahmed Siddiqui
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
app.py ADDED
@@ -0,0 +1,52 @@
+ from src.pipelines.completePipeline import Pipeline
+ from src.utils.functions import getImages
+ import gradio as gr
+
+ # initializing the pipeline for clothing and necklace try-ons
+ pipeline = Pipeline()
+
+ # loading a set of images for examples
+ allImages = getImages(nImages = 100)
+
+ # creating a Gradio interface using Blocks
+ with gr.Blocks(title = "GemFit") as interface:
+     # Row for input images
+     with gr.Row():
+         inputImage = gr.Image(label = "Input Image", type = "pil", image_mode = "RGB", interactive = True)
+         selectedNecklace = gr.Image(label = "Selected Necklace", type = "pil", image_mode = "RGBA", visible = False)
+         necklaceTryOn = gr.Image(label = "Necklace Try-On", type = "pil", interactive = False)
+
+     # Row for model examples
+     with gr.Row():
+         gr.Examples(examples = allImages["models"], inputs = [inputImage], label = "Models")
+
+     # Row for choker examples
+     with gr.Row():
+         gr.Examples(examples = allImages["chokers"], inputs = [selectedNecklace], label = "Chokers")
+
+     # Row for short necklace examples
+     with gr.Row():
+         gr.Examples(examples = allImages["shortNecklaces"], inputs = [selectedNecklace], label = "Short Necklaces")
+
+     # Row for long necklace examples
+     with gr.Row():
+         gr.Examples(examples = allImages["longNecklaces"], inputs = [selectedNecklace], label = "Long Necklaces")
+
+     # Row for output images
+     with gr.Row():
+         outputOne = gr.Image(label = "Output 1", interactive = False)
+         outputTwo = gr.Image(label = "Output 2", interactive = False)
+         outputThree = gr.Image(label = "Output 3", interactive = False)
+
+     # Row for the submit button
+     with gr.Row():
+         submit = gr.Button("Enter")
+
+     # Connect input changes to the necklace try-on function
+     selectedNecklace.change(fn = pipeline.necklaceTryOn, inputs = [inputImage, selectedNecklace], outputs = [necklaceTryOn])
+
+     # Connect the submit button to the clothing try-on function
+     submit.click(fn = pipeline.clothingTryOn, inputs = [inputImage, selectedNecklace], outputs = [outputOne, outputTwo, outputThree])
+
+ # Launch the Gradio interface on host 0.0.0.0, port 7860
+ interface.launch(server_name = "0.0.0.0", server_port = 7860)
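For reference, a minimal headless sketch of the same flow without the Gradio UI, assuming the Appwrite environment variables used by getImages are set, that a CUDA device is available as configured in config.ini, and using hypothetical local file names (model.jpg, necklace.png):

from src.pipelines.completePipeline import Pipeline
from PIL import Image

# hypothetical local test files: any RGB photo and an RGBA necklace cut-out
person = Image.open("model.jpg").convert("RGB")
necklace = Image.open("necklace.png").convert("RGBA")

# loads the pose detector and the Stable Diffusion inpainting model
pipeline = Pipeline()

# the same calls that the Gradio events above are wired to
preview = pipeline.necklaceTryOn(image = person, jewellery = necklace)
outputs = pipeline.clothingTryOn(image = person, jewellery = necklace)

preview.save("necklace_preview.png")
for i, img in enumerate(outputs, start = 1):
    img.save(f"clothing_output_{i}.png")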
config.ini ADDED
@@ -0,0 +1,6 @@
+ [CLOTHING TRY ON]
+ device = cuda
+ modelId = stabilityai/stable-diffusion-2-inpainting
+
+ [NECKLACE TRY ON]
+ offsetFactor = 0.8
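These two sections are read at runtime through configparser (see getConfig in src/utils/functions.py); a minimal sketch of how the values above are consumed:

import configparser

config = configparser.ConfigParser()
config.read("config.ini")

# values consumed by ClothingTryOn.__init__
device = config.get("CLOTHING TRY ON", "device")      # "cuda"
modelId = config.get("CLOTHING TRY ON", "modelId")    # "stabilityai/stable-diffusion-2-inpainting"

# value consumed when positioning the necklace on the neck
offsetFactor = config.getfloat("NECKLACE TRY ON", "offsetFactor")  # 0.8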
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ appwrite==7.0.0
+ cvzone==1.6.1
+ diffusers==0.30.3
+ gradio==5.1.0
+ mediapipe==0.10.15
+ numpy==1.26.4
+ opencv-python==4.10.0.84
+ pillow==10.4.0
+ torch
+ transformers==4.44.2
setup.py ADDED
@@ -0,0 +1,17 @@
+ from setuptools import setup, find_packages
+
+ HYPHEN_E_DOT = "-e ."
+ def getRequirements() -> list[str]:
+     with open("requirements.txt", "r") as file:
+         requirements = [line.strip() for line in file.read().split("\n") if line.strip()]
+     requirements = [req for req in requirements if req != HYPHEN_E_DOT]
+     return requirements
+
+ setup(
+     name = "GemFit",
+     version = "0.1",
+     author = "Rauhan Ahmed Siddiqui",
+     author_email = "rauhaan.siddiqui@gmail.com",
+     packages = find_packages(where = "."),
+     install_requires = getRequirements()
+ )
src/__init__.py ADDED
File without changes
src/components/__init__.py ADDED
File without changes
src/components/clothingTryOn.py ADDED
@@ -0,0 +1,228 @@
+ from diffusers import StableDiffusionInpaintPipeline
+ from src.utils.exceptions import CustomException
+ from cvzone.PoseModule import PoseDetector
+ from src.utils.functions import getConfig
+ from src.utils.logger import logger
+ from PIL.ImageOps import grayscale
+ from PIL import Image
+ import numpy as np
+ import cvzone
+ import torch
+ import math
+ import cv2
+ import gc
+
+ class ClothingTryOn:
+     """
+     A class to simulate clothing try-ons by overlaying clothing images on user images
+     and generating modified outputs using inpainting techniques.
+
+     This class utilizes a pose detection model to identify key landmarks on the user's
+     body, allowing for accurate placement and scaling of clothing images. It integrates
+     with a Stable Diffusion model for image generation, providing realistic visual
+     outputs based on specified prompts while ensuring that jewelry and accessories
+     do not interfere with the clothing representation.
+
+     Attributes:
+         detector (PoseDetector): An instance of PoseDetector for identifying body landmarks.
+         config (ConfigParser): Configuration settings loaded from an external config file.
+         pipeline (StableDiffusionInpaintPipeline): The Stable Diffusion inpainting model for
+             generating images based on user prompts and masks.
+
+     Methods:
+         getBinaryMask(image: Image.Image, jewellery: Image.Image) -> tuple[Image.Image, Image.Image]:
+             Generates a binary mask indicating the presence of the necklace on the user's image.
+
+         generateImage(image: Image.Image, mask: Image.Image) -> tuple[Image.Image, Image.Image, Image.Image]:
+             Applies inpainting to an image using the provided binary mask, generating new images
+             based on specific color prompts while excluding jewelry and accessories.
+     """
+
+     def __init__(self):
+         """Initialize the ClothingTryOn class with a PoseDetector, configuration settings, and the inpainting pipeline."""
+         self.detector = PoseDetector()
+         self.config = getConfig("config.ini")
+         modelId = self.config.get("CLOTHING TRY ON", "modelId")
+         device = self.config.get("CLOTHING TRY ON", "device")
+         self.pipeline = StableDiffusionInpaintPipeline.from_pretrained(
+             modelId, torch_dtype = torch.float16
+         ).to(device)
+
+     def getBinaryMask(self, image: Image.Image, jewellery: Image.Image) -> tuple[Image.Image, Image.Image]:
+         """
+         Generate a binary mask indicating the presence of the necklace on the user's image.
+
+         This function overlays a jewelry image on the user's image and creates a binary mask, where
+         the necklace is represented in white and the background in black.
+
+         Args:
+             image (Image.Image): The user's image, ideally captured in a standing, upright position.
+             jewellery (Image.Image): The image of the jewelry piece (e.g., necklace) to be overlaid.
+
+         Returns:
+             tuple[Image.Image, Image.Image]: A tuple containing:
+                 - The first image as the necklace try-on output.
+                 - The second image as the binary mask, with the necklace shown in white and the background in black.
+
+         Raises:
+             CustomException: If an error occurs during the image processing.
+         """
+         try:
+             logger.info("converting images to numpy arrays")
+             image = np.array(image)
+             jewellery = np.array(jewellery)
+
+             logger.info("creating a copy of original image for actual overlay")
+             copyImage = image.copy()
+
+             logger.info("detecting body landmarks from the input image")
+             image = self.detector.findPose(image)
+             lmList, _ = self.detector.findPosition(image, bboxWithHands = False, draw = False)
+             pt12, pt11, pt10, pt9 = (
+                 lmList[12][:2],
+                 lmList[11][:2],
+                 lmList[10][:2],
+                 lmList[9][:2],
+             )
+
+             logger.info("calculating the precise neck points")
+             avgX1 = int(pt12[0] + (pt10[0] - pt12[0]) / 1.75)
+             avgY1 = int(pt12[1] - (pt12[1] - pt10[1]) / 1.75)
+             avgX2 = int(pt11[0] - (pt11[0] - pt9[0]) / 1.75)
+             avgY2 = int(pt11[1] - (pt11[1] - pt9[1]) / 1.75)
+
+             logger.info("rescaling the necklace to appropriate dimensions")
+             xDist = avgX2 - avgX1
+             origImgRatio = xDist / jewellery.shape[1]
+             yDist = jewellery.shape[0] * origImgRatio
+             jewellery = cv2.resize(
+                 jewellery, (int(xDist), int(yDist)), interpolation = cv2.INTER_CUBIC
+             )
+
+             logger.info("calculating required offset to be added to the necklace image for perfect fitting")
+             imageGray = cv2.cvtColor(jewellery, cv2.COLOR_BGRA2GRAY)
+             for offsetOrig in range(imageGray.shape[1]):
+                 pixelValue = imageGray[0, :][offsetOrig]
+                 if (pixelValue != 255) & (pixelValue != 0):
+                     break
+                 else:
+                     continue
+             offset = int(self.config.getfloat("NECKLACE TRY ON", "offsetFactor") * xDist * (offsetOrig / jewellery.shape[1]))
+             yCoordinate = avgY1 - offset
+
+             logger.info("tilting the necklace image as per the necklace points")
+             angle = math.ceil(
+                 self.detector.findAngle(
+                     p1 = (avgX2, avgY2), p2 = (avgX1, avgY1), p3 = (avgX2, avgY1)
+                 )[0]
+             )
+             if avgY2 < avgY1:
+                 pass
+             else:
+                 angle = angle * -1
+             jewellery = cvzone.rotateImage(jewellery, angle)
+
+             logger.info("checking if the necklace is getting out of the frame and trimming from above if needed")
+             availableSpace = copyImage.shape[0] - yCoordinate
+             extra = jewellery.shape[0] - availableSpace
+
+             logger.info("applying the calculated settings")
+             if extra > 0:
+                 jewellery = jewellery[extra + 10 :, :]
+                 return self.getBinaryMask(
+                     Image.fromarray(copyImage), Image.fromarray(jewellery)
+                 )
+             else:
+                 tryOnOutput = cvzone.overlayPNG(copyImage, jewellery, (avgX1, yCoordinate))
+                 tryOnOutput = Image.fromarray(tryOnOutput.astype(np.uint8))
+                 blackedNecklace = np.zeros(shape = copyImage.shape)
+                 blackedNecklace = cvzone.overlayPNG(blackedNecklace, jewellery, (avgX1, yCoordinate))
+                 blackedNecklace = cv2.cvtColor(blackedNecklace.astype(np.uint8), cv2.COLOR_BGR2GRAY)
+                 binaryMask = blackedNecklace * ((blackedNecklace > 5) * 255)
+                 binaryMask[binaryMask >= 255] = 255
+                 binaryMask[binaryMask < 255] = 0
+                 binaryMask = Image.fromarray(binaryMask.astype(np.uint8))
+                 return (tryOnOutput, binaryMask)
+
+         except Exception as e:
+             logger.error(CustomException(e))
+             print(CustomException(e))
+
+
+     def generateImage(self, image: Image.Image, mask: Image.Image) -> tuple[Image.Image, Image.Image, Image.Image]:
+         """
+         Apply inpainting to an image using the provided binary mask.
+
+         This function utilizes the binary mask to inpaint areas of the image, enhancing the visual output
+         by generating new images based on specific color prompts while excluding jewelry and other accessories.
+
+         Args:
+             image (Image.Image): The input image where inpainting will be applied.
+             mask (Image.Image): The binary mask indicating areas to be inpainted.
+
+         Returns:
+             tuple: A tuple containing three images generated based on different color prompts.
+
+         Raises:
+             CustomException: If an error occurs during the image processing.
+         """
+         try:
+             logger.info("creating a mask where the jewellery is represented")
+             jewelleryMask = Image.fromarray(np.bitwise_and(np.array(mask.convert("RGB")), np.array(image.convert("RGB"))))
+             arrOrig = np.array(grayscale(mask))
+
+             logger.info("inpainting the image using the original mask")
+             image = cv2.inpaint(np.array(image), arrOrig, 15, cv2.INPAINT_TELEA)
+             image = Image.fromarray(image)
+
+             logger.info("preparing the mask for processing")
+             arr = arrOrig.copy()
+             maskY = np.where(arr == arr[arr != 0][0])[0][0]
+             arr[maskY:, :] = 255
+             newMask = Image.fromarray(arr)
+             mask = newMask.copy()
+
+             logger.info("resizing images for consistency")
+             origSize = image.size
+             image = image.resize((512, 512))
+             mask = mask.resize((512, 512))
+
+             logger.info("generating images for different colors")
+             results = []
+             for colour in ["Red", "Blue", "Green"]:
+                 prompt = f"{colour}, South Indian Saree, properly worn, natural setting, elegant, natural look, neckline without jewellery, simple"
+                 negativePrompt = ("necklaces, jewellery, jewelry, necklace, neckpiece, garland, chain, neck wear, "
+                                   "jewelled neck, jeweled neck, necklace on neck, jewellery on neck, accessories, "
+                                   "watermark, text, changed background, wider body, narrower body, bad proportions, "
+                                   "extra limbs, mutated hands, changed sizes, altered proportions, unnatural body proportions, "
+                                   "blurry, ugly")
+                 output = self.pipeline(
+                     prompt = prompt,
+                     negative_prompt = negativePrompt,
+                     image = image,
+                     mask_image = mask,
+                     strength = 0.95,
+                     guidance_scale = 9,
+                 ).images[0]
+
+                 logger.info("resizing the output to original size")
+                 output = output.resize(origSize)
+                 tempGenerated = np.bitwise_and(
+                     np.array(output),
+                     np.bitwise_not(np.array(Image.fromarray(arrOrig).convert("RGB"))),
+                 )
+                 results.append(tempGenerated)
+
+             logger.info("combining the results with the jewellery mask")
+             results = [
+                 Image.fromarray(np.bitwise_or(x, np.array(jewelleryMask))) for x in results
+             ]
+
+             logger.info("Image generation completed successfully.")
+             gc.collect()
+             torch.cuda.empty_cache()
+             return (results[0], results[1], results[2])
+
+         except Exception as e:
+             logger.error(CustomException(e))
+             print(CustomException(e))
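A minimal usage sketch of the two steps above, assuming a GPU is available for the inpainting pipeline and using hypothetical local file names:

from src.components.clothingTryOn import ClothingTryOn
from PIL import Image

clothingTryOn = ClothingTryOn()  # loads the Stable Diffusion inpainting model onto the configured device

person = Image.open("model.jpg").convert("RGB")        # hypothetical test photo
necklace = Image.open("necklace.png").convert("RGBA")  # hypothetical RGBA necklace cut-out

# step 1: overlay the necklace and derive the binary mask of the region it covers
tryOnOutput, binaryMask = clothingTryOn.getBinaryMask(person, necklace)

# step 2: inpaint below the neckline to generate three clothing variants (Red, Blue, Green prompts)
red, blue, green = clothingTryOn.generateImage(tryOnOutput, binaryMask)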
src/components/necklaceTryOn.py ADDED
@@ -0,0 +1,122 @@
+ from src.utils.exceptions import CustomException
+ from cvzone.PoseModule import PoseDetector
+ from src.utils.functions import getConfig
+ from src.utils.logger import logger
+ from PIL import Image
+ import numpy as np
+ import cvzone
+ import math
+ import cv2
+
+ class NecklaceTryOn:
+     """
+     A class for simulating the wearing of necklaces in images.
+
+     This class utilizes a pose detection algorithm to accurately overlay
+     a necklace image onto a user's photo, adjusting for the user's neck
+     position and orientation.
+
+     Attributes:
+         detector (PoseDetector): An instance of the PoseDetector for identifying
+             body landmarks in images.
+         config (ConfigParser): Configuration settings loaded from a specified
+             configuration file (config.ini).
+
+     Methods:
+         necklaceTryOn(image: Image.Image, jewellery: Image.Image) -> Image.Image:
+             Overlays a necklace onto the user's image based on detected pose
+             landmarks and returns the resulting image.
+     """
+
+     def __init__(self):
+         """Initialize the NecklaceTryOn class with a PoseDetector and configuration settings."""
+         self.detector = PoseDetector()
+         self.config = getConfig("config.ini")
+
+     def necklaceTryOn(self, image: Image.Image, jewellery: Image.Image) -> Image.Image:
+         """
+         Overlay a jewelry image onto a person's image to simulate wearing the jewelry.
+
+         Args:
+             image (Image.Image): The user's image, ideally captured in a standing, upright position.
+             jewellery (Image.Image): The image of the jewelry piece (e.g., necklace) to be overlaid.
+
+         Returns:
+             Image.Image: A PIL Image depicting the user wearing the specified jewelry.
+
+         Raises:
+             CustomException: If an error occurs during the image processing.
+         """
+         try:
+             logger.info("converting images to numpy arrays")
+             image = np.array(image)
+             jewellery = np.array(jewellery)
+
+             logger.info("creating a copy of original image for actual overlay")
+             copyImage = image.copy()
+
+             logger.info("detecting body landmarks from the input image")
+             image = self.detector.findPose(image)
+             lmList, _ = self.detector.findPosition(image, bboxWithHands = False, draw = False)
+             pt12, pt11, pt10, pt9 = (
+                 lmList[12][:2],
+                 lmList[11][:2],
+                 lmList[10][:2],
+                 lmList[9][:2],
+             )
+
+             logger.info("calculating the precise neck points")
+             avgX1 = int(pt12[0] + (pt10[0] - pt12[0]) / 1.75)
+             avgY1 = int(pt12[1] - (pt12[1] - pt10[1]) / 1.75)
+             avgX2 = int(pt11[0] - (pt11[0] - pt9[0]) / 1.75)
+             avgY2 = int(pt11[1] - (pt11[1] - pt9[1]) / 1.75)
+
+             logger.info("rescaling the necklace to appropriate dimensions")
+             xDist = avgX2 - avgX1
+             origImgRatio = xDist / jewellery.shape[1]
+             yDist = jewellery.shape[0] * origImgRatio
+             jewellery = cv2.resize(
+                 jewellery, (int(xDist), int(yDist)), interpolation = cv2.INTER_CUBIC
+             )
+
+             logger.info("calculating required offset to be added to the necklace image for perfect fitting")
+             imageGray = cv2.cvtColor(jewellery, cv2.COLOR_BGRA2GRAY)
+             for offsetOrig in range(imageGray.shape[1]):
+                 pixelValue = imageGray[0, :][offsetOrig]
+                 if (pixelValue != 255) & (pixelValue != 0):
+                     break
+                 else:
+                     continue
+             offset = int(self.config.getfloat("NECKLACE TRY ON", "offsetFactor") * xDist * (offsetOrig / jewellery.shape[1]))
+             yCoordinate = avgY1 - offset
+
+             logger.info("tilting the necklace image as per the necklace points")
+             angle = math.ceil(
+                 self.detector.findAngle(
+                     p1 = (avgX2, avgY2), p2 = (avgX1, avgY1), p3 = (avgX2, avgY1)
+                 )[0]
+             )
+             if avgY2 < avgY1:
+                 pass
+             else:
+                 angle = angle * -1
+             jewellery = cvzone.rotateImage(jewellery, angle)
+
+             logger.info("checking if the necklace is getting out of the frame and trimming from above if needed")
+             availableSpace = copyImage.shape[0] - yCoordinate
+             extra = jewellery.shape[0] - availableSpace
+
+             logger.info("applying the calculated settings")
+             if extra > 0:
+                 jewellery = jewellery[extra + 10 :, :]
+                 return self.necklaceTryOn(
+                     Image.fromarray(copyImage), Image.fromarray(jewellery)
+                 )
+             else:
+                 result = cvzone.overlayPNG(copyImage, jewellery, (avgX1, yCoordinate))
+                 result = Image.fromarray(result.astype(np.uint8))
+                 return result
+
+         except Exception as e:
+             logger.error(CustomException(e))
+             print(CustomException(e))
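To make the anchor-point math above concrete, here is a small worked sketch of the interpolation between the shoulder landmarks (11/12) and the mouth-corner landmarks (9/10) of the MediaPipe pose model used by cvzone; the pixel coordinates are made up for illustration:

# hypothetical landmark coordinates (x, y) in pixels
pt12 = (220, 400)   # shoulder on the left side of the frame
pt11 = (420, 410)   # shoulder on the right side of the frame
pt10 = (300, 260)   # mouth corner above pt12
pt9  = (340, 265)   # mouth corner above pt11

# each neck point sits 1/1.75 of the way from a shoulder towards the mouth corner on the same side
avgX1 = int(pt12[0] + (pt10[0] - pt12[0]) / 1.75)   # 220 + 80/1.75  -> 265
avgY1 = int(pt12[1] - (pt12[1] - pt10[1]) / 1.75)   # 400 - 140/1.75 -> 320
avgX2 = int(pt11[0] - (pt11[0] - pt9[0]) / 1.75)    # 420 - 80/1.75  -> 374
avgY2 = int(pt11[1] - (pt11[1] - pt9[1]) / 1.75)    # 410 - 145/1.75 -> 327

xDist = avgX2 - avgX1   # 109 px: the width the necklace image is rescaled to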
src/pipelines/__init__.py ADDED
File without changes
src/pipelines/completePipeline.py ADDED
@@ -0,0 +1,55 @@
+ from src.components.necklaceTryOn import NecklaceTryOn
+ from src.components.clothingTryOn import ClothingTryOn
+ from PIL import Image
+
+ class Pipeline:
+     """
+     A pipeline for performing jewelry and clothing try-on operations.
+
+     This class encapsulates the functionality for overlaying jewelry
+     and clothing on user images using the NecklaceTryOn and ClothingTryOn
+     components.
+
+     Attributes:
+         necklaceTryOnObject (NecklaceTryOn): Instance for necklace try-on functionality.
+         clothingTryOnObject (ClothingTryOn): Instance for clothing try-on functionality.
+     """
+
+     def __init__(self):
+         """
+         Initializes the Pipeline with instances of NecklaceTryOn and ClothingTryOn.
+
+         This constructor sets up the necessary objects required for the
+         try-on functionalities.
+         """
+         self.necklaceTryOnObject = NecklaceTryOn()
+         self.clothingTryOnObject = ClothingTryOn()
+
+     def necklaceTryOn(self, image: Image.Image, jewellery: Image.Image) -> Image.Image:
+         """
+         Overlay a necklace image onto the user's image.
+
+         Args:
+             image (Image.Image): The user's image, ideally captured in a standing position.
+             jewellery (Image.Image): The image of the necklace to be overlaid.
+
+         Returns:
+             Image.Image: A PIL Image depicting the user wearing the specified necklace.
+         """
+         result = self.necklaceTryOnObject.necklaceTryOn(image = image, jewellery = jewellery)
+         return result
+
+     def clothingTryOn(self, image: Image.Image, jewellery: Image.Image) -> tuple[Image.Image, Image.Image, Image.Image]:
+         """
+         Simulate wearing newly generated clothing on the user's image and return the final outputs.
+
+         Args:
+             image (Image.Image): The user's image, ideally captured in a standing position.
+             jewellery (Image.Image): The necklace image used to locate the neckline region that guides the inpainting.
+
+         Returns:
+             tuple[Image.Image, Image.Image, Image.Image]: Three PIL Images depicting the user wearing generated clothing variants.
+         """
+         tryOnOutput, mask = self.clothingTryOnObject.getBinaryMask(image = image, jewellery = jewellery)
+         results = self.clothingTryOnObject.generateImage(image = tryOnOutput, mask = mask)
+         return results
src/utils/__init__.py ADDED
File without changes
src/utils/exceptions.py ADDED
@@ -0,0 +1,19 @@
+ import sys
+
+ def error_message_detail(error):
+     """
+     Build a detailed error message with the file name and line number where the exception occurred.
+     """
+     _, _, exc_tb = sys.exc_info()
+     filename = exc_tb.tb_frame.f_code.co_filename
+     lineno = exc_tb.tb_lineno
+     error_message = "Error encountered in line no [{}], filename : [{}], saying [{}]".format(lineno, filename, error)
+     return error_message
+
+ class CustomException(Exception):
+     def __init__(self, error_message):
+         super().__init__(error_message)  # Call the parent class constructor
+         self.error_message = error_message_detail(error_message)
+
+     def __str__(self) -> str:
+         return self.error_message
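A minimal sketch of how CustomException is used together with the project logger in the components above (it must be constructed inside an except block, since it reads the active traceback):

from src.utils.exceptions import CustomException
from src.utils.logger import logger

try:
    result = 1 / 0  # any failing operation
except Exception as e:
    # the exception message is enriched with the file name and line number of the failure
    logger.error(CustomException(e))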
src/utils/functions.py ADDED
@@ -0,0 +1,71 @@
+ from appwrite.services.storage import Storage
+ from appwrite.client import Client
+ from appwrite.query import Query
+ from io import BytesIO
+ from PIL import Image
+ import configparser
+ import os
+
+
+ def getImages(nImages: int) -> dict[str, list[Image.Image]]:
+     """
+     Retrieves images from the configured Appwrite storage bucket.
+
+     Args:
+         nImages (int): The maximum number of images to retrieve from the bucket.
+
+     Returns:
+         dict[str, list[Image.Image]]: A dictionary where each key is a category (str) and each value is a list of PIL images belonging to that category.
+     """
+     # configuring the appwrite client
+     client = Client()
+     (client
+         .set_endpoint(os.environ["APPWRITE_ENDPOINT"])
+         .set_project(os.environ["APPWRITE_PROJECT_ID"])
+         .set_key(os.environ["APPWRITE_API_KEY"])
+         .set_self_signed()
+         .set_session("")
+     )
+
+     # retrieving names of all files from the storage bucket
+     storage = Storage(client)
+     allFiles = storage.list_files(bucket_id = os.environ["APPWRITE_BUCKET_ID"], queries = [Query.limit(nImages)])
+     allFiles = [file["$id"] for file in allFiles["files"]]
+     extractedData = {
+         "chokers": [x for x in allFiles if x.startswith("CH")],
+         "shortNecklaces": [x for x in allFiles if x.startswith("SN")],
+         "longNecklaces": [x for x in allFiles if x.startswith("LN")],
+         "models": [x for x in allFiles if x.startswith("MD")]
+     }
+
+     # getting PIL images out of the files
+     extractedData = {
+         x: [
+             Image.open(
+                 BytesIO(
+                     storage.get_file_view(
+                         bucket_id = os.environ["APPWRITE_BUCKET_ID"],
+                         file_id = y
+                     )
+                 )
+             ) for y in extractedData[x]
+         ] for x in extractedData
+     }
+
+     return extractedData
+
+
+
+ def getConfig(path: str):
+     """
+     Load configuration from a specified file.
+
+     Args:
+         path (str): The path to the configuration file.
+
+     Returns:
+         ConfigParser: The loaded configuration object.
+     """
+     config = configparser.ConfigParser()
+     config.read(path)
+     return config
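getImages expects the Appwrite credentials as environment variables; a minimal sketch of setting them before calling it (placeholder values, substitute your own project settings):

import os

# placeholder values for the variables read inside getImages
os.environ["APPWRITE_ENDPOINT"] = "https://cloud.appwrite.io/v1"
os.environ["APPWRITE_PROJECT_ID"] = "<project-id>"
os.environ["APPWRITE_API_KEY"] = "<api-key>"
os.environ["APPWRITE_BUCKET_ID"] = "<bucket-id>"

from src.utils.functions import getImages

# file ids in the bucket are expected to start with CH, SN, LN or MD
images = getImages(nImages = 10)
print({category: len(files) for category, files in images.items()})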
src/utils/logger.py ADDED
@@ -0,0 +1,28 @@
+ import logging
+ import os
+
+ # Create a logger instance
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.DEBUG)
+
+ # Define the directory for log files
+ LOG_DIR = os.path.join(os.getcwd(), "logs")
+ os.makedirs(LOG_DIR, exist_ok=True)
+ LOG_FILE = os.path.join(LOG_DIR, "runningLogs.log")
+
+ # Initialize a stream handler for console output and a file handler for the log file
+ streamHandler = logging.StreamHandler()
+ fileHandler = logging.FileHandler(LOG_FILE)
+
+ # Set the logging level for each handler
+ streamHandler.setLevel(logging.INFO)
+ fileHandler.setLevel(logging.DEBUG)
+
+ # Configure the logging format for both handlers
+ logFormatter = logging.Formatter("[%(asctime)s: %(levelname)s: %(module)s: %(message)s]")
+ streamHandler.setFormatter(logFormatter)
+ fileHandler.setFormatter(logFormatter)
+
+ # Add the configured handlers to the logger
+ logger.addHandler(streamHandler)
+ logger.addHandler(fileHandler)
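Importing this module configures the handlers once; elsewhere in the project it is used simply as:

from src.utils.logger import logger

logger.info("written to the console and to logs/runningLogs.log")
logger.debug("written only to logs/runningLogs.log (the console handler is set to INFO)")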