Commit 1fb00de · add: image preprocessing
Parent(s): 7315ca6

Files changed:
- app.py (+13, -452)
- src/api/__init__.py (+5, -0)
- src/api/image_prep_api.py (+158, -0)
- src/api/nto_api.py (+426, -0)
- src/components/auto_crop.py (+61, -0)
- src/pipelines/completePipeline.py (+1, -0)
app.py
CHANGED
@@ -1,26 +1,18 @@
-
-
-
-
-
-from src.utils import supabaseGetPublicURL, deductAndTrackCredit, returnBytesData
-from fastapi import FastAPI, File, UploadFile, Header, HTTPException, Form, Depends
-from src.pipelines.completePipeline import Pipeline
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import JSONResponse
-from supabase import create_client, Client
-from typing import Dict, Union, List
-from io import BytesIO
-from PIL import Image
-import pandas as pd
-import base64
-import os
-from pydantic import BaseModel
-import replicate
-
-
+"""
+project @ NTO-TCP-HF
+created @ 2024-10-28
+author @ github.com/ishworrsubedii
+"""
+
+from fastapi import FastAPI
+from starlette.middleware.cors import CORSMiddleware
+
+from src.api.image_prep_api import preprocessing_router
+from src.api.nto_api import nto_cto_router
+
+app = FastAPI()
+app.include_router(nto_cto_router, tags=["NTO-CTO"])
+app.include_router(preprocessing_router, tags=["Image-Preprocessing"])
 
 app.add_middleware(
     CORSMiddleware,
@@ -29,434 +21,3 @@ app.add_middleware(
     allow_methods=["*"],
     allow_headers=["*"],
 )
-url: str = os.getenv("SUPABASE_URL")
-key: str = os.getenv("SUPABASE_KEY")
-supabase: Client = create_client(supabase_key=key, supabase_url=url)
-bucket = supabase.storage.from_("JewelMirrorOutputs")
-
-
-def replicate_run(input):
-    output = replicate.run(
-        "stability-ai/stable-diffusion-inpainting:95b7223104132402a9ae91cc677285bc5eb997834bd2349fa486f53910fd68b3",
-        input=input)
-    return output
-
-
-class NecklaceTryOnIDEntity(BaseModel):
-    necklaceImageId: str
-    necklaceCategory: str
-    storename: str
-    api_token: str
-
-
-@app.post("/clothingTryOnV2")
-async def clothing_try_on_v2(image: UploadFile = File(...), clothing_type: str = Form(...)):
-    image_bytes = await image.read()
-    image = Image.open(BytesIO(image_bytes)).convert("RGB")
-
-    mask = await pipeline.shoulderPointMaskGeneration_(image=image)
-
-    mask_img_base_64, act_img_base_64 = BytesIO(), BytesIO()
-    mask.save(mask_img_base_64, format="WEBP")
-    image.save(act_img_base_64, format="WEBP")
-    mask_bytes_ = base64.b64encode(mask_img_base_64.getvalue()).decode("utf-8")
-    image_bytes_ = base64.b64encode(act_img_base_64.getvalue()).decode("utf-8")
-
-    mask_data_uri = f"data:image/webp;base64,{mask_bytes_}"
-    image_data_uri = f"data:image/webp;base64,{image_bytes_}"
-
-    input = {
-        "mask": mask_data_uri,
-        "image": image_data_uri,
-        "prompt": f"Dull {clothing_type}, non-reflective clothing, properly worn, natural setting, elegant, natural look, neckline without jewellery, simple, perfect eyes, perfect face, perfect body, high quality, realistic, photorealistic, high resolution,traditional full sleeve blouse",
-        "negative_prompt": "necklaces, jewellery, jewelry, necklace, neckpiece, garland, chain, neck wear, jewelled neck, jeweled neck, necklace on neck, jewellery on neck, accessories, watermark, text, changed background, wider body, narrower body, bad proportions, extra limbs, mutated hands, changed sizes, altered proportions, unnatural body proportions, blury, ugly",
-        "num_inference_steps": 25
-    }
-
-    output = replicate_run(input)
-
-    response = {
-        "output": f"{output[0]}",
-        'code': 200
-    }
-
-    return JSONResponse(content=response, status_code=200)
-
-
-@app.post("/clothingTryOn")
-async def clothing_try_on(image: UploadFile = File(...),
-                          mask: UploadFile = File(...), clothing_type: str = Form(...)):
-    image_bytes = await image.read()
-    mask_bytes = await mask.read()
-    image, mask = Image.open(BytesIO(image_bytes)).convert("RGB"), Image.open(
-        BytesIO(mask_bytes)).convert("RGB")
-
-    actual_image = image.copy()
-
-    jewellery_mask = Image.fromarray(
-        np.bitwise_and(np.array(mask), np.array(image))
-    )
-    arr_orig = np.array(grayscale(mask))
-
-    image = cv2.inpaint(np.array(image), arr_orig, 15, cv2.INPAINT_TELEA)
-    image = Image.fromarray(image).resize((512, 512))
-
-    arr = arr_orig.copy()
-    mask_y = np.where(arr == arr[arr != 0][0])[0][0]
-    arr[mask_y:, :] = 255
-
-    mask = Image.fromarray(arr).resize((512, 512))
-
-    mask_img_base_64, act_img_base_64 = BytesIO(), BytesIO()
-    mask.save(mask_img_base_64, format="WEBP")
-    image.save(act_img_base_64, format="WEBP")
-    mask_bytes_ = base64.b64encode(mask_img_base_64.getvalue()).decode("utf-8")
-    image_bytes_ = base64.b64encode(act_img_base_64.getvalue()).decode("utf-8")
-
-    mask_data_uri = f"data:image/webp;base64,{mask_bytes_}"
-    image_data_uri = f"data:image/webp;base64,{image_bytes_}"
-
-    input = {
-        "mask": mask_data_uri,
-        "image": image_data_uri,
-        "prompt": f"Dull {clothing_type}, non-reflective clothing, properly worn, natural setting, elegant, natural look, neckline without jewellery, simple, perfect eyes, perfect face, perfect body, high quality, realistic, photorealistic, high resolution,traditional full sleeve blouse",
-        "negative_prompt": "necklaces, jewellery, jewelry, necklace, neckpiece, garland, chain, neck wear, jewelled neck, jeweled neck, necklace on neck, jewellery on neck, accessories, watermark, text, changed background, wider body, narrower body, bad proportions, extra limbs, mutated hands, changed sizes, altered proportions, unnatural body proportions, blury, ugly",
-        "num_inference_steps": 25
-    }
-
-    output = replicate_run(input)
-    image_base = str(output[0])
-    image_base_64 = image_base.split(",")[1]
-    image_data = base64.b64decode(image_base_64)
-
-    output_image = Image.open(BytesIO(image_data)).resize(actual_image.size)
-    output_image = np.bitwise_and(
-        np.array(output_image),
-        np.bitwise_not(np.array(Image.fromarray(arr_orig).convert("RGB"))),
-    )
-
-    result = Image.fromarray(np.bitwise_or(np.array(output_image), np.array(jewellery_mask)))
-
-    in_mem_file = BytesIO()
-    result.save(in_mem_file, format="WEBP", quality=85)
-    base_64_output = base64.b64encode(in_mem_file.getvalue()).decode('utf-8')
-
-    response = {
-        "output": f"data:image/WEBP;base64,{base_64_output}",
-        'code': 200
-    }
-
-    return JSONResponse(content=response, status_code=200)
-
-
-@app.post("/productData/{storeId}")
-async def product_data(
-        storeId: str,
-        filterattributes: List[Dict[str, Union[str, int, float]]],
-        storename: str = Header(default="default")
-):
-    """Filters product data based on the provided attributes and store ID."""
-
-    try:
-        response = supabase.table('MagicMirror').select("*").execute()
-        df = pd.DataFrame(response.dict()["data"])
-
-        df = df[df["StoreName"] == storeId]
-
-        # Preprocess filterattributes to handle multiple or duplicated attributes
-        attribute_dict = {}
-        for attr in filterattributes:
-            key, value = list(attr.items())[
-                0]  # This will convert the dictionary into a list and get the key and value.
-            if key in attribute_dict:  # This will check if the key is already present in the dictionary.
-                if isinstance(attribute_dict[key],
-                              list):  # This will create a list if there are multiple values for the same key and we are doing or operation.
-                    attribute_dict[key].append(value)  # This will append the value to the list.
-                else:
-                    attribute_dict[key] = [attribute_dict[key], value]
-            else:
-                attribute_dict[key] = [value]  # This will create a list if there is only one value for the key.
-
-        priceFrom = None
-        priceTo = None
-        weightFrom = None
-        weightTo = None
-        weightAscending = None
-        priceAscending = None
-        idAscending = None
-        dateAscending = None
-
-        for key, value in attribute_dict.items():
-            if key == 'priceFrom':
-                priceFrom = value[0]
-
-            elif key == "priceTo":
-                priceTo = value[0]
-
-            elif key == "priceAscending":
-                priceAscending = value[0]
-
-            elif key == "weightFrom":
-                weightFrom = value[0]
-
-            elif key == "weightTo":
-                weightTo = value[0]
-
-            elif key == "weightAscending":
-                weightAscending = value[0]
-
-            elif key == "idAscending":
-                idAscending = value[0]
-
-            elif key == "dateAscending":
-                dateAscending = value[0]
-
-        df["image_url"] = df.apply(
-            lambda row: supabaseGetPublicURL(f"{row['StoreName']}/{row['Category']}/image/{row['Id']}.png"),
-            axis=1)
-        df["thumbnail_url"] = df.apply(
-            lambda row: supabaseGetPublicURL(f"{row['StoreName']}/{row['Category']}/thumbnail/{row['Id']}.png"),
-            axis=1)
-
-        df.reset_index(drop=True, inplace=True)
-        for key, values in attribute_dict.items():
-            try:
-                df = df[df[key].isin(values)]
-
-            except:
-                pass
-
-        # applying filter for price and weight
-        if priceFrom is not None:
-            df = df[df["Price"] >= priceFrom]
-        if priceTo is not None:
-            df = df[df["Price"] <= priceTo]
-        if weightFrom is not None:
-            df = df[df["Weight"] >= weightFrom]
-        if weightTo is not None:
-            df = df[df["Weight"] <= weightTo]
-
-        if priceAscending is not None:
-            if priceAscending == 1:
-                value = True
-
-            else:
-                value = False
-            df = df.sort_values(by="Price", ascending=value)
-        if weightAscending is not None:
-            if weightAscending == 1:
-                value = True
-
-            else:
-                value = False
-            df = df.sort_values(by="Weight", ascending=value)
-
-        if idAscending is not None:
-            if idAscending == 1:
-                value = True
-            else:
-                value = False
-            df = df.sort_values(by="Id", ascending=value)
-
-        if dateAscending is not None:
-            if dateAscending == 1:
-                value = True
-            else:
-                value = False
-            df = df.sort_values(by="UpdatedAt", ascending=value)
-
-        df = df.drop(["CreatedAt", "EstimatedPrice"], axis=1)
-
-        result = {}
-        for _, row in df.iterrows():
-            category = row["Category"]
-            if category not in result:  # this is for checking duplicate category
-                result[category] = []
-            result[category].append(row.to_dict())
-
-        return JSONResponse(content=jsonable_encoder(result))  # this will convert the result into json format.
-
-    except Exception as e:
-        raise HTTPException(status_code=500, detail=f"Failed to fetch or process data: {e}")
-
-
-async def parse_necklace_try_on_id(necklaceImageId: str = Form(...),
-                                   necklaceCategory: str = Form(...),
-                                   storename: str = Form(...),
-                                   api_token: str = Form(...)) -> NecklaceTryOnIDEntity:
-    return NecklaceTryOnIDEntity(
-        necklaceImageId=necklaceImageId,
-        necklaceCategory=necklaceCategory,
-        storename=storename,
-        api_token=api_token
-    )
-
-
-@app.post("/necklaceTryOnID")
-async def necklace_try_on_id(necklace_try_on_id: NecklaceTryOnIDEntity = Depends(parse_necklace_try_on_id),
-                             image: UploadFile = File(...)):
-    data, _ = supabase.table("APIKeyList").select("*").filter("API_KEY", "eq",
-                                                              necklace_try_on_id.api_token).execute()
-
-    api_key_actual = data[1][0]['API_KEY']
-    if api_key_actual != necklace_try_on_id.api_token:
-        return JSONResponse(content={"error": "Invalid API Key"}, status_code=401)
-
-    else:
-        imageBytes = await image.read()
-
-        jewellery_url = f"https://lvuhhlrkcuexzqtsbqyu.supabase.co/storage/v1/object/public/Stores/{necklace_try_on_id.storename}/{necklace_try_on_id.necklaceCategory}/image/{necklace_try_on_id.necklaceImageId}.png"
-
-        try:
-            image, jewellery = Image.open(BytesIO(imageBytes)), Image.open(returnBytesData(url=jewellery_url))
-
-        except:
-            error_message = {
-                "error": "The requested resource (Image, necklace category, or store) is not available. Please verify the availability and try again."
-            }
-
-            return JSONResponse(content=error_message, status_code=404)
-
-        result, headetText, mask = await pipeline.necklaceTryOn_(image=image, jewellery=jewellery,
-                                                                 storename=necklace_try_on_id.storename)
-
-        inMemFile = BytesIO()
-        inMemFileMask = BytesIO()
-        result.save(inMemFile, format="WEBP", quality=85)
-        mask.save(inMemFileMask, format="WEBP", quality=85)
-        outputBytes = inMemFile.getvalue()
-        maskBytes = inMemFileMask.getvalue()
-        response = {
-            "output": f"data:image/WEBP;base64,{base64.b64encode(outputBytes).decode('utf-8')}",
-            "mask": f"data:image/WEBP;base64,{base64.b64encode(maskBytes).decode('utf-8')}"
-        }
-        creditResponse = deductAndTrackCredit(storename=necklace_try_on_id.storename, endpoint="/necklaceTryOnID")
-        if creditResponse == "No Credits Available":
-            response = {
-                "error": "No Credits Remaining"
-            }
-
-            return JSONResponse(content=response)
-
-        else:
-            return JSONResponse(content=response)
-
-
-@app.post("/necklace_try_on_id_url")
-async def necklace_try_on_id_url(necklace_try_on_id: NecklaceTryOnIDEntity = Depends(parse_necklace_try_on_id),
-                                 image: UploadFile = File(...)):
-    imageBytes = await image.read()
-
-    jewellery_url = f"https://lvuhhlrkcuexzqtsbqyu.supabase.co/storage/v1/object/public/Stores/{necklace_try_on_id.storename}/{necklace_try_on_id.necklaceCategory}/image/{necklace_try_on_id.necklaceImageId}.png"
-
-    try:
-        image, jewellery = Image.open(BytesIO(imageBytes)), Image.open(returnBytesData(url=jewellery_url))
-
-    except:
-        error_message = {
-            "error": "The requested resource (Image, necklace category, or store) is not available. Please verify the availability and try again."
-        }
-
-        return JSONResponse(content=error_message, status_code=404)
-
-    result, headerText, mask = await pipeline.necklaceTryOn_(
-        image=image, jewellery=jewellery, storename=necklace_try_on_id.storename
-    )
-
-    response = {}
-    for image_obj, prefix in [(result, "result"), (mask, "mask")]:
-        file_name = f"{prefix}{secrets.token_hex(24)}.webp"
-
-        inMemFile = BytesIO()
-        image_obj.save(inMemFile, format="WEBP", quality=85)
-        in_mem_file = inMemFile.getvalue()
-
-        bucket.upload(file_name, in_mem_file)
-
-        public_url = f"https://lvuhhlrkcuexzqtsbqyu.supabase.co/storage/v1/object/public/JewelMirrorOutputs/{file_name}"
-
-        response[f"{prefix}_url"] = public_url
-
-    return JSONResponse(content=response)
-
-
-@app.post("/canvasPoints")
-async def canvas_points(necklace_try_on_id: NecklaceTryOnIDEntity = Depends(parse_necklace_try_on_id),
-                        image: UploadFile = File(...)):
-    imageBytes = await image.read()
-
-    jewellery_url = f"https://lvuhhlrkcuexzqtsbqyu.supabase.co/storage/v1/object/public/Stores/{necklace_try_on_id.storename}/{necklace_try_on_id.necklaceCategory}/image/{necklace_try_on_id.necklaceImageId}.png"
-
-    try:
-        image, jewellery = Image.open(BytesIO(imageBytes)), Image.open(returnBytesData(url=jewellery_url))
-
-    except:
-        error_message = {
-            "error": "The requested resource (Image, necklace category, or store) is not available. Please verify the availability and try again."
-        }
-
-        return JSONResponse(content=error_message, status_code=404)
-
-    response = await pipeline.canvasPoint(
-        image=image, jewellery=jewellery, storename=necklace_try_on_id.storename
-    )
-
-    creditResponse = deductAndTrackCredit(storename=necklace_try_on_id.storename, endpoint="/necklaceTryOnID")
-    if creditResponse == "No Credits Available":
-        response = {
-            "error": "No Credits Remaining"
-        }
-
-        return JSONResponse(content=response)
-
-    else:
-        return JSONResponse(content=response)
-
-
-@app.post("/necklaceTryOnWithPoints")
-async def necklace_try_on_with_points(necklace_try_on_id: NecklaceTryOnIDEntity = Depends(parse_necklace_try_on_id),
-                                      image: UploadFile = File(...),
-                                      left_x: int = Form(...),
-                                      left_y: int = Form(...),
-                                      right_x: int = Form(...),
-                                      right_y: int = Form(...)):
-    imageBytes = await image.read()
-
-    jewellery_url = f"https://lvuhhlrkcuexzqtsbqyu.supabase.co/storage/v1/object/public/Stores/{necklace_try_on_id.storename}/{necklace_try_on_id.necklaceCategory}/image/{necklace_try_on_id.necklaceImageId}.png"
-
-    try:
-        image, jewellery = Image.open(BytesIO(imageBytes)), Image.open(returnBytesData(url=jewellery_url))
-
-    except:
-        error_message = {
-            "error": "The requested resource (Image, necklace category, or store) is not available. Please verify the availability and try again."
-        }
-
-        return JSONResponse(content=error_message, status_code=404)
-
-    result, headerText, mask = await pipeline.necklaceTryOnWithPoints_(
-        image=image, jewellery=jewellery, left_shoulder=(left_x, left_y), right_shoulder=(right_x, right_y),
-        storename=necklace_try_on_id.storename
-    )
-
-    inMemFile = BytesIO()
-    inMemFileMask = BytesIO()
-    result.save(inMemFile, format="WEBP", quality=85)
-    mask.save(inMemFileMask, format="WEBP", quality=85)
-    outputBytes = inMemFile.getvalue()
-    maskBytes = inMemFileMask.getvalue()
-    response = {
-        "output": f"data:image/WEBP;base64,{base64.b64encode(outputBytes).decode('utf-8')}",
-        "mask": f"data:image/WEBP;base64,{base64.b64encode(maskBytes).decode('utf-8')}"
-    }
-
-    creditResponse = deductAndTrackCredit(storename=necklace_try_on_id.storename, endpoint="/necklaceTryOnID")
-    if creditResponse == "No Credits Available":
-        response = {
-            "error": "No Credits Remaining"
-        }
-
-        return JSONResponse(content=response)
-
-    else:
-        return JSONResponse(content=response)
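For orientation, a minimal sketch of how the recomposed app can be sanity-checked once this commit is applied. The import-time check and the uvicorn command are assumptions for illustration, not part of the commit; note that src.api.nto_api builds its Supabase client and Pipeline at import time, so the referenced environment variables must already be set.

# Sketch: confirm that both routers are mounted on the FastAPI app.
# Assumes the repository root is on PYTHONPATH and that SUPABASE_URL,
# SUPABASE_KEY, SUPABASE_STORAGE, CTO, RMBG and ENHANCER are set.
from app import app

for route in app.routes:
    print(route.path)  # expected to include /clothingTryOnV2, /necklaceTryOnID, /rem_bg, /upscale_image, ...

# To serve locally (assumed command, not specified in this commit):
#   uvicorn app:app --host 0.0.0.0 --port 7860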
src/api/__init__.py
ADDED
@@ -0,0 +1,5 @@
"""
project @ NTO-TCP-HF
created @ 2024-10-28
author @ github/ishworrsubedii
"""
src/api/image_prep_api.py
ADDED
@@ -0,0 +1,158 @@
"""
project @ NTO-TCP-HF
created @ 2024-10-28
author @ github.com/ishworrsubedii
"""
import base64
import os
from io import BytesIO
import numpy as np
import replicate
import requests
from PIL import Image
from fastapi import APIRouter, UploadFile, File, HTTPException
from fastapi.responses import JSONResponse

from src.components.auto_crop import crop_transparent_image

preprocessing_router = APIRouter()

rmbg: str = os.getenv("RMBG")

enhancer: str = os.getenv("ENHANCER")


def replicate_bg(input):
    output = replicate.run(
        rmbg,
        input=input
    )
    return output


def replicate_enhancer(input):
    output = replicate.run(
        enhancer,
        input=input
    )
    return output


@preprocessing_router.post("/rem_bg")
async def remove_background(image: UploadFile = File(...)):
    image_bytes = await image.read()
    image = Image.open(BytesIO(image_bytes)).convert("RGB")
    act_img_base_64 = BytesIO()
    image.save(act_img_base_64, format="WEBP")
    image_bytes_ = base64.b64encode(act_img_base_64.getvalue()).decode("utf-8")

    image_data_uri = f"data:image/WEBP;base64,{image_bytes_}"

    input = {
        "image": image_data_uri,
    }
    output = replicate_bg(input)

    response = requests.get(output)
    base_64 = base64.b64encode(response.content).decode('utf-8')
    base64_prefix = "data:image/WEBP;base64,"

    try:
        response = {
            "output": f"{base64_prefix}{base_64}",
            'code': 200
        }

        return JSONResponse(content=response, status_code=200)

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to process image: {e}")


@preprocessing_router.post("/upscale_image")
async def upscale_image(image: UploadFile = File(...), scale: int = 1):
    image_bytes = await image.read()
    image = Image.open(BytesIO(image_bytes)).convert("RGB")
    act_img_base_64 = BytesIO()
    image.save(act_img_base_64, format="WEBP")
    image_bytes_ = base64.b64encode(act_img_base_64.getvalue()).decode("utf-8")

    image_data_uri = f"data:image/WEBP;base64,{image_bytes_}"

    input = {
        "image": image_data_uri,
        "scale": scale,
        "face_enhance": False

    }
    output = replicate_enhancer(input)

    response = requests.get(output)
    base_64 = base64.b64encode(response.content).decode('utf-8')
    base64_prefix = image_data_uri.split(",")[0] + ","

    try:
        response = {
            "output": f"{base64_prefix}{base_64}",
            'code': 200
        }

        return JSONResponse(content=response, status_code=200)

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to process image: {e}")


@preprocessing_router.post("/crop_transparent")
async def crop_transparent(image: UploadFile):
    if not image.content_type == "image/png":
        raise HTTPException(status_code=400, detail="Only PNG files are supported")

    try:
        contents = await image.read()

        cropped_image_bytes, metadata = crop_transparent_image(contents)

        base64_image = base64.b64encode(cropped_image_bytes).decode('utf-8')
        base64_prefix = "data:image/png;base64,"

        return {
            "status": "success",
            "data": {
                "image": f"{base64_prefix}{base64_image}",
                "metadata": metadata
            }
        }

    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@preprocessing_router.post("/background_replace")
async def nto_id(image: UploadFile = File(...), bg_image: UploadFile = File(...)):
    image_bytes = await image.read()
    bg_image = await bg_image.read()

    image, bg_image = Image.open(BytesIO(image_bytes)).convert("RGBA"), Image.open(BytesIO(bg_image)).convert("RGB")
    width, height = bg_image.size
    background = Image.fromarray(np.array(bg_image)).resize((width, height))
    orig_img = Image.fromarray(np.array(image)).resize((width, height))
    background.paste(orig_img, (0, 0), mask=orig_img)
    act_img_base_64 = BytesIO()
    background.save(act_img_base_64, format="WEBP")
    image_bytes_ = base64.b64encode(act_img_base_64.getvalue()).decode("utf-8")

    image_data_uri = f"data:image/webp;base64,{image_bytes_}"

    try:
        response = {
            "output": f"{image_data_uri}",
            'code': 200
        }

        return JSONResponse(content=response, status_code=200)

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to process image: {e}")
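A hedged client-side sketch of how the new preprocessing routes might be called. The base URL and file names are placeholders; the "image" field name comes from the route signatures above, and "scale" is sent as a query parameter because it is declared as a plain int rather than a Form field.

# Sketch: calling /rem_bg and /upscale_image with the requests library.
import requests

BASE_URL = "http://localhost:7860"  # placeholder, adjust to the deployed Space

with open("necklace.png", "rb") as f:  # placeholder input file
    r = requests.post(f"{BASE_URL}/rem_bg",
                      files={"image": ("necklace.png", f, "image/png")})
print(r.json()["code"])          # 200 on success; "output" holds a WEBP data URI

with open("necklace.png", "rb") as f:
    r = requests.post(f"{BASE_URL}/upscale_image",
                      params={"scale": 2},
                      files={"image": ("necklace.png", f, "image/png")})
print(r.json()["output"][:30])   # data URI prefix of the upscaled image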
src/api/nto_api.py
ADDED
@@ -0,0 +1,426 @@
"""
project @ NTO-TCP-HF
created @ 2024-10-28
author @ github/ishworrsubedii
"""
import cv2
import numpy as np
from PIL.ImageOps import grayscale
from fastapi.encoders import jsonable_encoder
from src.utils import supabaseGetPublicURL, deductAndTrackCredit, returnBytesData
from fastapi import File, UploadFile, Header, HTTPException, Form, Depends, APIRouter
from src.pipelines.completePipeline import Pipeline
from fastapi.responses import JSONResponse
from supabase import create_client, Client
from typing import Dict, Union, List
from io import BytesIO
from PIL import Image
import pandas as pd
import base64
import os
from pydantic import BaseModel
import replicate

pipeline = Pipeline()

nto_cto_router = APIRouter()

url: str = os.getenv("SUPABASE_URL")
key: str = os.getenv("SUPABASE_KEY")

supabase_storage: str = os.getenv("SUPABASE_STORAGE")

cto_replicate: str = os.getenv(
    "CTO")

supabase: Client = create_client(supabase_key=key, supabase_url=url)
bucket = supabase.storage.from_(supabase_storage)


def replicate_run_cto(input):
    output = replicate.run(
        cto_replicate,
        input=input)
    return output


class NecklaceTryOnIDEntity(BaseModel):
    necklaceImageId: str
    necklaceCategory: str
    storename: str
    api_token: str


@nto_cto_router.post("/clothingTryOnV2")
async def clothing_try_on_v2(image: UploadFile = File(...), clothing_type: str = Form(...)):
    image_bytes = await image.read()
    image = Image.open(BytesIO(image_bytes)).convert("RGB")

    mask = await pipeline.shoulderPointMaskGeneration_(image=image)

    mask_img_base_64, act_img_base_64 = BytesIO(), BytesIO()
    mask.save(mask_img_base_64, format="WEBP")
    image.save(act_img_base_64, format="WEBP")
    mask_bytes_ = base64.b64encode(mask_img_base_64.getvalue()).decode("utf-8")
    image_bytes_ = base64.b64encode(act_img_base_64.getvalue()).decode("utf-8")

    mask_data_uri = f"data:image/webp;base64,{mask_bytes_}"
    image_data_uri = f"data:image/webp;base64,{image_bytes_}"

    input = {
        "mask": mask_data_uri,
        "image": image_data_uri,
        "prompt": f"Dull {clothing_type}, non-reflective clothing, properly worn, natural setting, elegant, natural look, neckline without jewellery, simple, perfect eyes, perfect face, perfect body, high quality, realistic, photorealistic, high resolution,traditional full sleeve blouse",
        "negative_prompt": "necklaces, jewellery, jewelry, necklace, neckpiece, garland, chain, neck wear, jewelled neck, jeweled neck, necklace on neck, jewellery on neck, accessories, watermark, text, changed background, wider body, narrower body, bad proportions, extra limbs, mutated hands, changed sizes, altered proportions, unnatural body proportions, blury, ugly",
        "num_inference_steps": 25
    }

    output = replicate_run_cto(input)

    response = {
        "output": f"{output[0]}",
        'code': 200
    }

    return JSONResponse(content=response, status_code=200)


@nto_cto_router.post("/clothingTryOn")
async def clothing_try_on(image: UploadFile = File(...),
                          mask: UploadFile = File(...), clothing_type: str = Form(...)):
    image_bytes = await image.read()
    mask_bytes = await mask.read()
    image, mask = Image.open(BytesIO(image_bytes)).convert("RGB"), Image.open(
        BytesIO(mask_bytes)).convert("RGB")

    actual_image = image.copy()

    jewellery_mask = Image.fromarray(
        np.bitwise_and(np.array(mask), np.array(image))
    )
    arr_orig = np.array(grayscale(mask))

    image = cv2.inpaint(np.array(image), arr_orig, 15, cv2.INPAINT_TELEA)
    image = Image.fromarray(image).resize((512, 512))

    arr = arr_orig.copy()
    mask_y = np.where(arr == arr[arr != 0][0])[0][0]
    arr[mask_y:, :] = 255

    mask = Image.fromarray(arr).resize((512, 512))

    mask_img_base_64, act_img_base_64 = BytesIO(), BytesIO()
    mask.save(mask_img_base_64, format="WEBP")
    image.save(act_img_base_64, format="WEBP")
    mask_bytes_ = base64.b64encode(mask_img_base_64.getvalue()).decode("utf-8")
    image_bytes_ = base64.b64encode(act_img_base_64.getvalue()).decode("utf-8")

    mask_data_uri = f"data:image/webp;base64,{mask_bytes_}"
    image_data_uri = f"data:image/webp;base64,{image_bytes_}"

    input = {
        "mask": mask_data_uri,
        "image": image_data_uri,
        "prompt": f"Dull {clothing_type}, non-reflective clothing, properly worn, natural setting, elegant, natural look, neckline without jewellery, simple, perfect eyes, perfect face, perfect body, high quality, realistic, photorealistic, high resolution,traditional full sleeve blouse",
        "negative_prompt": "necklaces, jewellery, jewelry, necklace, neckpiece, garland, chain, neck wear, jewelled neck, jeweled neck, necklace on neck, jewellery on neck, accessories, watermark, text, changed background, wider body, narrower body, bad proportions, extra limbs, mutated hands, changed sizes, altered proportions, unnatural body proportions, blury, ugly",
        "num_inference_steps": 25
    }

    output = replicate_run_cto(input)
    image_base = str(output[0])
    image_base_64 = image_base.split(",")[1]
    image_data = base64.b64decode(image_base_64)

    output_image = Image.open(BytesIO(image_data)).resize(actual_image.size)
    output_image = np.bitwise_and(
        np.array(output_image),
        np.bitwise_not(np.array(Image.fromarray(arr_orig).convert("RGB"))),
    )

    result = Image.fromarray(np.bitwise_or(np.array(output_image), np.array(jewellery_mask)))

    in_mem_file = BytesIO()
    result.save(in_mem_file, format="WEBP", quality=85)
    base_64_output = base64.b64encode(in_mem_file.getvalue()).decode('utf-8')

    response = {
        "output": f"data:image/WEBP;base64,{base_64_output}",
        'code': 200
    }

    return JSONResponse(content=response, status_code=200)


@nto_cto_router.post("/productData/{storeId}")
async def product_data(
        storeId: str,
        filterattributes: List[Dict[str, Union[str, int, float]]],
        storename: str = Header(default="default")
):
    """Filters product data based on the provided attributes and store ID."""

    try:
        response = supabase.table('MagicMirror').select("*").execute()
        df = pd.DataFrame(response.dict()["data"])

        df = df[df["StoreName"] == storeId]

        # Preprocess filterattributes to handle multiple or duplicated attributes
        attribute_dict = {}
        for attr in filterattributes:
            key, value = list(attr.items())[
                0]  # This will convert the dictionary into a list and get the key and value.
            if key in attribute_dict:  # This will check if the key is already present in the dictionary.
                if isinstance(attribute_dict[key],
                              list):  # This will create a list if there are multiple values for the same key and we are doing or operation.
                    attribute_dict[key].append(value)  # This will append the value to the list.
                else:
                    attribute_dict[key] = [attribute_dict[key], value]
            else:
                attribute_dict[key] = [value]  # This will create a list if there is only one value for the key.

        priceFrom = None
        priceTo = None
        weightFrom = None
        weightTo = None
        weightAscending = None
        priceAscending = None
        idAscending = None
        dateAscending = None

        for key, value in attribute_dict.items():
            if key == 'priceFrom':
                priceFrom = value[0]

            elif key == "priceTo":
                priceTo = value[0]

            elif key == "priceAscending":
                priceAscending = value[0]

            elif key == "weightFrom":
                weightFrom = value[0]

            elif key == "weightTo":
                weightTo = value[0]

            elif key == "weightAscending":
                weightAscending = value[0]

            elif key == "idAscending":
                idAscending = value[0]

            elif key == "dateAscending":
                dateAscending = value[0]

        df["image_url"] = df.apply(
            lambda row: supabaseGetPublicURL(f"{row['StoreName']}/{row['Category']}/image/{row['Id']}.png"),
            axis=1)
        df["thumbnail_url"] = df.apply(
            lambda row: supabaseGetPublicURL(f"{row['StoreName']}/{row['Category']}/thumbnail/{row['Id']}.png"),
            axis=1)

        df.reset_index(drop=True, inplace=True)
        for key, values in attribute_dict.items():
            try:
                df = df[df[key].isin(values)]

            except:
                pass

        # applying filter for price and weight
        if priceFrom is not None:
            df = df[df["Price"] >= priceFrom]
        if priceTo is not None:
            df = df[df["Price"] <= priceTo]
        if weightFrom is not None:
            df = df[df["Weight"] >= weightFrom]
        if weightTo is not None:
            df = df[df["Weight"] <= weightTo]

        if priceAscending is not None:
            if priceAscending == 1:
                value = True

            else:
                value = False
            df = df.sort_values(by="Price", ascending=value)
        if weightAscending is not None:
            if weightAscending == 1:
                value = True

            else:
                value = False
            df = df.sort_values(by="Weight", ascending=value)

        if idAscending is not None:
            if idAscending == 1:
                value = True
            else:
                value = False
            df = df.sort_values(by="Id", ascending=value)

        if dateAscending is not None:
            if dateAscending == 1:
                value = True
            else:
                value = False
            df = df.sort_values(by="UpdatedAt", ascending=value)

        df = df.drop(["CreatedAt", "EstimatedPrice"], axis=1)

        result = {}
        for _, row in df.iterrows():
            category = row["Category"]
            if category not in result:  # this is for checking duplicate category
                result[category] = []
            result[category].append(row.to_dict())

        return JSONResponse(content=jsonable_encoder(result))  # this will convert the result into json format.

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to fetch or process data: {e}")


async def parse_necklace_try_on_id(necklaceImageId: str = Form(...),
                                   necklaceCategory: str = Form(...),
                                   storename: str = Form(...),
                                   api_token: str = Form(...)) -> NecklaceTryOnIDEntity:
    return NecklaceTryOnIDEntity(
        necklaceImageId=necklaceImageId,
        necklaceCategory=necklaceCategory,
        storename=storename,
        api_token=api_token
    )


@nto_cto_router.post("/necklaceTryOnID")
async def necklace_try_on_id(necklace_try_on_id: NecklaceTryOnIDEntity = Depends(parse_necklace_try_on_id),
                             image: UploadFile = File(...)):
    data, _ = supabase.table("APIKeyList").select("*").filter("API_KEY", "eq",
                                                              necklace_try_on_id.api_token).execute()

    api_key_actual = data[1][0]['API_KEY']
    if api_key_actual != necklace_try_on_id.api_token:
        return JSONResponse(content={"error": "Invalid API Key"}, status_code=401)

    else:
        imageBytes = await image.read()

        jewellery_url = f"https://lvuhhlrkcuexzqtsbqyu.supabase.co/storage/v1/object/public/Stores/{necklace_try_on_id.storename}/{necklace_try_on_id.necklaceCategory}/image/{necklace_try_on_id.necklaceImageId}.png"

        try:
            image, jewellery = Image.open(BytesIO(imageBytes)), Image.open(returnBytesData(url=jewellery_url))

        except:
            error_message = {
                "error": "The requested resource (Image, necklace category, or store) is not available. Please verify the availability and try again."
            }

            return JSONResponse(content=error_message, status_code=404)

        result, headetText, mask = await pipeline.necklaceTryOn_(image=image, jewellery=jewellery,
                                                                 storename=necklace_try_on_id.storename)

        inMemFile = BytesIO()
        inMemFileMask = BytesIO()
        result.save(inMemFile, format="WEBP", quality=85)
        mask.save(inMemFileMask, format="WEBP", quality=85)
        outputBytes = inMemFile.getvalue()
        maskBytes = inMemFileMask.getvalue()
        response = {
            "output": f"data:image/WEBP;base64,{base64.b64encode(outputBytes).decode('utf-8')}",
            "mask": f"data:image/WEBP;base64,{base64.b64encode(maskBytes).decode('utf-8')}"
        }
        creditResponse = deductAndTrackCredit(storename=necklace_try_on_id.storename, endpoint="/necklaceTryOnID")
        if creditResponse == "No Credits Available":
            response = {
                "error": "No Credits Remaining"
            }

            return JSONResponse(content=response)

        else:
            return JSONResponse(content=response)


@nto_cto_router.post("/canvasPoints")
async def canvas_points(necklace_try_on_id: NecklaceTryOnIDEntity = Depends(parse_necklace_try_on_id),
                        image: UploadFile = File(...)):
    imageBytes = await image.read()

    jewellery_url = f"https://lvuhhlrkcuexzqtsbqyu.supabase.co/storage/v1/object/public/Stores/{necklace_try_on_id.storename}/{necklace_try_on_id.necklaceCategory}/image/{necklace_try_on_id.necklaceImageId}.png"

    try:
        image, jewellery = Image.open(BytesIO(imageBytes)), Image.open(returnBytesData(url=jewellery_url))

    except:
        error_message = {
            "error": "The requested resource (Image, necklace category, or store) is not available. Please verify the availability and try again."
        }

        return JSONResponse(content=error_message, status_code=404)

    response = await pipeline.canvasPoint(
        image=image, jewellery=jewellery, storename=necklace_try_on_id.storename
    )

    creditResponse = deductAndTrackCredit(storename=necklace_try_on_id.storename, endpoint="/necklaceTryOnID")
    if creditResponse == "No Credits Available":
        response = {
            "error": "No Credits Remaining"
        }

        return JSONResponse(content=response)

    else:
        return JSONResponse(content=response)


@nto_cto_router.post("/necklaceTryOnWithPoints")
async def necklace_try_on_with_points(necklace_try_on_id: NecklaceTryOnIDEntity = Depends(parse_necklace_try_on_id),
                                      image: UploadFile = File(...),
                                      left_x: int = Form(...),
                                      left_y: int = Form(...),
                                      right_x: int = Form(...),
                                      right_y: int = Form(...)):
    imageBytes = await image.read()

    jewellery_url = f"https://lvuhhlrkcuexzqtsbqyu.supabase.co/storage/v1/object/public/Stores/{necklace_try_on_id.storename}/{necklace_try_on_id.necklaceCategory}/image/{necklace_try_on_id.necklaceImageId}.png"

    try:
        image, jewellery = Image.open(BytesIO(imageBytes)), Image.open(returnBytesData(url=jewellery_url))

    except:
        error_message = {
            "error": "The requested resource (Image, necklace category, or store) is not available. Please verify the availability and try again."
        }

        return JSONResponse(content=error_message, status_code=404)

    result, headerText, mask = await pipeline.necklaceTryOnWithPoints_(
        image=image, jewellery=jewellery, left_shoulder=(left_x, left_y), right_shoulder=(right_x, right_y),
        storename=necklace_try_on_id.storename
    )

    inMemFile = BytesIO()
    inMemFileMask = BytesIO()
    result.save(inMemFile, format="WEBP", quality=85)
    mask.save(inMemFileMask, format="WEBP", quality=85)
    outputBytes = inMemFile.getvalue()
    maskBytes = inMemFileMask.getvalue()
    response = {
        "output": f"data:image/WEBP;base64,{base64.b64encode(outputBytes).decode('utf-8')}",
        "mask": f"data:image/WEBP;base64,{base64.b64encode(maskBytes).decode('utf-8')}"
    }

    creditResponse = deductAndTrackCredit(storename=necklace_try_on_id.storename, endpoint="/necklaceTryOnID")
    if creditResponse == "No Credits Available":
        response = {
            "error": "No Credits Remaining"
        }

        return JSONResponse(content=response)

    else:
        return JSONResponse(content=response)
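A minimal client sketch for the relocated /necklaceTryOnID route. The field names match parse_necklace_try_on_id above, which reads all four metadata values via Form(...); the base URL, store name, category, image id and token values are placeholders for illustration only.

# Sketch: multipart request against /necklaceTryOnID.
import requests

BASE_URL = "http://localhost:7860"  # placeholder

form = {
    "necklaceImageId": "CZ001",      # placeholder id
    "necklaceCategory": "Necklaces", # placeholder category
    "storename": "demo_store",       # placeholder store
    "api_token": "<API_KEY>",        # must exist in the APIKeyList table
}
with open("person.jpg", "rb") as f:  # placeholder input photo
    r = requests.post(f"{BASE_URL}/necklaceTryOnID",
                      data=form,
                      files={"image": ("person.jpg", f, "image/jpeg")})
body = r.json()
# On success "output" and "mask" hold WEBP data URIs; when credits are exhausted
# the same 200 response instead carries {"error": "No Credits Remaining"}.
print(list(body.keys()))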
src/components/auto_crop.py
ADDED
@@ -0,0 +1,61 @@
"""
project @ NTO-TCP-HF
created @ 2024-10-28
author @ github.com/ishworrsubedii
"""
from io import BytesIO

from PIL import Image


def crop_transparent_image(image_data: bytes) -> tuple[bytes, dict]:
    try:
        image = Image.open(BytesIO(image_data))

        if image.format != 'PNG':
            raise ValueError("Only PNG images are supported")

        width = image.size[0]
        height = image.size[1]
        pixels = image.load()

        top = height
        bottom = 0
        left = width
        right = 0

        # Find boundaries of non-transparent pixels
        for y in range(height):
            for x in range(width):
                pixel = pixels[x, y]
                if isinstance(pixel, tuple) and len(pixel) == 4:
                    if pixel[3] != 0:
                        left = min(left, x)
                        top = min(top, y)
                        right = max(right, x)
                        bottom = max(bottom, y)

        left = max(0, left)
        top = max(0, top)
        right = min(width, right + 1)
        bottom = min(height, bottom + 1)

        if left >= right or top >= bottom:
            left, top, right, bottom = 0, 0, width, height

        # Crop image
        cropped_image = image.crop((left, top, right, bottom))

        output_buffer = BytesIO()
        cropped_image.save(output_buffer, format='PNG')
        output_buffer.seek(0)

        metadata = {
            "original_size": f"{width}x{height}",
            "cropped_size": f"{cropped_image.width}x{cropped_image.height}"
        }

        return output_buffer.getvalue(), metadata

    except Exception as e:
        raise ValueError(f"Error processing image: {str(e)}")
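crop_transparent_image scans every pixel in pure Python, which is O(width x height) per call. An equivalent crop, not part of this commit, could let Pillow compute the bounding box of the alpha channel instead; a sketch under the assumption that inputs are RGBA PNGs:

# Sketch (alternative, not the committed implementation): alpha-channel bounding box.
from io import BytesIO
from PIL import Image

def crop_transparent_fast(image_data: bytes) -> bytes:
    image = Image.open(BytesIO(image_data)).convert("RGBA")
    bbox = image.getchannel("A").getbbox()   # box enclosing pixels with alpha != 0
    cropped = image.crop(bbox) if bbox else image  # fully transparent: keep as is
    buf = BytesIO()
    cropped.save(buf, format="PNG")
    return buf.getvalue()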
src/pipelines/completePipeline.py
CHANGED
@@ -28,3 +28,4 @@ class Pipeline:
             right_point=right_shoulder,
             storename=storename)
         return [result, headerText, mask]
+