import cv2
import gradio as gr
import numpy as np
import torch
from transformers import DetrForObjectDetection, DetrImageProcessor
# Load the DETR detector once at import time instead of on every request
model = DetrForObjectDetection.from_pretrained('facebook/detr-resnet-50')
processor = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')

# Function to detect face and neck regions for placing jewelry.
# DETR is trained on COCO, which has a "person" class but no face or neck
# class, so both regions are estimated from the top of the detected person box.
def detect_face_and_neck(image):
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    target_sizes = torch.tensor([image.shape[:2]])
    results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.7)[0]
    face_box = None
    neck_box = None
    for label, box in zip(results["labels"], results["boxes"]):
        if label.item() == 1:  # COCO class 1: person
            x_min, y_min, x_max, y_max = box.tolist()
            person_h = y_max - y_min
            # Rough heuristic: the face spans roughly the top 20% of the
            # person box and the neck sits in the band just below it
            face_box = (x_min, y_min, x_max, y_min + 0.2 * person_h)
            neck_box = (x_min, y_min + 0.2 * person_h, x_max, y_min + 0.35 * person_h)
            break
    return face_box, neck_box
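
# A minimal sanity check (a sketch, not required by the app): the class ID
# used above comes from DETR's COCO label map, which can be inspected
# directly to confirm that ID 1 really is "person":
#
#     print(model.config.id2label[1])  # -> 'person'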
# Function to overlay a jewelry image onto a detected region of the photo
def place_jewelry(image, jewelry_image, box):
    # Boxes are (x_min, y_min, x_max, y_max); clamp to integer pixel bounds
    x_min, y_min = max(int(box[0]), 0), max(int(box[1]), 0)
    x_max, y_max = min(int(box[2]), image.shape[1]), min(int(box[3]), image.shape[0])
    w, h = x_max - x_min, y_max - y_min
    if w <= 0 or h <= 0:
        return image
    resized_jewelry = cv2.resize(jewelry_image, (w, h))
    if resized_jewelry.ndim == 3 and resized_jewelry.shape[2] == 4:
        # RGBA jewelry: blend each color channel against the photo using alpha
        alpha = resized_jewelry[:, :, 3] / 255.0
        for c in range(3):
            image[y_min:y_max, x_min:x_max, c] = (
                resized_jewelry[:, :, c] * alpha
                + image[y_min:y_max, x_min:x_max, c] * (1.0 - alpha)
            )
    else:
        # No alpha channel: paste the jewelry directly over the region
        image[y_min:y_max, x_min:x_max] = resized_jewelry[:, :, :3]
    return image
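
# The blend above is standard alpha compositing: out = alpha * jewelry +
# (1 - alpha) * photo, with alpha rescaled from [0, 255] to [0, 1]. As a
# quick illustration (hypothetical values, not from the app):
#
#     alpha = 0.0  ->  pixel keeps the photo's value (fully transparent)
#     alpha = 1.0  ->  pixel takes the jewelry's value (fully opaque)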
# Try-on function for jewelry
def tryon_jewelry(person_img, jewelry_img, jewelry_type):
    # Ensure both images are present
    if person_img is None or jewelry_img is None:
        return None
    # Work on a copy so the uploaded image is not modified in place
    result_img = person_img.copy()
    # Detect face and neck regions using the Hugging Face DETR model
    face_box, neck_box = detect_face_and_neck(result_img)
    if jewelry_type == "Necklace" and neck_box is not None:
        # Apply the necklace to the neck region
        result_img = place_jewelry(result_img, jewelry_img, neck_box)
    elif jewelry_type == "Earrings" and face_box is not None:
        # For simplicity, treat the ears as part of the face region
        result_img = place_jewelry(result_img, jewelry_img, face_box)
    # If nothing was detected, the unmodified image is returned
    return result_img
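
# A quick local smoke test (a sketch with hypothetical file names; note that
# cv2.imread returns BGR, while the Gradio inputs are RGB):
#
#     person = cv2.cvtColor(cv2.imread("person.jpg"), cv2.COLOR_BGR2RGB)
#     necklace = cv2.imread("necklace.png", cv2.IMREAD_UNCHANGED)  # keeps alpha
#     out = tryon_jewelry(person, necklace, "Necklace")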
# Gradio interface setup
css = """
#col-left, #col-mid, #col-right {
margin: 0 auto;
max-width: 430px;
}
"""
with gr.Blocks(css=css) as JewelryTryon:
    gr.HTML("<h1>Virtual Jewelry Try-On</h1>")
    with gr.Row():
        with gr.Column(elem_id="col-left"):
            imgs = gr.Image(label="Person image", sources='upload', type="numpy")
        with gr.Column(elem_id="col-mid"):
            garm_img = gr.Image(label="Jewelry image", sources='upload', type="numpy")
        with gr.Column(elem_id="col-right"):
            jewelry_type = gr.Dropdown(label="Jewelry Type", choices=['Necklace', 'Earrings', 'Ring'], value="Necklace")
            image_out = gr.Image(label="Result", show_share_button=False)
            run_button = gr.Button(value="Run")
    run_button.click(fn=tryon_jewelry, inputs=[imgs, garm_img, jewelry_type], outputs=image_out)
# Launch Gradio app
JewelryTryon.queue(api_open=False).launch(show_api=False)