# NOTE(review): the original capture began with Hugging Face Spaces status-page
# residue ("Spaces: / Runtime error / Runtime error") — not part of the program.
import os
import cv2
import spaces
from PIL import Image
import gradio as gr
import numpy as np
import random
import base64
import requests
import json
import time

# Placeholder text-to-image helper: generates a garment image from a text prompt.
def generate_garment_image(prompt, size=(256, 256)):
    """Generate a garment image from a text prompt.

    Placeholder implementation: returns a black RGB canvas with the prompt
    rendered as text. Replace with a real text-to-image backend (DALL-E,
    Stable Diffusion, ...) when available.

    Args:
        prompt: Free-form garment description to render onto the canvas.
        size: (height, width) of the generated canvas. Defaults to (256, 256)
            for backward compatibility with the original hard-coded size.

    Returns:
        np.ndarray of shape (H, W, 3), dtype uint8, RGB channel order
        (callers convert with cv2.COLOR_RGB2BGR before JPEG encoding).
    """
    height, width = size
    canvas = np.zeros((height, width, 3), dtype=np.uint8)
    # Draw the prompt at mid-height (the original used y=128 for a 256-px
    # canvas). cv2.putText clips text past the right edge; it does not wrap.
    cv2.putText(canvas, prompt, (10, height // 2),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    return canvas
def tryon(person_img, garment_prompt, seed, randomize_seed):
    """Run the (stubbed) virtual try-on pipeline.

    Wired to the "Run" button with outputs (image_out, seed_used, result_info),
    so this function must ALWAYS return a 3-tuple. The original version was
    truncated and implicitly returned None, which crashed the Gradio UI.

    Args:
        person_img: RGB numpy array from gr.Image, or None.
        garment_prompt: Garment description text.
        seed: Seed value from the slider.
        randomize_seed: If True, replace `seed` with a random one.

    Returns:
        (result_image_or_None, seed_or_None, status_message)
    """
    post_start_time = time.time()
    # Guard clause: Gradio can invoke with a missing image or empty prompt.
    if person_img is None or garment_prompt == "":
        return None, None, "Empty image or prompt"
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Generate garment image from prompt.
    garment_img = generate_garment_image(garment_prompt)
    # JPEG-encode both images as base64 payloads for the try-on API.
    # cv2 encodes BGR, so convert from the RGB arrays Gradio supplies.
    encoded_person_img = cv2.imencode('.jpg', cv2.cvtColor(person_img, cv2.COLOR_RGB2BGR))[1].tobytes()
    encoded_person_img = base64.b64encode(encoded_person_img).decode('utf-8')
    encoded_garment_img = cv2.imencode('.jpg', cv2.cvtColor(garment_img, cv2.COLOR_RGB2BGR))[1].tobytes()
    encoded_garment_img = base64.b64encode(encoded_garment_img).decode('utf-8')
    # TODO: POST encoded_person_img / encoded_garment_img to the try-on
    # backend and return its result image. Until then, return the garment
    # preview so the UI receives the (image, seed, info) triple it expects.
    elapsed = time.time() - post_start_time
    return garment_img, seed, f"Try-on backend not implemented (stub, {elapsed:.2f}s)"
def start_tryon(person_img, garment_prompt, seed, randomize_seed):
    """Duplicate of `tryon` kept for backward compatibility.

    NOTE(review): this function is not referenced by the UI (the Run button
    is wired to `tryon`); consider deleting it or delegating to `tryon` once
    external callers are confirmed. Like `tryon`, the original body was
    truncated and returned None; it now always returns the 3-tuple
    (result_image_or_None, seed_or_None, status_message).
    """
    start_time = time.time()
    # Guard clause: missing image or empty prompt.
    if person_img is None or garment_prompt == "":
        return None, None, "Empty image or prompt"
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Generate garment image from prompt.
    garment_img = generate_garment_image(garment_prompt)
    # JPEG-encode both images as base64 payloads (cv2 expects BGR).
    encoded_person_img = cv2.imencode('.jpg', cv2.cvtColor(person_img, cv2.COLOR_RGB2BGR))[1].tobytes()
    encoded_person_img = base64.b64encode(encoded_person_img).decode('utf-8')
    encoded_garment_img = cv2.imencode('.jpg', cv2.cvtColor(garment_img, cv2.COLOR_RGB2BGR))[1].tobytes()
    encoded_garment_img = base64.b64encode(encoded_garment_img).decode('utf-8')
    # TODO: call the try-on backend with the encoded payloads; stub for now.
    elapsed = time.time() - start_time
    return garment_img, seed, f"Try-on backend not implemented (stub, {elapsed:.2f}s)"
# Upper bound for the seed slider and random-seed generation.
MAX_SEED = 999999

# Bundled example assets live next to this file.
example_path = os.path.join(os.path.dirname(__file__), 'assets')
# Sort the listing: os.listdir order is arbitrary, and a deterministic order
# keeps the example gallery stable across restarts.
human_list = sorted(os.listdir(os.path.join(example_path, "human")))
human_list_path = [os.path.join(example_path, "human", human) for human in human_list]
# CSS passed to gr.Blocks: centers each UI column with a max width, and
# styles the "Run" button (elem_id="button").
css="""
#col-left {
margin: 0 auto;
max-width: 430px;
}
#col-mid {
margin: 0 auto;
max-width: 430px;
}
#col-right {
margin: 0 auto;
max-width: 430px;
}
#col-showcase {
margin: 0 auto;
max-width: 1100px;
}
#button {
color: blue;
}
"""
def load_description(fp):
    """Return the entire contents of a UTF-8 text file at path *fp*."""
    with open(fp, 'r', encoding='utf-8') as handle:
        return handle.read()
# ---------------------------------------------------------------------------
# Gradio UI: three columns (person image | garment prompt | result), seed
# controls, a Run button wired to `tryon`, and a showcase gallery.
# NOTE(review): source indentation was destroyed in extraction; the nesting
# below is the conventional reconstruction — confirm against the live Space.
# ---------------------------------------------------------------------------
with gr.Blocks(css=css) as Tryon:
    # Page banner loaded from the assets directory.
    gr.HTML(load_description("assets/title.md"))
    # Row 1: step-by-step instruction headers, one per column.
    with gr.Row():
        with gr.Column(elem_id = "col-left"):
            gr.HTML("""
            <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
            <div>
            Step 1. Upload a person image ⬇️
            </div>
            </div>
            """)
        with gr.Column(elem_id = "col-mid"):
            gr.HTML("""
            <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
            <div>
            Step 2. Enter a garment description ⬇️
            </div>
            </div>
            """)
        with gr.Column(elem_id = "col-right"):
            gr.HTML("""
            <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
            <div>
            Step 3. Press "Run" to get try-on results
            </div>
            </div>
            """)
    # Row 2: main interaction widgets.
    with gr.Row():
        with gr.Column(elem_id = "col-left"):
            # Person photo input; delivered to `tryon` as an RGB numpy array.
            imgs = gr.Image(label="Person image", sources='upload', type="numpy")
            example = gr.Examples(
                inputs=imgs,
                examples_per_page=12,
                examples=human_list_path
            )
        with gr.Column(elem_id = "col-mid"):
            # Free-text garment description (replaces a garment-image upload).
            garm_prompt = gr.Textbox(label="Garment description", placeholder="Enter a description of the garment...")
            example_prompts = gr.Examples(
                inputs=garm_prompt,
                examples=["A red t-shirt", "Blue jeans", "A floral summer dress", "A black leather jacket"]
            )
        with gr.Column(elem_id = "col-right"):
            # Result image plus seed controls and status readouts.
            image_out = gr.Image(label="Result", show_share_button=False)
            with gr.Row():
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=0,
                )
                randomize_seed = gr.Checkbox(label="Random seed", value=True)
            with gr.Row():
                seed_used = gr.Number(label="Seed used")
                result_info = gr.Text(label="Response")
            test_button = gr.Button(value="Run", elem_id="button")
    # Wire the Run button to the try-on handler; exposed as API name 'tryon'.
    # Outputs must match the 3-tuple the handler returns.
    test_button.click(fn=tryon, inputs=[imgs, garm_prompt, seed, randomize_seed], outputs=[image_out, seed_used, result_info], api_name='tryon', concurrency_limit=40)
    # Showcase gallery: (person image, prompt, result image) example triples.
    with gr.Column(elem_id = "col-showcase"):
        gr.HTML("""
        <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
        <div> </div>
        <br>
        <div>
        Virtual try-on examples in pairs of person images and garment descriptions
        </div>
        </div>
        """)
        show_case = gr.Examples(
            examples=[
                ["assets/examples/model2.png", "A blue t-shirt", "assets/examples/result2.png"],
                ["assets/examples/model3.png", "A red dress", "assets/examples/result3.png"],
                ["assets/examples/model1.png", "A black suit", "assets/examples/result1.png"],
            ],
            inputs=[imgs, garm_prompt, image_out],
            label=None
        )

# Start the Gradio server (standard usage: launch after the Blocks context).
Tryon.launch()