import gradio as gr
import plotly.graph_objs as go
import trimesh
import numpy as np
from PIL import Image, ImageDraw
import torch
from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline
import os
# Load the Stable Diffusion model for text-to-image generation and inpainting
device = "cuda" if torch.cuda.is_available() else "cpu"
pipeline = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(device)
pipeline_inpaint = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32  # fp16 only on GPU
).to(device)
# Get the current directory
current_dir = os.getcwd()
# Default object file path
DEFAULT_OBJ_FILE = os.path.join(current_dir, "female.obj")
# Temporary texture file path
TEMP_TEXTURE_FILE = os.path.join(current_dir, "generated_texture.png")
# File path to save the 2D image
OUTPUT_IMAGE_FILE = os.path.join(current_dir, "output_image.png")
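# Default GLB object file path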
DEFAULT_GLB_FILE = os.path.join(current_dir, "vroid_girl1.glb")
def apply_texture(mesh, texture_file, uv_scale):
    """
    Applies the texture to the mesh with UV scaling to make triangles/rectangles smaller or larger.
    """
    # Fall back to a uniform gray if the mesh carries no UV coordinates
    if not hasattr(mesh.visual, "uv") or mesh.visual.uv is None:
        return np.full((len(mesh.faces), 3), 0.5)
    texture_image = Image.open(texture_file).convert("RGB")
    uv_coords = mesh.visual.uv
    # Apply scaling to UV coordinates to make the mapping finer or coarser
    uv_coords = np.clip(uv_coords * uv_scale, 0, 1)
    # Get the size of the texture image
    img_width, img_height = texture_image.size
    texture_array = np.array(texture_image)
    # Prepare to store the colors per face
    face_colors = []
    for face in mesh.faces:
        uv_face = uv_coords[face]
        pixel_coords = np.round(uv_face * np.array([img_width - 1, img_height - 1])).astype(int)
        # Ensure the UV coordinates are within the bounds of the texture image
        valid_coords = np.all((pixel_coords[:, 0] >= 0) & (pixel_coords[:, 0] < img_width) &
                              (pixel_coords[:, 1] >= 0) & (pixel_coords[:, 1] < img_height))
        if valid_coords:
            # Average the texture colors sampled at the face's UV points
            face_color = np.mean(texture_array[pixel_coords[:, 1], pixel_coords[:, 0]], axis=0)
            face_colors.append(face_color / 255.0)  # Normalize to [0, 1]
        else:
            # Assign a default gray if the UV coordinates are not valid
            face_colors.append([0.5, 0.5, 0.5])
    # Ensure no face is left unpainted
    face_colors = np.array(face_colors)
    if len(face_colors) < len(mesh.faces):
        face_colors = np.pad(face_colors, ((0, len(mesh.faces) - len(face_colors)), (0, 0)), 'constant', constant_values=0.5)
    return face_colors
def load_glb_file(filename):
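    """
    Loads a GLB file with trimesh; if the file contains a scene with several
    geometries, they are concatenated into a single mesh.
    """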
    trimesh_scene = trimesh.load(filename)
    if isinstance(trimesh_scene, trimesh.Scene):
        mesh = trimesh_scene.dump(concatenate=True)
    else:
        mesh = trimesh_scene
    return mesh
def generate_clothing_image(prompt, num_inference_steps):
    """
    Generates the clothing texture based on the provided prompt and number of inference steps.
    """
    image = pipeline(prompt, num_inference_steps=num_inference_steps).images[0]
    image.save(TEMP_TEXTURE_FILE)
    return TEMP_TEXTURE_FILE, image
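# Alternative texture generator that conditions the inpainting pipeline on a UV map.
# It is not wired into the Gradio UI below, but can be called directly.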
def generate_uv_specific_texture(prompt, uv_map_file, num_inference_steps=50):
    """
    Generates a texture for the 3D model using a given prompt and UV map.

    Args:
        prompt (str): The prompt for the diffusion model to generate the texture.
        uv_map_file (str): Path to the UV map file.
        num_inference_steps (int): The number of iterations/steps for the diffusion process.

    Returns:
        (str, PIL.Image): The path to the generated texture file and the generated texture image.
    """
    # Load the UV map; it is passed both as the base image and as the inpainting mask
    uv_map = Image.open(uv_map_file)
    # Generate a texture based on the UV map and the provided prompt
    image = pipeline_inpaint(
        prompt=prompt,
        image=uv_map,
        mask_image=uv_map,
        num_inference_steps=num_inference_steps  # Custom number of inference steps
    ).images[0]
    # Save the generated texture
    image.save(TEMP_TEXTURE_FILE)
    return TEMP_TEXTURE_FILE, image
def display_3d_object(obj_file, texture_file, light_intensity, ambient_intensity, color, uv_scale, transparency):
    """
    Displays the 3D object with applied texture or color, with support for UV scaling and transparency.
    """
    file_extension = obj_file.split('.')[-1].lower()
    # Load mesh
    if file_extension == 'obj':
        mesh = trimesh.load(obj_file)
    elif file_extension == 'glb':
        mesh = load_glb_file(obj_file)
    else:
        raise ValueError("Unsupported file format. Please upload a .obj or .glb file.")
    # Apply texture or color
    if texture_file:
        face_colors = apply_texture(mesh, texture_file, uv_scale)
    else:
        face_colors = np.array([color] * len(mesh.faces))  # Use a single color for all faces if no texture
    # Define lighting settings
    fig = go.Figure(data=[
        go.Mesh3d(
            x=mesh.vertices[:, 0],
            y=mesh.vertices[:, 1],
            z=mesh.vertices[:, 2],
            i=mesh.faces[:, 0],
            j=mesh.faces[:, 1],
            k=mesh.faces[:, 2],
            facecolor=face_colors if texture_file else None,
            color=color if not texture_file else None,
            opacity=transparency,  # Adjustable transparency
            lighting=dict(
                ambient=ambient_intensity,
                diffuse=light_intensity,
                specular=0.8,  # Fine-tuned specular to avoid excessive shininess
                roughness=0.3,
                fresnel=0.1
            ),
            lightposition=dict(
                x=100,
                y=200,
                z=300
            )
        )
    ])
    fig.update_layout(scene=dict(aspectmode='data'))
    return fig
def clear_texture():
    """
    Clears the texture preview and removes the texture file, allowing for a reset.
    """
    if os.path.exists(TEMP_TEXTURE_FILE):
        os.remove(TEMP_TEXTURE_FILE)
    return None
def restore_original(obj_file):
    """
    Restores the original 3D object without any applied texture.
    """
    return display_3d_object(obj_file, None, 0.8, 0.5, "#D3D3D3", 1.0, 1.0)  # Default settings for restoration
def update_texture_display(prompt, texture_file, num_inference_steps):
    """
    Update the texture display either by generating a texture from the prompt
    or by displaying an uploaded texture.
    """
    if prompt:
        # Generate a new texture from the prompt using the Stable Diffusion pipeline
        texture_path, image = generate_clothing_image(prompt, num_inference_steps)
        return image
    elif texture_file:
        # Display the uploaded texture file
        return Image.open(texture_file)
    return None
with gr.Blocks() as demo:
    gr.Markdown("## 3D Object Viewer with Custom Texture, UV Scale, Transparency, Color, and Adjustable Lighting")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Texture Options")
            prompt_input = gr.Textbox(label="Enter a Prompt to Generate Texture", placeholder="Type a prompt...")
            num_inference_steps_slider = gr.Slider(minimum=5, maximum=100, step=1, value=10, label="Num Inference Steps")
            generate_button = gr.Button("Generate Texture")
            texture_file = gr.File(label="Upload Texture file (PNG or JPG, optional)", type="filepath")
            texture_preview = gr.Image(label="Texture Preview", visible=True)
            gr.Markdown("### Mapping, Lighting & Color Settings")
            uv_scale_slider = gr.Slider(minimum=0.1, maximum=5, step=0.1, value=1.0, label="UV Mapping Scale (Make smaller/bigger)")
            light_intensity_slider = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.8, label="Light Intensity")
            ambient_intensity_slider = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.5, label="Ambient Intensity")
            transparency_slider = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=1.0, label="Transparency (1.0 is fully opaque)")
            color_picker = gr.ColorPicker(value="#D3D3D3", label="Object Color")
            submit_button = gr.Button("Submit")
            restore_button = gr.Button("Restore")
            clear_button = gr.Button("Clear")
            obj_file = gr.File(label="Upload OBJ or GLB file", value=DEFAULT_OBJ_FILE, type='filepath')
        with gr.Column(scale=2):
            display = gr.Plot(label="3D Viewer")
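    # A previously generated texture on disk takes precedence over an uploaded file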
    def update_display(file, texture, uv_scale, light_intensity, ambient_intensity, transparency, color, num_inference_steps):
        texture_to_use = TEMP_TEXTURE_FILE if os.path.exists(TEMP_TEXTURE_FILE) else texture
        return display_3d_object(file, texture_to_use, light_intensity, ambient_intensity, color, uv_scale, transparency)
    submit_button.click(fn=update_display, inputs=[obj_file, texture_file, uv_scale_slider, light_intensity_slider, ambient_intensity_slider, transparency_slider, color_picker, num_inference_steps_slider], outputs=display)
    generate_button.click(fn=update_texture_display, inputs=[prompt_input, texture_file, num_inference_steps_slider], outputs=texture_preview)
    restore_button.click(fn=restore_original, inputs=[obj_file], outputs=display)
    clear_button.click(fn=clear_texture, outputs=texture_preview)
    texture_file.change(fn=update_texture_display, inputs=[prompt_input, texture_file, num_inference_steps_slider], outputs=texture_preview)
    demo.load(fn=update_display, inputs=[obj_file, texture_file, uv_scale_slider, light_intensity_slider, ambient_intensity_slider, transparency_slider, color_picker, num_inference_steps_slider], outputs=display)
    gr.Examples(
        examples=[[DEFAULT_OBJ_FILE, None], [DEFAULT_GLB_FILE, None]],
        inputs=[obj_file, texture_file],
        label="Example Files"
    )
demo.launch(debug=True)