updates new formats
- .gitignore +1 -0
- app.py +183 -121
.gitignore CHANGED
@@ -1,2 +1,3 @@
 
 /.venv
+*.obj
app.py CHANGED
@@ -5,8 +5,10 @@ import numpy as np
 from PIL import Image, ImageDraw
 import torch
 from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline
-import
+import io
 import matplotlib.pyplot as plt
+#import pyrender
+#import scipy
 
 # Load the Stable Diffusion model for text-to-image generation and inpainting
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -16,58 +18,85 @@ pipeline_inpaint = StableDiffusionInpaintPipeline.from_pretrained(
     torch_dtype=torch.float16
 ).to(device)
 
-
-
-
-
-
-
-
-
-
+DEFAULT_OBJ_FILE = "female.obj"
+DEFAULT_GLB_FILE = "vroid_girl1.glb"
+DEFAULT_VRM_FILE = "fischl.vrm"
+DEFAULT_VRM_FILE2 = "woman.vrm"
+DEFAULT_VRM_FILE3 = "mona.vrm"
+DEFAULT_TEXTURE = "future.png"
+DEFAULT_TEXTURE2 = "woman1.jpeg"
+DEFAULT_TEXTURE3 = "woman2.jpeg"
+def generate_default_uv(mesh, quality='medium'):
+    """
+    Generate default UV coordinates for a mesh if UV mapping is missing.
+    """
+    if quality == 'low':
+        bounds = mesh.bounds
+        width = bounds[1][0] - bounds[0][0]
+        height = bounds[1][1] - bounds[0][1]
+        uv_coords = np.zeros((len(mesh.vertices), 2))
+        uv_coords[:, 0] = (mesh.vertices[:, 0] - bounds[0][0]) / width
+        uv_coords[:, 1] = (mesh.vertices[:, 1] - bounds[0][1]) / height
+
+    elif quality == 'medium':
+        height_range = mesh.vertices[:, 2].max() - mesh.vertices[:, 2].min()
+        radius = np.sqrt(mesh.vertices[:, 0]**2 + mesh.vertices[:, 1]**2)
+        uv_coords = np.zeros((len(mesh.vertices), 2))
+        uv_coords[:, 0] = np.arctan2(mesh.vertices[:, 1], mesh.vertices[:, 0]) / (2 * np.pi) + 0.5
+        uv_coords[:, 1] = (mesh.vertices[:, 2] - mesh.vertices[:, 2].min()) / height_range
+
+    elif quality == 'high':
+        radius = np.sqrt(np.sum(mesh.vertices**2, axis=1))
+        uv_coords = np.zeros((len(mesh.vertices), 2))
+        uv_coords[:, 0] = np.arctan2(mesh.vertices[:, 1], mesh.vertices[:, 0]) / (2 * np.pi) + 0.5
+        uv_coords[:, 1] = np.arccos(mesh.vertices[:, 2] / radius) / np.pi
+    else:
+        raise ValueError("Invalid quality parameter. Choose from 'low', 'medium', or 'high'.")
 
+    return uv_coords
 
-def apply_texture(mesh, texture_image, uv_scale):
+def apply_texture(mesh, texture_image, uv_scale, uv_quality='medium'):
     """
-    Applies the texture to the mesh with UV scaling
+    Applies the texture to the mesh with UV scaling.
     """
-
-
+    if not hasattr(mesh.visual, 'uv') or mesh.visual.uv is None:
+        # If the mesh does not have UV coordinates, generate them
+        print("No UV coordinates found; generating default UV mapping.")
+        uv_coords = generate_default_uv(mesh, quality=uv_quality)
+    else:
+        uv_coords = mesh.visual.uv
 
-    #
+    # Ensure UV coordinates exist
+    if uv_coords is None:
+        raise ValueError("UV coordinates are missing from the mesh.")
+
+    # Apply UV scaling and ensure it is within valid range
     uv_coords = np.clip(uv_coords * uv_scale, 0, 1)
 
-    # Get the size of the texture image
     img_width, img_height = texture_image.size
     texture_array = np.array(texture_image)
 
-    # Prepare to store the colors per face
     face_colors = []
 
     for face in mesh.faces:
         uv_face = uv_coords[face]
         pixel_coords = np.round(uv_face * np.array([img_width - 1, img_height - 1])).astype(int)
 
-        # Ensure the UV coordinates are within the bounds of the texture image
         valid_coords = np.all((pixel_coords[:, 0] >= 0) & (pixel_coords[:, 0] < img_width) &
                               (pixel_coords[:, 1] >= 0) & (pixel_coords[:, 1] < img_height))
 
         if valid_coords:
-            # Get the average color for the face from the corresponding UV points in the texture
             face_color = np.mean(texture_array[pixel_coords[:, 1], pixel_coords[:, 0]], axis=0)
-            face_colors.append(face_color / 255.0)
+            face_colors.append(face_color / 255.0)
         else:
-
-            face_colors.append([0.5, 0.5, 0.5])  # Default to gray if no texture is applied
+            face_colors.append([0.5, 0.5, 0.5])
 
-    # Ensure no face is left unpainted
     face_colors = np.array(face_colors)
     if len(face_colors) < len(mesh.faces):
         face_colors = np.pad(face_colors, ((0, len(mesh.faces) - len(face_colors)), (0, 0)), 'constant', constant_values=0.5)
 
     return face_colors
 
-
 def load_glb_file(filename):
     trimesh_scene = trimesh.load(filename)
     if isinstance(trimesh_scene, trimesh.Scene):
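
A note on the new generate_default_uv fallback: 'low' is a planar projection onto the XY bounding box, 'medium' a cylindrical projection (u = atan2(y, x) / 2π + 0.5, v = normalized Z height), and 'high' a spherical projection (v = arccos(z / r) / π, which would produce NaN for a vertex exactly at the origin). Below is a minimal usage sketch, not part of the commit; it assumes numpy and trimesh are installed and that generate_default_uv is defined as in the hunk above, and the test mesh is arbitrary.

# Usage sketch (not from the commit): exercise the 'medium' cylindrical
# fallback on a mesh that ships without UV coordinates.
import numpy as np
import trimesh

mesh = trimesh.creation.cylinder(radius=0.5, height=2.0)  # arbitrary test mesh
uv = generate_default_uv(mesh, quality='medium')

# One (u, v) pair per vertex: u is the angle around the Z axis mapped to
# [0, 1], v is the height normalized to [0, 1].
assert uv.shape == (len(mesh.vertices), 2)
print(uv.min(axis=0), uv.max(axis=0))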
@@ -81,112 +110,115 @@ def generate_clothing_image(prompt, num_inference_steps):
     Generates the clothing texture based on the provided prompt and number of inference steps.
     """
     image = pipeline(prompt, num_inference_steps=num_inference_steps).images[0]
-    image.save(TEMP_TEXTURE_FILE)
-    return TEMP_TEXTURE_FILE, image
+    return image
 
-def
-
-
-
-
-
-
-
-
-    Returns:
-        (str, PIL.Image): The path to the generated texture file and the generated texture image.
-    """
-    # Load UV map as a mask
-    uv_map = Image.open(uv_map_file)
-
-    # Generate texture based on UV map and the provided prompt
-    image = pipeline_inpaint(
-        prompt=prompt,
-        image=uv_map,
-        mask_image=uv_map,
-        num_inference_steps=num_inference_steps  # Set custom number of inference steps
-    ).images[0]
-
-    # Save the generated texture
-    image.save(TEMP_TEXTURE_FILE)
-
-    return TEMP_TEXTURE_FILE, image
+def load_vrm_file(filename):
+    try:
+        vrm_data = trimesh.load(filename, file_type='glb')
+        if isinstance(vrm_data, trimesh.Scene):
+            mesh = vrm_data.dump(concatenate=True)
+        else:
+            mesh = vrm_data
+    except Exception as e:
+        raise ValueError(f"Failed to load VRM file: {e}")
 
-
-
-
-    """
+    return mesh
+
+def display_3d_object(obj_file, texture_image, light_intensity, ambient_intensity, color, uv_scale, transparency, uv_quality=None):
     file_extension = obj_file.split('.')[-1].lower()
 
-
-
-
-
-
-
-
+    if file_extension == 'vrm':
+        mesh = load_vrm_file(obj_file)
+        try:
+            if texture_image:
+                face_colors = apply_texture(mesh, texture_image, uv_scale, uv_quality)
+            else:
+                face_colors = np.array([color] * len(mesh.faces))
+        except ValueError as e:
+            face_colors = np.array([color] * len(mesh.faces))
+
+        vertices = mesh.vertices
+        faces = mesh.faces
+
+        fig = go.Figure(data=[
+            go.Mesh3d(
+                x=vertices[:, 0],
+                y=vertices[:, 1],
+                z=vertices[:, 2],
+                i=faces[:, 0],
+                j=faces[:, 1],
+                k=faces[:, 2],
+                facecolor=face_colors,
+                opacity=transparency,
+                lighting=dict(
+                    ambient=ambient_intensity,
+                    diffuse=light_intensity,
+                    specular=0.8,
+                    roughness=0.3,
+                    fresnel=0.1
+                ),
+                lightposition=dict(
+                    x=100,
+                    y=200,
+                    z=300
+                )
+            )
+        ])
+        fig.update_layout(scene=dict(aspectmode='data'))
+        return fig
 
-    # Apply texture or color
-    if texture_file:
-        face_colors = apply_texture(mesh, texture_file, uv_scale)
     else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if file_extension == 'obj':
+            mesh = trimesh.load(obj_file)
+        elif file_extension == 'glb':
+            mesh = load_glb_file(obj_file)
+        else:
+            raise ValueError("Unsupported file format. Please upload a .obj, .glb, or .vrm file.")
+
+        if texture_image:
+            face_colors = apply_texture(mesh, texture_image, uv_scale)
+        else:
+            face_colors = np.array([color] * len(mesh.faces))
+
+        fig = go.Figure(data=[
+            go.Mesh3d(
+                x=mesh.vertices[:, 0],
+                y=mesh.vertices[:, 1],
+                z=mesh.vertices[:, 2],
+                i=mesh.faces[:, 0],
+                j=mesh.faces[:, 1],
+                k=mesh.faces[:, 2],
+                facecolor=face_colors,
+                opacity=transparency,
+                lighting=dict(
+                    ambient=ambient_intensity,
+                    diffuse=light_intensity,
+                    specular=0.8,
+                    roughness=0.3,
+                    fresnel=0.1
+                ),
+                lightposition=dict(
+                    x=100,
+                    y=200,
+                    z=300
+                )
             )
-    )
-
-    fig.update_layout(scene=dict(aspectmode='data'))
+        ])
+        fig.update_layout(scene=dict(aspectmode='data'))
 
-
+    return fig
 
 def clear_texture():
-    """
-    Clears the texture preview and removes the texture file, allowing for a reset.
-    """
-    if os.path.exists(TEMP_TEXTURE_FILE):
-        os.remove(TEMP_TEXTURE_FILE)
     return None
 
 def restore_original(obj_file):
-    ""
-    Restores the original 3D object without any applied texture.
-    """
-    return display_3d_object(obj_file, None, 0.8, 0.5, "#D3D3D3", 1.0, 1.0)  # Default settings for restoration
+    return display_3d_object(obj_file, None, 0.8, 0.5, "#D3D3D3", 1.0, 1.0)
 
 def update_texture_display(prompt, texture_file, num_inference_steps):
-    """
-    Update the texture display either by generating a texture from the prompt
-    or by displaying an uploaded texture.
-    """
     if prompt:
-
-        texture_path, image = generate_clothing_image(prompt, num_inference_steps)
+        image = generate_clothing_image(prompt, num_inference_steps)
         return image
     elif texture_file:
-        # Display the uploaded texture file
         return Image.open(texture_file)
     return None
 
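
Two details in this hunk are easy to miss. load_vrm_file works because a VRM file is a glTF 2.0 binary container, so trimesh's 'glb' parser can read its geometry (VRM-specific data such as bones and expressions is simply ignored). The viewer then hands go.Mesh3d one color per triangle via facecolor. Below is a standalone sketch of that rendering pattern, assuming only plotly and numpy are installed; the tetrahedron data is illustrative, not from the app.

# Standalone sketch of the per-face coloring pattern used by display_3d_object.
import numpy as np
import plotly.graph_objects as go

# A tetrahedron: four vertices, four triangular faces.
vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=float)
faces = np.array([[0, 1, 2], [0, 3, 1], [0, 2, 3], [1, 3, 2]])
face_colors = ["#D3D3D3"] * len(faces)  # one CSS color string per face

fig = go.Figure(data=[go.Mesh3d(
    x=vertices[:, 0], y=vertices[:, 1], z=vertices[:, 2],
    i=faces[:, 0], j=faces[:, 1], k=faces[:, 2],
    facecolor=face_colors,  # per-face colors, as in the commit
    lighting=dict(ambient=0.5, diffuse=0.8, specular=0.8, roughness=0.3, fresnel=0.1),
    lightposition=dict(x=100, y=200, z=300),
)])
fig.update_layout(scene=dict(aspectmode='data'))
fig.show()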
@@ -203,35 +235,65 @@ with gr.Blocks() as demo:
             texture_preview = gr.Image(label="Texture Preview", visible=True)
 
             gr.Markdown("### Mapping, Lighting & Color Settings")
-            uv_scale_slider = gr.Slider(minimum=0.1, maximum=5, step=0.1, value=1.0, label="UV Mapping Scale
+            uv_scale_slider = gr.Slider(minimum=0.1, maximum=5, step=0.1, value=1.0, label="UV Mapping Scale")
+            uv_quality_dropdown = gr.Dropdown(label="UV Quality (for VRM files)", choices=['low', 'medium', 'high'], value='medium')
             light_intensity_slider = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.8, label="Light Intensity")
             ambient_intensity_slider = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.5, label="Ambient Intensity")
-            transparency_slider = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=1.0, label="Transparency
+            transparency_slider = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=1.0, label="Transparency")
             color_picker = gr.ColorPicker(value="#D3D3D3", label="Object Color")
             submit_button = gr.Button("Submit")
             restore_button = gr.Button("Restore")
             clear_button = gr.Button("Clear")
-            obj_file = gr.File(label="Upload OBJ or
+            obj_file = gr.File(label="Upload OBJ, GLB, or VRM file", value=DEFAULT_OBJ_FILE, type='filepath')
 
         with gr.Column(scale=2):
             display = gr.Plot(label="3D Viewer")
 
-    def update_display(file, texture, uv_scale, light_intensity, ambient_intensity, transparency, color, num_inference_steps):
-
-
+    def update_display(file, texture, uv_scale, uv_quality, light_intensity, ambient_intensity, transparency, color, num_inference_steps):
+        file_extension = file.split('.')[-1].lower()
+        texture_image = None
+
+        if texture:
+            texture_image = Image.open(texture)
 
-
+        if file_extension == 'vrm':
+            return display_3d_object(file, texture_image, light_intensity, ambient_intensity, color, uv_scale, transparency, uv_quality)
+        else:
+            return display_3d_object(file, texture_image, light_intensity, ambient_intensity, color, uv_scale, transparency)
+
+    def toggle_uv_quality_dropdown(file):
+        if file is None:
+            return gr.update(visible=False)
+
+        file_extension = file.split('.')[-1].lower()
+        return gr.update(visible=(file_extension == 'vrm'))
+
+    submit_button.click(
+        fn=update_display,
+        inputs=[obj_file, texture_file, uv_scale_slider, uv_quality_dropdown, light_intensity_slider, ambient_intensity_slider, transparency_slider, color_picker, num_inference_steps_slider],
+        outputs=display
+    )
+
+    obj_file.change(fn=toggle_uv_quality_dropdown, inputs=[obj_file], outputs=uv_quality_dropdown)
     generate_button.click(fn=update_texture_display, inputs=[prompt_input, texture_file, num_inference_steps_slider], outputs=texture_preview)
     restore_button.click(fn=restore_original, inputs=[obj_file], outputs=display)
     clear_button.click(fn=clear_texture, outputs=texture_preview)
     texture_file.change(fn=update_texture_display, inputs=[prompt_input, texture_file, num_inference_steps_slider], outputs=texture_preview)
 
-    demo.load(fn=update_display, inputs=[obj_file, texture_file, uv_scale_slider, light_intensity_slider, ambient_intensity_slider, transparency_slider, color_picker, num_inference_steps_slider], outputs=display)
+    demo.load(fn=update_display, inputs=[obj_file, texture_file, uv_scale_slider, uv_quality_dropdown, light_intensity_slider, ambient_intensity_slider, transparency_slider, color_picker, num_inference_steps_slider], outputs=display)
+
+
+
 
     gr.Examples(
-        examples=[[
+        examples=[[DEFAULT_VRM_FILE, DEFAULT_TEXTURE],
+                  [DEFAULT_OBJ_FILE, None],
+                  [DEFAULT_GLB_FILE, None],
+                  [DEFAULT_VRM_FILE2, DEFAULT_TEXTURE2],
+                  [DEFAULT_VRM_FILE3, DEFAULT_TEXTURE3]],
        inputs=[obj_file, texture_file],
        label="Example Files"
    )
 
 demo.launch(debug=True)
+