Spaces: Runtime error

HongFangzhou committed
Commit d11c8e5 · 1 Parent(s): 2df2a8c

add app.py

Files changed:
- .gitignore +5 -0
- LICENSE +10 -0
- app.py +353 -0
- requirements.txt +25 -0
.gitignore ADDED
@@ -0,0 +1,5 @@
+assets
+checkpoint
+evaluations
+smpl_models
+EVA3D
LICENSE ADDED
@@ -0,0 +1,10 @@
+S-Lab License 1.0
+
+Copyright 2023 S-Lab
+
+Redistribution and use for non-commercial purpose in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+4. In the event that redistribution and/or use for commercial purpose in source or binary forms, with or without modification is required, please contact the contributor(s) of the work.
app.py ADDED
@@ -0,0 +1,353 @@
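+# Gradio demo for the EVA3D Space: loads the pretrained DeepFashion generator
+# and serves the "Generate RGB & Mesh" / "Generate Video" buttons defined below.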
+import sys
+import os
+
+# os.system("git clone https://github.com/hongfz16/EVA3D.git")
+sys.path.append("EVA3D")
+os.system("cp -r EVA3D/assets .")
+
+# os.system(f"{sys.executable} -m pip install -U fvcore plotly")
+
+# import torch
+# pyt_version_str=torch.__version__.split("+")[0].replace(".", "")
+# version_str="".join([
+#     f"py3{sys.version_info.minor}_cu",
+#     torch.version.cuda.replace(".",""),
+#     f"_pyt{pyt_version_str}"
+# ])
+
+# os.system(f"{sys.executable} -m pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html")
+
+# from download_models import download_pretrained_models
+
+# download_pretrained_models()
+
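+# The commented-out blocks above are one-time environment setup steps
+# (repo clone, fvcore/plotly and pytorch3d wheel installs, checkpoint download).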
+import torch
+import trimesh
+import imageio
+import pickle
+import numpy as np
+from munch import *
+from PIL import Image
+from tqdm import tqdm
+from torch.nn import functional as F
+from torch.utils import data
+from torchvision import utils
+from torchvision import transforms
+from skimage.measure import marching_cubes
+from scipy.spatial import Delaunay
+from scipy.spatial.transform import Rotation as R
+from options import BaseOptions
+from model import VoxelHumanGenerator as Generator
+from dataset import DeepFashionDataset, DemoDataset
+from utils import (
+    generate_camera_params,
+    align_volume,
+    extract_mesh_with_marching_cubes,
+    xyz2mesh,
+    requires_grad,
+    create_mesh_renderer,
+    create_cameras
+)
+from pytorch3d.io import load_objs_as_meshes, load_obj
+from pytorch3d.structures import Meshes
+from pytorch3d.renderer import (
+    FoVPerspectiveCameras, look_at_view_transform, look_at_rotation,
+    RasterizationSettings, MeshRenderer, MeshRasterizer, BlendParams,
+    SoftSilhouetteShader, HardPhongShader, PointLights, TexturesVertex,
+)
+
+# Fix random seeds so the demo produces a reproducible sequence of samples
+torch.random.manual_seed(8888)
+import random
+random.seed(8888)
+
+# Total yaw range swept by the panning video (see generate_video)
+panning_angle = np.pi / 3
+
+def sample_latent(opt, device):  # unused stub
+    return
+
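+# Render the sampled identity from three yaw offsets (+pi/8, 0, -pi/8) and
+# save the views side by side as a single PNG.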
+def generate_rgb(opt, g_ema, device, mean_latent, sample_z, sample_trans, sample_beta, sample_theta, sample_cam_extrinsics, sample_focals):
+    requires_grad(g_ema, False)
+    g_ema.is_train = False
+    g_ema.train_renderer = False
+    img_list = []
+    for k in range(3):
+        if k == 0:
+            delta = R.from_rotvec(np.pi/8 * np.array([0, 1, 0]))
+        elif k == 2:
+            delta = R.from_rotvec(-np.pi/8 * np.array([0, 1, 0]))
+        else:
+            delta = R.from_rotvec(0 * np.array([0, 1, 0]))
+        # Apply the yaw offset to the SMPL global orientation (first 3 theta values)
+        r = R.from_rotvec(sample_theta[0, :3].cpu().numpy())
+        new_r = delta * r
+        new_sample_theta = sample_theta.clone()
+        new_sample_theta[0, :3] = torch.from_numpy(new_r.as_rotvec()).to(device)
+
+        with torch.no_grad():
+            j = 0
+            chunk = 1
+            out = g_ema([sample_z[j:j+chunk]],
+                        sample_cam_extrinsics[j:j+chunk],
+                        sample_focals[j:j+chunk],
+                        sample_beta[j:j+chunk],
+                        new_sample_theta[j:j+chunk],
+                        sample_trans[j:j+chunk],
+                        truncation=opt.truncation_ratio,
+                        truncation_latent=mean_latent,
+                        return_eikonal=False,
+                        return_normal=False,
+                        return_mask=False,
+                        fix_viewdir=True)
+
+        rgb_images_thumbs = out[1].detach().cpu()[..., :3].permute(0, 3, 1, 2)
+        g_ema.zero_grad()
+        img_list.append(rgb_images_thumbs)
+
+    utils.save_image(torch.cat(img_list, 0),
+                     os.path.join(opt.results_dst_dir, 'images_paper_fig', '{}.png'.format(str(0).zfill(7))),
+                     nrow=3,
+                     normalize=True,
+                     range=(-1, 1),  # note: newer torchvision renamed this kwarg to value_range
+                     padding=0,)
+
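+# Extract a posed triangle mesh from the generator's SDF via marching cubes.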
+def generate_mesh(opt, g_ema, device, mean_latent, sample_z, sample_trans, sample_beta, sample_theta, sample_cam_extrinsics, sample_focals):
+    latent = g_ema.styles_and_noise_forward(sample_z[:1], None, opt.truncation_ratio,
+                                            mean_latent, False)
+
+    # Query the posed SDF on a 350^3 grid, extract the zero level set, then smooth
+    sdf = g_ema.renderer.marching_cube_posed(latent[0], sample_beta, sample_theta, resolution=350, size=1.4).detach()
+    marching_cubes_mesh, _, _ = extract_mesh_with_marching_cubes(sdf, level_set=0)
+    marching_cubes_mesh = trimesh.smoothing.filter_humphrey(marching_cubes_mesh, beta=0.2, iterations=5)
+    # marching_cubes_mesh_filename = os.path.join(opt.results_dst_dir, 'marching_cubes_meshes_posed', 'sample_{}_marching_cubes_mesh.obj'.format(0))
+    # with open(marching_cubes_mesh_filename, 'w') as f:
+    #     marching_cubes_mesh.export(f, file_type='obj')
+    return marching_cubes_mesh
+
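+# Render a 120-frame panning video at 30 fps: the yaw sweeps 0 -> +pi/6 -> -pi/6 -> 0.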
+def generate_video(opt, g_ema, device, mean_latent, sample_z, sample_trans, sample_beta, sample_theta, sample_cam_extrinsics, sample_focals):
+    video_list = []
+    for k in tqdm(range(120)):
+        # Piecewise-linear yaw schedule over the 120 frames
+        if k < 30:
+            angle = (panning_angle / 2) * (k / 30)
+        elif k >= 30 and k < 90:
+            angle = panning_angle / 2 - panning_angle * ((k - 30) / 60)
+        else:
+            angle = -panning_angle / 2 * ((120 - k) / 30)
+        delta = R.from_rotvec(angle * np.array([0, 1, 0]))
+        r = R.from_rotvec(sample_theta[0, :3].cpu().numpy())
+        new_r = delta * r
+        new_sample_theta = sample_theta.clone()
+        new_sample_theta[0, :3] = torch.from_numpy(new_r.as_rotvec()).to(device)
+        with torch.no_grad():
+            j = 0
+            chunk = 1
+            out = g_ema([sample_z[j:j+chunk]],
+                        sample_cam_extrinsics[j:j+chunk],
+                        sample_focals[j:j+chunk],
+                        sample_beta[j:j+chunk],
+                        new_sample_theta[j:j+chunk],
+                        sample_trans[j:j+chunk],
+                        truncation=opt.truncation_ratio,
+                        truncation_latent=mean_latent,
+                        return_eikonal=False,
+                        return_normal=False,
+                        return_mask=False,
+                        fix_viewdir=True)
+        rgb_images_thumbs = out[1].detach().cpu()[..., :3]
+        g_ema.zero_grad()
+        # Map pixel values from [-1, 1] to [0, 255] for video encoding
+        video_list.append((rgb_images_thumbs.numpy() + 1) / 2. * 255. + 0.5)
+    all_img = np.concatenate(video_list, 0).astype(np.uint8)
+    imageio.mimwrite(os.path.join(opt.results_dst_dir, 'images_paper_video', 'video_{}.mp4'.format(str(0).zfill(7))), all_img, fps=30, quality=8)
+
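+# Configure options for the 512x256 DeepFashion checkpoint, load the generator
+# weights, and sample SMPL parameters plus a camera for the demo.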
+def setup():
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    opt = BaseOptions().parse()
+
+    opt.training.batch = 1
+    opt.training.chunk = 1
+    opt.experiment.expname = '512x256_deepfashion'
+    opt.dataset.dataset_path = 'demodataset'
+    opt.rendering.depth = 5
+    opt.rendering.width = 128
+    opt.model.style_dim = 128
+    opt.model.renderer_spatial_output_dim = [512, 256]
+    opt.training.no_sphere_init = True
+    opt.rendering.input_ch_views = 3
+    opt.rendering.white_bg = True
+    opt.model.voxhuman_name = 'eva3d_deepfashion'
+    opt.training.deltasdf = True
+    opt.rendering.N_samples = 28
+    opt.experiment.ckpt = '420000'
+    opt.inference.identities = 1
+    opt.inference.truncation_ratio = 0.5
+
+    # Inference-time flags
+    opt.model.is_test = True
+    opt.model.freeze_renderer = False
+    opt.rendering.no_features_output = True
+    opt.rendering.offset_sampling = True
+    opt.rendering.static_viewdirs = True
+    opt.rendering.force_background = True
+    opt.rendering.perturb = 0
+    opt.inference.size = opt.model.size
+    opt.inference.camera = opt.camera
+    opt.inference.renderer_output_size = opt.model.renderer_spatial_output_dim
+    opt.inference.style_dim = opt.model.style_dim
+    opt.inference.project_noise = opt.model.project_noise
+    opt.inference.return_xyz = opt.rendering.return_xyz
+
+    checkpoints_dir = os.path.join('checkpoint', opt.experiment.expname, 'volume_renderer')
+    checkpoint_path = os.path.join(checkpoints_dir,
+                                   'models_{}.pt'.format(opt.experiment.ckpt.zfill(7)))
+    # define results directory name
+    result_model_dir = 'iter_{}'.format(opt.experiment.ckpt.zfill(7))
+
+    # create results directory
+    results_dir_basename = os.path.join(opt.inference.results_dir, opt.experiment.expname)
+    opt.inference.results_dst_dir = os.path.join(results_dir_basename, result_model_dir)
+    if opt.inference.fixed_camera_angles:
+        opt.inference.results_dst_dir = os.path.join(opt.inference.results_dst_dir, 'fixed_angles')
+    else:
+        opt.inference.results_dst_dir = os.path.join(opt.inference.results_dst_dir, 'random_angles')
+    os.makedirs(opt.inference.results_dst_dir, exist_ok=True)
+    os.makedirs(os.path.join(opt.inference.results_dst_dir, 'images_paper_fig'), exist_ok=True)
+    os.makedirs(os.path.join(opt.inference.results_dst_dir, 'images_paper_video'), exist_ok=True)
+    os.makedirs(os.path.join(opt.inference.results_dst_dir, 'marching_cubes_meshes_posed'), exist_ok=True)
+    checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
+
+    # load generation model; copy only weights whose shapes match the checkpoint
+    g_ema = Generator(opt.model, opt.rendering, full_pipeline=False, voxhuman_name=opt.model.voxhuman_name).to(device)
+    pretrained_weights_dict = checkpoint["g_ema"]
+    model_dict = g_ema.state_dict()
+    for k, v in pretrained_weights_dict.items():
+        if v.size() == model_dict[k].size():
+            model_dict[k] = v
+        else:
+            print(k)  # report keys with mismatched shapes
+
+    g_ema.load_state_dict(model_dict)
+
+    transform = transforms.Compose(
+        [transforms.ToTensor(),
+         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)])
+
+    if 'deepfashion' in opt.dataset.dataset_path:
+        file_list = '/mnt/lustre/fzhong/smplify-x/deepfashion_train_list/deepfashion_train_list_MAN.txt'
+    elif '20w_fashion' in opt.dataset.dataset_path:
+        file_list = '/mnt/lustre/fzhong/mmhuman3d/20w_fashion_result/nondress_flist.txt'
+    else:
+        file_list = None
+    if file_list:
+        dataset = DeepFashionDataset(opt.dataset.dataset_path, transform, opt.model.size,
+                                     opt.model.renderer_spatial_output_dim, file_list)
+    else:
+        dataset = DemoDataset()
+
+    # get the mean latent vector for g_ema (used for truncation)
+    if opt.inference.truncation_ratio < 1:
+        with torch.no_grad():
+            mean_latent = g_ema.mean_latent(opt.inference.truncation_mean, device)
+    else:
+        mean_latent = None
+
+    g_ema.renderer.is_train = False
+    g_ema.renderer.perturb = 0
+
+    # generate(opt.inference, dataset, g_ema, device, mean_latent, opt.rendering.render_video)
+
+    # Sample SMPL parameters (translation, shape, pose) and a camera
+    sample_trans, sample_beta, sample_theta = dataset.sample_smpl_param(1, device, val=False)
+    sample_cam_extrinsics, sample_focals = dataset.get_camera_extrinsics(1, device, val=False)
+
+    torch.randn(1, opt.inference.style_dim, device=device)  # discarded draw; removing it would shift the RNG sequence below
+
+    return opt.inference, g_ema, device, mean_latent, torch.randn(1, opt.inference.style_dim, device=device), \
+        sample_trans, sample_beta, sample_theta, sample_cam_extrinsics, sample_focals
+
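+# Gradio front end. Model state is initialised lazily on the first button click
+# and cached in setup_list for subsequent clicks.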
+import gradio as gr
+import plotly.graph_objects as go
+from PIL import Image
+
+# Cached output of setup(); index 4 is the latent code sample_z
+setup_list = None
+
+def get_video():
+    global setup_list
+    if setup_list is None:
+        setup_list = list(setup())
+    generate_video(*setup_list)
+    torch.cuda.empty_cache()
+    path = 'evaluations/512x256_deepfashion/iter_0420000/random_angles/images_paper_video/video_0000000.mp4'
+    return path
+
+def get_mesh():
+    global setup_list
+    if setup_list is None:
+        setup_list = list(setup())
+    # Resample the latent code so each click yields a new identity
+    setup_list[4] = torch.randn(1, setup_list[0].style_dim, device=setup_list[2])
+    generate_rgb(*setup_list)
+    mesh = generate_mesh(*setup_list)
+    torch.cuda.empty_cache()
+
+    # Unpack vertex coordinates and triangle indices for the plotly viewer
+    x = np.asarray(mesh.vertices).T[0]
+    y = np.asarray(mesh.vertices).T[1]
+    z = np.asarray(mesh.vertices).T[2]
+
+    i = np.asarray(mesh.faces).T[0]
+    j = np.asarray(mesh.faces).T[1]
+    k = np.asarray(mesh.faces).T[2]
+    fig = go.Figure(go.Mesh3d(x=x, y=y, z=z,
+                              i=i, j=j, k=k,
+                              color="lightpink",))
+    # # flatshading=True,
+    # lighting=dict(ambient=0.5,
+    #               diffuse=1,
+    #               fresnel=4,
+    #               specular=0.5,
+    #               roughness=0.05,
+    #               facenormalsepsilon=0,
+    #               vertexnormalsepsilon=0),
+    # lightposition=dict(x=100,
+    #                    y=100,
+    #                    z=1000)))
+    path = 'evaluations/512x256_deepfashion/iter_0420000/random_angles/images_paper_fig/0000000.png'
+
+    image = Image.open(path)
+
+    return fig, image
+
+markdown = f'''
+# EVA3D: Compositional 3D Human Generation from 2D Image Collections
+Authored by Fangzhou Hong, Zhaoxi Chen, Yushi Lan, Liang Pan, Ziwei Liu
+The Space demo for the ICLR 2023 Spotlight paper "EVA3D: Compositional 3D Human Generation from 2D Image Collections".
+
+### Useful links:
+- [Official Github Repo](https://github.com/hongfz16/EVA3D)
+- [Project Page](https://hongfz16.github.io/projects/EVA3D.html)
+- [arXiv Link](https://arxiv.org/abs/2210.04888)
+
+Licensed under the S-Lab License.
+
+First click "Generate RGB & Mesh" to randomly sample a 3D human (~6 s). Then click "Generate Video" to render a panning video of the generated human (~30 s).
+'''
+
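+# Two-column layout: description on the left; rendered views, mesh viewer,
+# video player, and the two buttons on the right.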
+with gr.Blocks() as demo:
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown(markdown)
+        with gr.Column():
+            with gr.Row():
+                with gr.Column():
+                    image = gr.Image(type="pil", shape=(512, 256*3))
+            with gr.Row():
+                with gr.Column():
+                    mesh = gr.Plot()
+                with gr.Column():
+                    video = gr.Video()
+            # with gr.Row():
+            #     numberoframes = gr.Slider(minimum=30, maximum=250, label='Number Of Frame For Video Generation')
+            #     model_name = gr.Dropdown(choices=["ffhq", "afhq"], label="Choose Model Type")
+            #     mesh_type = gr.Dropdown(choices=["DepthMesh", "Marching Cubes"], label="Choose Mesh Type")
+            with gr.Row():
+                btn = gr.Button(value="Generate RGB & Mesh")
+                btn_2 = gr.Button(value="Generate Video")
+
+    btn.click(get_mesh, [], [mesh, image])
+    btn_2.click(get_video, [], [video])
+
+demo.launch(debug=True)
requirements.txt ADDED
@@ -0,0 +1,25 @@
+--extra-index-url https://download.pytorch.org/whl/cu116
+torch
+torchvision
+plotly
+gradio
+chumpy==0.70
+imageio==2.16.1
+matplotlib==3.5.1
+numpy==1.21.2
+opencv-python==4.5.5.64
+pillow==9.0.1
+scikit-image==0.19.2
+scikit-learn==1.0.2
+scikit-video==1.1.11
+scipy==1.8.0
+smplx==0.1.28
+trimesh==3.10.7
+sklearn
+lmdb
+ninja
+requests
+tqdm
+configargparse
+munch
+imageio_ffmpeg