Update app.py

app.py CHANGED
```diff
@@ -9,6 +9,7 @@ from diffusers.utils.import_utils import is_xformers_available
 from vlogger.STEB.model_transform import tca_transform_model, ip_scale_set, ip_transform_model
 from diffusers.models import AutoencoderKL
 from models.clip import TextEmbedder
+sys.path.append("..")
 from datasets import video_transforms
 from torchvision import transforms
 from utils import mask_generation_before
```
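The new `sys.path.append("..")` line is what lets the repo-local imports that follow it (`datasets`, `utils`) resolve when the Space runs `app.py` from a subdirectory. A minimal sketch of the mechanism; the directory layout in the comment is hypothetical:

```python
import sys

# sys.path is the list of directories Python searches when resolving
# imports. Appending ".." (relative to the process's working directory,
# not to this file) exposes sibling packages one level up, e.g. in a
# hypothetical layout:
#
#   repo/
#       datasets/video_transforms.py
#       models/clip.py
#       utils.py
#       demo/app.py      <- launched with repo/demo as the CWD
sys.path.append("..")

print(sys.path[-1])  # "..": `from datasets import video_transforms` can now resolve
```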
```diff
@@ -224,7 +225,7 @@ def video_generation(text, image, scfg_scale, tcfg_scale, img_cfg_scale, diffusi
     print("begin generation", flush=True)
     transform_video = transforms.Compose([
         video_transforms.ToTensorVideo(), # TCHW
-        video_transforms.
+        video_transforms.ResizeVideo((320, 512)),
         transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
     ])
     video_frames = torch.zeros(16, 3, 320, 512, dtype=torch.uint8)
```
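This hunk is the actual bug fix: the deleted line ends mid-expression at `video_transforms.`, which raises a SyntaxError the moment `app.py` is imported, and the replacement completes the call as `ResizeVideo((320, 512))`. A sketch of what the repaired pipeline does to a clip; `ToTensorVideo` and `ResizeVideo` come from the repo's own `datasets/video_transforms.py`, and their semantics (uint8 TCHW to float in [0, 1], then spatial resize) are assumed from how the diff uses them:

```python
import torch
from torchvision import transforms
from datasets import video_transforms  # repo-local module; API assumed from this diff

transform_video = transforms.Compose([
    video_transforms.ToTensorVideo(),          # uint8 TCHW clip -> float32 in [0, 1] (assumed)
    video_transforms.ResizeVideo((320, 512)),  # resize every frame to the model's 320x512 input
    transforms.Normalize(mean=[0.5, 0.5, 0.5],
                         std=[0.5, 0.5, 0.5], inplace=True),  # [0, 1] -> [-1, 1] per channel
])

clip = torch.zeros(16, 3, 240, 426, dtype=torch.uint8)  # 16 frames at an arbitrary resolution
out = transform_video(clip)
print(out.shape)  # expected: torch.Size([16, 3, 320, 512])
```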
```diff
@@ -253,7 +254,6 @@ def video_prediction(text, image, scfg_scale, tcfg_scale, img_cfg_scale, prefram
     print("begin generation", flush=True)
     transform_video = transforms.Compose([
         video_transforms.ToTensorVideo(), # TCHW
-        # video_transforms.WebVideo320512((320, 512)),
         transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
     ])
     preframe = torch.as_tensor(convert_to_rgb(preframe)).unsqueeze(0)
```
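In `video_prediction` the change is pure cleanup: the already commented-out `WebVideo320512` resize is deleted, so this pipeline still only converts and normalizes, and the preframe is expected to arrive at the target resolution. A hedged sketch of the `preframe` line that follows, with `convert_to_rgb` stubbed out since its definition is not part of this diff:

```python
import numpy as np
import torch
from PIL import Image

def convert_to_rgb(image):
    # Stand-in for app.py's helper (not shown in the diff); assumed to
    # return an H x W x 3 uint8 RGB array from a PIL image.
    return np.asarray(image.convert("RGB"))

preframe = Image.new("RGB", (512, 320))  # dummy 320x512 input frame (PIL size is W x H)
preframe = torch.as_tensor(convert_to_rgb(preframe)).unsqueeze(0)
print(preframe.shape)  # torch.Size([1, 320, 512, 3]): a one-frame THWC clip
```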