Commit a48ca5b by QinOwen (parent: 7c92e57)

debug-seed
VADER-VideoCrafter/scripts/main/train_t2v_lora.py CHANGED
@@ -738,7 +738,7 @@ def run_training(args, model, **kwargs):
     # load the pretrained LoRA model
     peft.set_peft_model_state_dict(peft_model, torch.load(args.lora_ckpt_path))
 
-
+    print('random seed debug: ', torch.rand(100).sum())
     print("precision: ", peft_model.dtype)
     # precision of first_stage_model
     print("precision of first_stage_model: ", peft_model.first_stage_model.dtype)
@@ -929,7 +929,6 @@ def main_fn(prompt, lora_model, lora_rank, seed=200, height=320, width=512, unco
     seed_everything(args.seed)
     seed_everything_self(args.seed)
 
-    print(torch.rand(100).sum())
 
     video_path = run_training(args, model)

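The relocated print is a quick reproducibility probe: once all RNGs are seeded, torch.rand(100).sum() should yield the same scalar on every run, so emitting it right after the LoRA weights are loaded confirms the seed is still in effect at that point. A minimal sketch of the idea, assuming pytorch_lightning's seed_everything (the repo's seed_everything_self helper is not shown in this diff):

    import torch
    from pytorch_lightning import seed_everything

    def rng_fingerprint(seed: int) -> float:
        # Seed all RNGs, then reduce a fixed-size random draw to a single scalar.
        # Identical printed values across runs mean the global RNG state is
        # reproducible up to this point in the program.
        seed_everything(seed)
        return torch.rand(100).sum().item()

    if __name__ == "__main__":
        print('random seed debug: ', rng_fingerprint(200))  # same value every run
        print('random seed debug: ', rng_fingerprint(200))  # matches the line above
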
app.py CHANGED
@@ -22,7 +22,7 @@ examples = [
 
 model = setup_model()
 
-@spaces.GPU(duration=120)
+@spaces.GPU(duration=180)
 def gradio_main_fn(prompt, lora_model, lora_rank, seed, height, width, unconditional_guidance_scale, ddim_steps, ddim_eta,
                    frames, savefps):
     global model
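
For reference, spaces.GPU is the ZeroGPU decorator from Hugging Face's spaces package; duration sets the GPU-time window (in seconds) requested for each call to the decorated function, so the bump from 120 to 180 gives longer video generations more headroom. A minimal sketch of the pattern, with the real inference body replaced by a placeholder:

    import spaces

    @spaces.GPU(duration=180)  # request up to 180 s of ZeroGPU time per call
    def generate(prompt):
        # Placeholder: app.py runs the VideoCrafter/LoRA inference here.
        return f"would render a video for: {prompt}"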