azizinaghsh committed on
Commit bbcccf9
1 Parent(s): b48f0c7

change character position

Files changed (2):
  1. app.py +3 -7
  2. utils/common_viz.py +9 -2
app.py CHANGED

```diff
@@ -50,7 +50,7 @@ DEFAULT_TEXT = [
 HEADER = """

 <div align="center">
-<h1 style='text-align: center'>E.T. the Exceptional Trajectories</h2>
+<h1 style='text-align: center'>E.T. the Exceptional Trajectories (Static Character Pose)</h1>
 <a href="https://robincourant.github.io/info/"><strong>Robin Courant</strong></a>

 <a href="https://nicolas-dufour.github.io/"><strong>Nicolas Dufour</strong></a>
@@ -113,12 +113,8 @@ def generate(
     sample_id = SAMPLE_IDS[0]  # Default to the first sample ID
     seq_feat = diffuser.net.model.clip_sequential

-    batch = get_batch(prompt, sample_id, clip_model, dataset, seq_feat, device)
-    batch["character_position"] = torch.tensor(
-        [float(coord) for coord in character_position.strip("[]").split(",")],
-        device=device,
-    )
-
+    batch = get_batch(prompt, sample_id, character_position, clip_model, dataset, seq_feat, device)
+
     with torch.no_grad():
         out = diffuser.predict_step(batch, 0)

```
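In app.py the call site now just forwards the raw coordinate string; the parsing moves into `get_batch` (see the utils/common_viz.py diff below). A minimal sketch of that parsing step, assuming a bracketed "[x, y, z]" input string; the helper name `parse_character_position` is illustrative, not from the repo:

```python
import torch

def parse_character_position(character_position: str, device: str = "cpu") -> torch.Tensor:
    # Mirrors the diff: strip the surrounding brackets, split on commas,
    # and convert each coordinate to a float.
    coords = [float(coord) for coord in character_position.strip("[]").split(",")]
    return torch.tensor(coords, device=device)

print(parse_character_position("[0.0, 1.5, -2.0]"))
# tensor([ 0.0000,  1.5000, -2.0000])
```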
utils/common_viz.py CHANGED

```diff
@@ -77,6 +77,7 @@ def encode_text(
 def get_batch(
     prompt: str,
     sample_id: str,
+    character_position: str,
     clip_model: clip.model.CLIP,
     dataset: MultimodalDataset,
     seq_feat: bool,
@@ -101,8 +102,14 @@ def get_batch(
     # Update batch
     batch["caption_raw"] = [prompt]
     batch["caption_feat"] = caption_feat
-    batch['char_feat'] = torch.zeros_like(batch['char_feat'])
-    batch['char_raw']['char_raw_feat'] = torch.zeros_like(batch['char_raw']['char_raw_feat'])
+
+    character_position = torch.tensor(
+        [float(coord) for coord in character_position.strip("[]").split(",")],
+        device=device,
+    )
+
+    batch['char_feat'] = character_position.unsqueeze(0).repeat(1, 300).unsqueeze(0)
+    batch['char_raw']['char_raw_feat'] = character_position.unsqueeze(0).repeat(1, 300).unsqueeze(0)
     batch['char_raw']['char_vertices'] = torch.zeros_like(batch['char_raw']['char_vertices'])
     batch['char_raw']['char_faces'] = torch.zeros_like(batch['char_raw']['char_faces'])
```
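A note on the new tiling in `get_batch`: `.unsqueeze(0).repeat(1, 300).unsqueeze(0)` tiles the coordinates along the last (feature) axis, whereas the `torch.zeros_like` calls it replaces preserved whatever shape `char_feat` already had. A small sketch of the resulting shapes, assuming a 3-D (x, y, z) position as the bracketed string suggests:

```python
import torch

character_position = torch.tensor([0.0, 1.5, -2.0])  # assumed 3-D (x, y, z) position

# (3,) -> (1, 3) -> (1, 900) -> (1, 1, 900): the three coordinates
# repeat 300 times along the last axis.
char_feat = character_position.unsqueeze(0).repeat(1, 300).unsqueeze(0)
print(char_feat.shape)  # torch.Size([1, 1, 900])

# If a per-frame layout of shape (1, 300, 3) were intended instead,
# repeating along a new frame axis would produce it:
per_frame = character_position.unsqueeze(0).repeat(300, 1).unsqueeze(0)
print(per_frame.shape)  # torch.Size([1, 300, 3])
```

Whether (1, 1, 900) is the layout `diffuser.net` expects for `char_feat` cannot be confirmed from this diff alone.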