Chaerin5 committed on
Commit
35c713b
·
1 Parent(s): e277aac

enable zerogpu

Browse files
Files changed (2) hide show
  1. app.py +1 -2
  2. vqvae.py +2 -0
app.py CHANGED
@@ -254,7 +254,7 @@ hands = mp_hands.Hands(
254
  min_detection_confidence=0.1,
255
  )
256
 
257
- @spaces.GPU(duration=120)
258
  def get_ref_anno(ref):
259
  if ref is None:
260
  return (
@@ -359,7 +359,6 @@ def get_ref_anno(ref):
359
  target_size=opts.image_size,
360
  latent_size=opts.latent_size,
361
  )
362
- print("ready to go to autoencoder")
363
  latent = opts.latent_scaling_factor * autoencoder.encode(image).sample()
364
  if not REF_POSE_MASK:
365
  heatmaps = torch.zeros_like(heatmaps)
 
254
  min_detection_confidence=0.1,
255
  )
256
 
257
+ # @spaces.GPU(duration=120)
258
  def get_ref_anno(ref):
259
  if ref is None:
260
  return (
 
359
  target_size=opts.image_size,
360
  latent_size=opts.latent_size,
361
  )
 
362
  latent = opts.latent_scaling_factor * autoencoder.encode(image).sample()
363
  if not REF_POSE_MASK:
364
  heatmaps = torch.zeros_like(heatmaps)
vqvae.py CHANGED
@@ -20,6 +20,7 @@ from typing import List
20
  import torch
21
  import torch.nn.functional as F
22
  from torch import nn
 
23
 
24
 
25
  class Autoencoder(nn.Module):
@@ -72,6 +73,7 @@ class Autoencoder(nn.Module):
72
  # Decode the image of shape `[batch_size, channels, height, width]`
73
  return self.decoder(z)
74
 
 
75
  def forward(self, x):
76
  posterior = self.encode(x)
77
  z = posterior.sample()
 
20
  import torch
21
  import torch.nn.functional as F
22
  from torch import nn
23
+ import spaces
24
 
25
 
26
  class Autoencoder(nn.Module):
 
73
  # Decode the image of shape `[batch_size, channels, height, width]`
74
  return self.decoder(z)
75
 
76
+ @spaces.GPU(duration=120)
77
  def forward(self, x):
78
  posterior = self.encode(x)
79
  z = posterior.sample()