Spaces: Runtime error

onescotch committed • Commit d78ebe8 • 1 Parent(s): 85d8725
change back to A10G

Files changed:
- app.py (+5 -5)
- requirements.txt (+2 -2)
app.py CHANGED

@@ -13,7 +13,7 @@ try:
 except:
     os.system('pip install /home/user/app/main/transformer_utils')
 hf_hub_download(repo_id="caizhongang/SMPLer-X", filename="smpler_x_h32.pth.tar", local_dir="/home/user/app/pretrained_models")
-os.system('cp -rf /home/user/app/assets/conversions.py /
+os.system('cp -rf /home/user/app/assets/conversions.py /home/user/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torchgeometry/core/conversions.py')
 DEFAULT_MODEL='smpler_x_h32'
 OUT_FOLDER = '/home/user/app/demo_out'
 os.makedirs(OUT_FOLDER, exist_ok=True)

@@ -24,13 +24,13 @@ print(torch.version.cuda)
 index = torch.cuda.current_device()
 print(index)
 print(torch.cuda.get_device_name(index))
-
-
+from main.inference import Inferer
+inferer = Inferer(DEFAULT_MODEL, num_gpus, OUT_FOLDER)
 
 @spaces.GPU(enable_queue=True, duration=300)
 def infer(video_input, in_threshold=0.5, num_people="Single person", render_mesh=False):
-    from main.inference import Inferer
-    inferer = Inferer(DEFAULT_MODEL, num_gpus, OUT_FOLDER)
+    # from main.inference import Inferer
+    # inferer = Inferer(DEFAULT_MODEL, num_gpus, OUT_FOLDER)
     os.system(f'rm -rf {OUT_FOLDER}/*')
     multi_person = False if (num_people == "Single person") else True
     cap = cv2.VideoCapture(video_input)
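In short, the app.py side of this commit moves the Inferer import and construction out of the @spaces.GPU-decorated infer handler and up to module scope, so the SMPLer-X model is set up once when the Space starts rather than on every request, and it restores the full destination path of the patched torchgeometry conversions.py. A minimal sketch of that pattern follows, reusing the decorator arguments from the diff; HeavyModel, heavy_model, and predict are hypothetical stand-ins, not code from this Space.

import spaces  # Hugging Face Spaces GPU decorator, as used in app.py

# Hypothetical stand-in for an expensive-to-build model object such as Inferer.
class HeavyModel:
    def predict(self, video_path: str) -> str:
        return f"processed {video_path}"

# Built once at import time (module scope), mirroring the new app.py layout.
heavy_model = HeavyModel()

@spaces.GPU(enable_queue=True, duration=300)  # same decorator arguments as in the diff
def infer(video_input: str) -> str:
    # Only per-request work happens inside the GPU-allocated call;
    # the model object already exists.
    return heavy_model.predict(video_input)

On dedicated hardware such as the A10G named in the commit message, the spaces.GPU decorator is presumably a no-op, which would make module-level construction safe here.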
requirements.txt CHANGED

@@ -1,5 +1,5 @@
---extra-index-url https://download.openmmlab.com/mmcv/dist/
-https://download.openmmlab.com/mmcv/dist/
+--extra-index-url https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html
+https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/mmcv-2.1.0-cp39-cp39-manylinux1_x86_64.whl
 
 scikit-image
 scipy
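The requirements.txt side pins mmcv to OpenMMLab's prebuilt cu118/torch2.0.0 wheel index and to a specific mmcv 2.1.0 cp39 manylinux wheel, presumably matching the CUDA 11.8 / PyTorch 2.0 / Python 3.9 stack of the A10G runtime this commit switches back to. A quick sanity check along these lines (an illustration, not part of the commit) can confirm the runtime matches the pinned wheel before the model loads:

# Optional startup check: confirm the runtime matches the build
# that the pinned mmcv wheel targets.
import torch
import mmcv

print("torch:", torch.__version__)          # expected to start with 2.0
print("cuda runtime:", torch.version.cuda)  # expected 11.8 for the cu118 wheel
print("mmcv:", mmcv.__version__)            # expected 2.1.0 from the pinned wheel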