fix: use prebuilt wheel files.
app.py CHANGED
@@ -87,20 +87,20 @@ def setup_runtime_env():
     logging.info("CUDA is available: %s" % torch.cuda.is_available())
     logging.info("CUDA Device Capability: %s" % (torch.cuda.get_device_capability(),))
 
-    # Install Pre-compiled CUDA extensions (
+    # Install Pre-compiled CUDA extensions (Fallback to this solution on 12/31/24)
     # Ref: https://huggingface.co/spaces/zero-gpu-explorers/README/discussions/110
-
-    # ext_dir = os.path.join(os.path.dirname(__file__), "wheels")
-    # for e in os.listdir(ext_dir):
-    #     logging.info("Installing Extensions from %s" % e)
-    #     subprocess.call(
-    #         ["pip", "install", os.path.join(ext_dir, e)], stderr=subprocess.STDOUT
-    #     )
-    # Compile CUDA extensions
-    ext_dir = os.path.join(os.path.dirname(__file__), "citydreamer", "extensions")
+    ext_dir = os.path.join(os.path.dirname(__file__), "wheels")
     for e in os.listdir(ext_dir):
-        if os.path.isdir(os.path.join(ext_dir, e)):
-            subprocess.call(["pip", "install", "."], cwd=os.path.join(ext_dir, e))
+        logging.info("Installing Extensions from %s" % e)
+        subprocess.call(
+            ["pip", "install", os.path.join(ext_dir, e)], stderr=subprocess.STDOUT
+        )
+    # Compile CUDA extensions
+    # Update on 12/31/24: No module named 'torch'. But it is installed and listed by `pip list`
+    # ext_dir = os.path.join(os.path.dirname(__file__), "citydreamer", "extensions")
+    # for e in os.listdir(ext_dir):
+    #     if os.path.isdir(os.path.join(ext_dir, e)):
+    #         subprocess.call(["pip", "install", "."], cwd=os.path.join(ext_dir, e))
 
     logging.info("Installed Python Packages: %s" % _get_output(["pip", "list"]))
 
@@ -183,13 +183,13 @@ if __name__ == "__main__":
         format="[%(levelname)s] %(asctime)s %(message)s", level=logging.INFO
     )
     logging.info("Environment Variables: %s" % os.environ)
-    if _get_output(["nvcc", "--version"]) is None:
-        logging.info("Installing CUDA toolkit...")
-        install_cuda_toolkit()
-    else:
-        logging.info("Detected CUDA: %s" % _get_output(["nvcc", "--version"]))
+    # if _get_output(["nvcc", "--version"]) is None:
+    #     logging.info("Installing CUDA toolkit...")
+    #     install_cuda_toolkit()
+    # else:
+    #     logging.info("Detected CUDA: %s" % _get_output(["nvcc", "--version"]))
 
-    logging.info("
+    logging.info("Installing CUDA extensions...")
     setup_runtime_env()
 
     logging.info("Downloading pretrained models...")
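The wheels committed below target CPython 3.10 on linux_x86_64, matching the Space runtime. As a rough sketch of how such wheels could have been produced locally from the extension sources, one could run `pip wheel` in each directory under citydreamer/extensions (the path referenced by the now commented-out compile step) on a machine whose Python, PyTorch, and CUDA versions match the Space; the helper below is illustrative and not part of this commit.

# build_wheels.py -- hypothetical helper, not part of app.py or this commit.
# Builds each CUDA extension into a .whl and drops it into wheels/.
import os
import subprocess

ROOT = os.path.dirname(os.path.abspath(__file__))
ext_dir = os.path.join(ROOT, "citydreamer", "extensions")
out_dir = os.path.join(ROOT, "wheels")
os.makedirs(out_dir, exist_ok=True)

for e in os.listdir(ext_dir):
    src = os.path.join(ext_dir, e)
    if os.path.isdir(src):
        # --no-deps: build only the extension itself, not its dependencies
        subprocess.call(["pip", "wheel", "--no-deps", "-w", out_dir, "."], cwd=src)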
wheels/extrude_tensor-1.0.0-cp310-cp310-linux_x86_64.whl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed313f42a9ded9fe5fb3a73fdb2218fa71fde4ced00fc76e027cba24faf5ad34
+size 67708
wheels/grid_encoder-1.0.0-cp310-cp310-linux_x86_64.whl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd18d20dcf65f5895280b04f77dbca52bca473ffff1b87d8f1aadfaf4cf91e97
+size 2345077
wheels/voxrender-1.0.0-cp310-cp310-linux_x86_64.whl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28751d39899af5bc1db6905405abf92a8b72f5701331584fe794d688ba037ee1
+size 593621
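Since the wheel files above are stored as Git LFS pointers, a quick way to confirm the three distributions actually installed at Space startup is to query their metadata; this check is an assumption for illustration, not something added by this commit.

# Hypothetical startup check (not in the commit): verify the prebuilt
# distributions from wheels/ are installed and report their versions.
from importlib import metadata

for dist in ("extrude_tensor", "grid_encoder", "voxrender"):
    # Raises importlib.metadata.PackageNotFoundError if a wheel failed to install.
    print(dist, metadata.version(dist))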