yslan committed
Commit
8855d72
1 Parent(s): faf0d54
Files changed (2)
  1. app.py +4 -0
  2. dit/dit_models_xformers.py +6 -5
app.py CHANGED
@@ -2,6 +2,10 @@ import os
 import torch
 import sys
 import subprocess
+import xformers
+
+from xformers.components.feedforward import fused_mlp
+print('xformers version: {}'.format(xformers.__version__))
 
 def install_cuda_toolkit():
     # CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"
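The app.py hunk imports xformers (and its fused_mlp module) at startup and prints the installed version, so a missing or Triton-less xformers build is surfaced immediately rather than later during model construction. Below is a minimal sketch of the same fail-fast idea; it only relies on the imports shown in the hunk, and the try/except wrapper and error message are illustrative, not part of the commit.

# Sketch only (not from this commit): fail fast if the installed xformers
# build lacks the fused_mlp component, with a clearer message than a bare
# ImportError when the Space starts up.
import sys

try:
    import xformers
    from xformers.components.feedforward import fused_mlp  # noqa: F401
    print('xformers version: {}'.format(xformers.__version__))
except ImportError as err:
    sys.exit('xformers with fused_mlp support is required: {}'.format(err))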
dit/dit_models_xformers.py CHANGED
@@ -37,11 +37,12 @@ except:
 # from xformers import triton
 # import xformers.triton
 
-if torch.cuda.is_available():
-    # from xformers.triton import FusedLayerNorm as LayerNorm # compat issue
-    from xformers.components.activations import build_activation, Activation
-    from xformers.components.feedforward import fused_mlp
-    # from xformers.components.feedforward import mlp
+# if torch.cuda.is_available():
+
+# from xformers.triton import FusedLayerNorm as LayerNorm # compat issue
+from xformers.components.activations import build_activation, Activation
+from xformers.components.feedforward import fused_mlp
+# from xformers.components.feedforward import mlp
 
 from ldm.modules.attention import MemoryEfficientCrossAttention, JointMemoryEfficientCrossAttention
 
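The dit/dit_models_xformers.py hunk comments out the torch.cuda.is_available() guard, so build_activation/Activation and fused_mlp are now imported unconditionally; this assumes an xformers build that still ships the xformers.components API. As a rough illustration of what fused_mlp provides, here is a hedged sketch of constructing a fused feed-forward block; the constructor arguments (dim_model, dropout, activation, hidden_layer_multiplier) follow the older xformers.components API and may differ in other releases, the dimensions are hypothetical, and the Triton kernels need a CUDA device at runtime.

# Hedged sketch, not code from this repository: FusedMLP usage as exposed by
# older xformers releases that still provide xformers.components.
import torch
from xformers.components.activations import Activation
from xformers.components.feedforward import fused_mlp

mlp = fused_mlp.FusedMLP(
    dim_model=1152,              # hypothetical DiT hidden size
    dropout=0.0,
    activation=Activation.GeLU,
    hidden_layer_multiplier=4,   # standard 4x MLP expansion in transformer blocks
).cuda()

x = torch.randn(2, 256, 1152, device='cuda')
y = mlp(x)                       # output keeps the input shape: (2, 256, 1152)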