asigalov61 committed on
Commit
b2f373f
·
verified ·
1 Parent(s): b75abab

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -22,7 +22,7 @@ os.environ['USE_FLASH_ATTENTION'] = '1'
22
 
23
  import torch
24
 
25
- torch.set_float32_matmul_precision('high')
26
  torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
27
  torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
28
  torch.backends.cuda.enable_mem_efficient_sdp(True)
 
22
 
23
  import torch
24
 
25
+ torch.set_float32_matmul_precision('medium')
26
  torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
27
  torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
28
  torch.backends.cuda.enable_mem_efficient_sdp(True)