linyq committed
Commit: fe12049
Parent(s): 889f605

Update modeling_llavaqwen.py

Files changed (1):
  1. modeling_llavaqwen.py  +0 -2
modeling_llavaqwen.py CHANGED
@@ -1521,9 +1521,7 @@ faster_llama_rmsnorm = None
 if is_flash_attn_2_available():
     from flash_attn import flash_attn_func, flash_attn_varlen_func
     from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
-    from flash_attn.ops.rms_norm import rms_norm
 
-    faster_llama_rmsnorm = rms_norm
 
     _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
 
 
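The removed lines pulled in flash-attn's fused RMSNorm kernel (flash_attn.ops.rms_norm), which is typically only importable when flash-attn is built with its optional fused-norm CUDA extension; after this commit, faster_llama_rmsnorm stays None even when flash-attn is installed, presumably leaving the model on its standard PyTorch RMSNorm path. Below is a minimal sketch of the usual guard-and-fallback pattern, not the file's actual code: FallbackRMSNorm and apply_rms_norm are hypothetical names, and the rms_norm(x, weight, epsilon) call signature is assumed.

# Minimal sketch (assumption, not the actual modeling_llavaqwen.py code) of guarding an
# optional fused RMSNorm kernel so a missing extension degrades to pure PyTorch.
import torch
import torch.nn as nn

faster_llama_rmsnorm = None
try:
    # Only importable when flash-attn was built with its optional fused-norm extension.
    from flash_attn.ops.rms_norm import rms_norm as faster_llama_rmsnorm
except ImportError:
    pass


class FallbackRMSNorm(nn.Module):
    # Hypothetical pure-PyTorch RMSNorm, equivalent in spirit to LlamaRMSNorm.
    def __init__(self, hidden_size: int, eps: float = 1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        dtype = x.dtype
        x = x.float()
        x = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
        return self.weight * x.to(dtype)


def apply_rms_norm(x: torch.Tensor, norm: FallbackRMSNorm) -> torch.Tensor:
    # Prefer the fused kernel when it imported; otherwise use the plain module.
    if faster_llama_rmsnorm is not None:
        # Assumed signature: rms_norm(x, weight, epsilon).
        return faster_llama_rmsnorm(x, norm.weight, norm.eps)
    return norm(x)

With the fused import removed in this commit, a guard like the one above would always take the fallback branch.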