vilarin committed
Commit 289c0ee
Parent: d518d69

Update app.py

Files changed (1): app.py (+1, -1)
app.py CHANGED
@@ -49,7 +49,7 @@ if USE_FLASH_ATTENTION:
         attn_implementation="flash_attention_2"

 model = AutoModelForCausalLM.from_pretrained(
-    MODEL_NAME,
+    MODEL_ID,
     quantization_config=quantization_config,
     attn_implementation=attn_implementation,
     torch_dtype=torch.bfloat16,
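
For context, a minimal sketch of the corrected model-loading path, assuming the rest of app.py (not shown in this diff) defines MODEL_ID, a USE_FLASH_ATTENTION flag, and a bitsandbytes quantization config. The placeholder model id, the BitsAndBytesConfig construction, and the "eager" fallback are assumptions for illustration, not taken from the commit.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

MODEL_ID = "org/model-name"  # placeholder; the real value is set elsewhere in app.py
USE_FLASH_ATTENTION = True   # assumed flag, named in the hunk header above

# Assumed 4-bit quantization setup; the diff only shows the variable being passed in.
quantization_config = BitsAndBytesConfig(load_in_4bit=True)

attn_implementation = "eager"  # assumed default when flash attention is off
if USE_FLASH_ATTENTION:
    attn_implementation = "flash_attention_2"

model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,  # the fix: the previous code referenced MODEL_NAME instead
    quantization_config=quantization_config,
    attn_implementation=attn_implementation,
    torch_dtype=torch.bfloat16,
)

The one-line change swaps the positional model identifier from MODEL_NAME to MODEL_ID, presumably matching the variable actually defined earlier in app.py.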