Locutusque committed on
Commit efa692b · verified · 1 Parent(s): 991b767

Update app.py

Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -8,7 +8,7 @@ import spaces
 # Install flash-attn
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 # Initialize the model pipeline
-generator = pipeline('text-generation', model='mistralai/Mistral-7B-v0.1', torch_dtype=torch.bfloat16, use_flash_attention_2=True)
+generator = pipeline('text-generation', model='mistralai/Mistral-7B-v0.1', torch_dtype=torch.bfloat16)
 @spaces.GPU
 def generate_text(prompt, temperature, top_p, top_k, repetition_penalty, max_length):
     # Generate text using the model
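Note: the dropped use_flash_attention_2 flag was deprecated in newer transformers releases in favor of the attn_implementation argument. A minimal sketch of how FlashAttention-2 could still be requested, assuming a recent transformers version (>= 4.36) with flash-attn installed; the model_kwargs route shown here is an assumption and is not part of this commit:

import torch
from transformers import pipeline

# Sketch only, not from this commit: forward attn_implementation to
# from_pretrained via model_kwargs instead of the removed
# use_flash_attention_2 flag.
generator = pipeline(
    'text-generation',
    model='mistralai/Mistral-7B-v0.1',
    torch_dtype=torch.bfloat16,
    model_kwargs={'attn_implementation': 'flash_attention_2'},
)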