Locutusque committed on
Commit
f09eac6
1 Parent(s): daecaae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -9,7 +9,7 @@ import os
9
  # Install flash-attn
10
  subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
11
  # Initialize the model pipeline
12
- generator = pipeline('text-generation', model='Locutusque/Hyperion-1.5-Mistral-7B', torch_dtype=torch.bfloat16, token=os.environ["HF"])
13
  @spaces.GPU
14
  def generate_text(prompt, temperature, top_p, top_k, repetition_penalty, max_length):
15
  # Generate text using the model
@@ -43,8 +43,8 @@ iface = gr.Interface(
43
  gr.Slider(minimum=5, maximum=4096, step=5, value=1024, label="Max Length")
44
  ],
45
  outputs=gr.Textbox(label="Generated Text"),
46
- title="Hyperion-1.5-Mistral-7B",
47
- description="Try out the Hyperion-1.5-Mistral-7B model for free! This is a preview version, and the model will be released soon"
48
  )
49
 
50
  iface.launch()
 
9
  # Install flash-attn
10
  subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
11
  # Initialize the model pipeline
12
+ generator = pipeline('text-generation', model='Locutusque/Hyperion-2.0-Mistral-7B', torch_dtype=torch.bfloat16, token=os.environ["HF"])
13
  @spaces.GPU
14
  def generate_text(prompt, temperature, top_p, top_k, repetition_penalty, max_length):
15
  # Generate text using the model
 
43
  gr.Slider(minimum=5, maximum=4096, step=5, value=1024, label="Max Length")
44
  ],
45
  outputs=gr.Textbox(label="Generated Text"),
46
+ title="Hyperion-2.0-Mistral-7B",
47
+ description="Try out the Hyperion-2.0-Mistral-7B model for free! This is a preview version, and the model will be released soon"
48
  )
49
 
50
  iface.launch()