Samet Yilmaz committed on
Commit f9fa47c · 1 Parent(s): aa97fb8

Change config

Files changed (1):
  1. app.py +6 -2
app.py CHANGED
@@ -1,12 +1,16 @@
 from vllm import LLM, SamplingParams
 import gradio as gr
 repo_id = "mistral-community/pixtral-12b-240910" # Replace with the model you would like to use
-sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
+sampling_params = SamplingParams(max_tokens=8192, temperature=0.7)
+max_tokens_per_img = 4096
+max_img_per_msg = 5
 
 # @spaces.GPU #[uncomment to use ZeroGPU]
 def infer(image_url, prompt, progress=gr.Progress(track_tqdm=True)):
     # tokenize image urls and text
-    llm = LLM(model="mistralai/Pixtral-12B-2409", load_params_config=True) # Name or path of your model
+    llm = LLM(model="mistralai/Pixtral-12B-2409",
+              tokenizer_mode="mistral",
+              limit_mm_per_prompt={"image": max_img_per_msg}) # Name or path of your model
     messages = [
         {
             "role": "user",