DongfuJiang committed on
Commit 7a6af6e
1 Parent(s): 4ad4a2a
Files changed (1)
  1. app_high_res.py +3 -2
app_high_res.py CHANGED
@@ -12,7 +12,8 @@ from transformers import AutoProcessor, Idefics2ForConditionalGeneration
 from models.conversation import conv_templates
 from typing import List
 processor = AutoProcessor.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096")
-model = Idefics2ForConditionalGeneration.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096", device_map="auto", torch_dtype=torch.float16)
+model = Idefics2ForConditionalGeneration.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096", torch_dtype=torch.float16)
+model = model.to("cuda") if model.device.type != "cuda" else model
 MAX_NUM_FRAMES = 24
 conv_template = conv_templates["idefics_2"]
 
@@ -61,7 +62,7 @@ all the frames of video are as follows:
 @spaces.GPU
 def generate(text:str, images:List[Image.Image], history: List[dict], **kwargs):
     global processor, model
-    model = model.to("cuda") if model.device.type != "cuda" else model
+    # model = model.to("cuda") if model.device.type != "cuda" else model
     if not images:
         images = None
 
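
For context, a minimal sketch of the load-then-generate pattern after this commit: fp16 weights are loaded and moved to the GPU once at import time, and the per-call device move inside generate is commented out. MODEL_ID is an illustrative constant (the original file inlines the repo string twice), and the body of generate is abbreviated; the rest of the app is assumed unchanged.

import spaces
import torch
from typing import List
from PIL import Image
from transformers import AutoProcessor, Idefics2ForConditionalGeneration

# Illustrative constant; app_high_res.py inlines this repo id directly.
MODEL_ID = "Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096"

# Load once at startup: fp16 weights without device_map="auto",
# then an explicit one-time move to the GPU.
processor = AutoProcessor.from_pretrained(MODEL_ID)
model = Idefics2ForConditionalGeneration.from_pretrained(MODEL_ID, torch_dtype=torch.float16)
model = model.to("cuda") if model.device.type != "cuda" else model

@spaces.GPU
def generate(text: str, images: List[Image.Image], history: List[dict], **kwargs):
    global processor, model
    # The former in-function device move is now disabled by this commit:
    # model = model.to("cuda") if model.device.type != "cuda" else model
    if not images:
        images = None
    ...  # prompt construction and model.generate(...) follow in the real app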
68