MrOvkill committed on
Commit
b95210f
1 Parent(s): 3467e62

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -18
app.py CHANGED
@@ -9,28 +9,28 @@ from PIL import Image
9
  from typing import Union
10
  import os
11
 
12
- device = "cuda" if torch.cuda.is_available() else "cpu"
13
  print(f"Using {device}" if device != "cpu" else "Using CPU")
14
 
15
  def _load_model():
16
- tokenizer = AutoTokenizer.from_pretrained("vikhyatk/moondream2", trust_remote_code=True, revision="2024-05-08", torch_dtype=(torch.bfloat16 if device == 'cuda' else torch.float32))
17
- model = AutoModelForCausalLM.from_pretrained("vikhyatk/moondream2", device_map=device, trust_remote_code=True, revision="2024-05-08")
18
- return (model, tokenizer)
19
 
20
  class MoonDream():
21
- def __init__(self, model=None, tokenizer=None):
22
- self.model, self.tokenizer = (model, tokenizer)
23
- if not model or model is None or not tokenizer or tokenizer is None:
24
- self.model, self.tokenizer = _load_model()
25
- self.device = device
26
- self.model.to(self.device)
27
- def __call__(self, question, imgs):
28
- imn = 0
29
- for img in imgs:
30
- img = self.model.encode_image(img)
31
- res = self.model.answer_question(question=question, image_embeds=img, tokenizer=self.tokenizer)
32
- yield res
33
- return
34
 
35
  def _respond_one(question, img):
36
  txt = ""
@@ -116,7 +116,7 @@ with gr.Blocks() as demo:
116
  with gr.Row():
117
  minst = gr.Textbox(label="Merge Instructions")
118
  with gr.Row():
119
- btn2 = gr.Button("submit batch")
120
  with gr.Row():
121
  with gr.Column():
122
  otp2 = gr.Textbox(label="individual batch output (left)", interactive=True)
 
9
  from typing import Union
10
  import os
11
 
12
+ device = "cuda"
13
  print(f"Using {device}" if device != "cpu" else "Using CPU")
14
 
15
  def _load_model():
16
+ tokenizer = AutoTokenizer.from_pretrained("vikhyatk/moondream2", trust_remote_code=True, revision="2024-05-08", torch_dtype=(torch.bfloat16 if device == 'cuda' else torch.float32))
17
+ model = AutoModelForCausalLM.from_pretrained("vikhyatk/moondream2", device_map=device, trust_remote_code=True, revision="2024-05-08")
18
+ return (model, tokenizer)
19
 
20
  class MoonDream():
21
+ def __init__(self, model=None, tokenizer=None):
22
+ self.model, self.tokenizer = (model, tokenizer)
23
+ if not model or model is None or not tokenizer or tokenizer is None:
24
+ self.model, self.tokenizer = _load_model()
25
+ self.device = device
26
+ self.model.to(self.device)
27
+ def __call__(self, question, imgs):
28
+ imn = 0
29
+ for img in imgs:
30
+ img = self.model.encode_image(img)
31
+ res = self.model.answer_question(question=question, image_embeds=img, tokenizer=self.tokenizer)
32
+ yield res
33
+ return
34
 
35
  def _respond_one(question, img):
36
  txt = ""
 
116
  with gr.Row():
117
  minst = gr.Textbox(label="Merge Instructions")
118
  with gr.Row():
119
+ btn2 = gr.Button("submit batch")
120
  with gr.Row():
121
  with gr.Column():
122
  otp2 = gr.Textbox(label="individual batch output (left)", interactive=True)