Pranathi1 committed on
Commit 70db8bd
1 Parent(s): c5b419d

Update app.py

Files changed (1)
  1. app.py +4 -3
app.py CHANGED

@@ -17,7 +17,7 @@ def load_models():
     model = Qwen2VLForConditionalGeneration.from_pretrained(
         "Qwen/Qwen2-VL-2B-Instruct",
         trust_remote_code=True,
-        torch_dtype=torch.bfloat16).cuda().eval()
+        torch_dtype=torch.bfloat16).eval()
 
     processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True)
 
@@ -65,7 +65,8 @@ if uploaded_file is not None:
         padding=True,
         return_tensors="pt",
     )
-    inputs = inputs.to("cuda")
+    inputs = inputs.to("cpu")
+
 
     # Generate the text from the image using the model
     generated_ids = model.generate(**inputs, max_new_tokens=5000)
@@ -142,7 +143,7 @@ if uploaded_file is not None:
         padding=True,
         return_tensors="pt",
     )
-    inputs = inputs.to("cuda")
+    inputs = inputs.to("cpu")
 
     generated_ids_query = model.generate(**inputs, max_new_tokens=1000)
     generated_ids_trimmed = [
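In effect, the commit drops the .cuda() call when loading the model and moves the processor outputs to "cpu", so the app can run on a host without a GPU. A device-agnostic variant is also possible; the sketch below is not part of this commit, and the runtime device/dtype selection is an assumption, but it shows one common way to support both cases with the same code:

    import torch
    from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

    # Sketch only: pick the device at runtime instead of hard-coding "cuda" or "cpu".
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Assumption: bfloat16 is only used on GPU; fall back to float32 on CPU.
    dtype = torch.bfloat16 if device == "cuda" else torch.float32

    model = Qwen2VLForConditionalGeneration.from_pretrained(
        "Qwen/Qwen2-VL-2B-Instruct",
        trust_remote_code=True,
        torch_dtype=dtype,
    ).to(device).eval()

    processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True)

    # Later, wherever the processor builds model inputs:
    # inputs = inputs.to(device)

With that pattern, the two inputs.to("cpu") lines changed in this diff would simply read inputs.to(device).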