Soham0708 commited on
Commit
bcda876
1 Parent(s): 637ff73

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +75 -0
app.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Directory where input images live (not referenced elsewhere in this module;
# kept for backward compatibility with any external consumer).
images_dir = "images"

import io

import torch
from fastapi import FastAPI, File, Form, HTTPException, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

from qwen_vl_utils import process_vision_info

# Release any cached GPU memory left over from a previous process before serving.
torch.cuda.empty_cache()

app = FastAPI()

# BUG FIX: the original line `app.cor` was an incomplete statement that raised
# AttributeError at import time. The apparent intent was CORS configuration,
# so register the CORS middleware properly (wide open — tighten the origin
# list for production).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
15
# Checkpoint used for all inference requests.
MODEL_ID = "Qwen/Qwen2-VL-7B-Instruct-AWQ"

# Visual token budget for the processor: images are rescaled so their pixel
# count falls within [MIN_PIXELS, MAX_PIXELS].
MIN_PIXELS = 256 * 28 * 28
MAX_PIXELS = 1280 * 28 * 28

# Lazily initialised singletons. The original code re-downloaded/re-loaded the
# multi-GB model AND the processor on every single request; they are loaded
# once per process here instead. run_model's interface is unchanged.
_model = None
_processor = None


def _get_model_and_processor():
    """Load (once) and return the Qwen2-VL model and its processor."""
    global _model, _processor
    if _model is None:
        _model = Qwen2VLForConditionalGeneration.from_pretrained(
            MODEL_ID, torch_dtype=torch.float16, device_map="cuda:0"
        )
        _processor = AutoProcessor.from_pretrained(
            MODEL_ID, min_pixels=MIN_PIXELS, max_pixels=MAX_PIXELS
        )
    return _model, _processor


def run_model(image, text_input):
    """Run Qwen2-VL on a single image plus a text prompt.

    Parameters
    ----------
    image : file-like object or path
        Anything ``PIL.Image.open`` accepts (the endpoint passes a BytesIO).
    text_input : str
        The user prompt paired with the image.

    Returns
    -------
    str
        The decoded model response with the prompt tokens stripped.
    """
    torch.cuda.empty_cache()
    model, processor = _get_model_and_processor()

    # The original bound this to `image_path`, but it is a PIL Image object.
    pil_image = Image.open(image)

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": pil_image},
                {"type": "text", "text": text_input},
            ],
        }
    ]

    # Build the chat-formatted prompt string and the pixel/video tensors in
    # the layout Qwen2-VL expects.
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    ).to("cuda")

    # Inference: generation of the output.
    torch.cuda.empty_cache()
    generated_ids = model.generate(**inputs, max_new_tokens=1024)

    # Strip the echoed prompt tokens so only newly generated text is decoded.
    generated_ids_trimmed = [
        out_ids[len(in_ids):]
        for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed,
        skip_special_tokens=True,
        clean_up_tokenization_spaces=False,
    )
    return output_text[0]
65
+
66
+
67
@app.post("/call_qwen_model")
async def call_model(file: UploadFile = File(...), json_str: str = Form(...)):
    """Accept an uploaded image and a text prompt; return the model output.

    Parameters
    ----------
    file : UploadFile
        The image to run inference on.
    json_str : str
        The text prompt, forwarded verbatim to the model.

    Returns
    -------
    dict
        ``{"output": <decoded model response>}`` on success.

    Raises
    ------
    HTTPException
        500 carrying the underlying error message if inference fails.
    """
    try:
        request_object_content = await file.read()
        img = io.BytesIO(request_object_content)
        output = run_model(img, json_str)
        return {"output": output}
    except Exception as e:
        # BUG FIX: the original passed the message as the positional
        # `status_code` argument (`HTTPException(f"Error: {e}")`).
        # HTTPException needs a numeric status code; the message goes in
        # `detail`.
        raise HTTPException(status_code=500, detail=f"Error: {e}")