Update main.py
main.py (CHANGED)
@@ -30,7 +30,6 @@ from ultralytics import YOLO
 
 # if not os.path.exists("/data/icon_detect"):
 # os.makedirs("/data/icon_detect")
-
 try:
     yolo_model = torch.load("weights/icon_detect/best.pt", map_location="cuda", weights_only=False)["model"]
     yolo_model = yolo_model.to("cuda")
@@ -43,13 +42,18 @@ processor = AutoProcessor.from_pretrained(
     "microsoft/Florence-2-base", trust_remote_code=True
 )
 
-
-model = AutoModelForCausalLM.from_pretrained(
-    "weights/icon_caption_florence",
-    torch_dtype=torch.float16,
-    trust_remote_code=True,
-).to("cuda")
-
+try:
+    model = AutoModelForCausalLM.from_pretrained(
+        "weights/icon_caption_florence",
+        torch_dtype=torch.float16,
+        trust_remote_code=True,
+    ).to("cuda")
+except:
+    model = AutoModelForCausalLM.from_pretrained(
+        "weights/icon_caption_florence",
+        torch_dtype=torch.float16,
+        trust_remote_code=True,
+    )
 caption_model_processor = {"processor": processor, "model": model}
 print("finish loading model!!!")
 
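Note on the caption-model hunk above: the new try/except retries the Florence-2 load without .to("cuda") when the first attempt fails, but the bare except also hides unrelated failures (for example a missing weights/icon_caption_florence directory). A minimal alternative sketch, not part of this commit, that picks the device once with torch.cuda.is_available() and loads a single time; the dtype switch on CPU is an assumption (float16 inference on CPU is poorly supported), and the same device variable could also drive the map_location of the earlier torch.load call for the YOLO checkpoint:

import torch
from transformers import AutoModelForCausalLM

# Sketch only: choose the device explicitly instead of falling back via a bare except.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForCausalLM.from_pretrained(
    "weights/icon_caption_florence",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    trust_remote_code=True,
).to(device)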
@@ -66,6 +70,7 @@ def process(
     image_input: Image.Image, box_threshold: float, iou_threshold: float
 ) -> ProcessResponse:
     image_save_path = "imgs/saved_image_demo.png"
+    os.makedirs(os.path.dirname(image_save_path), exist_ok=True)
     image_input.save(image_save_path)
     image = Image.open(image_save_path)
     box_overlay_ratio = image.size[0] / 3200
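The os.makedirs guard added above keeps the first request from failing when the imgs/ directory does not exist in the Space, but every request still writes to the same fixed file, so two concurrent uploads can overwrite each other. A small illustrative sketch of one way around that; the helper name and the per-request temporary file are not part of this commit:

import os
import tempfile

def save_upload(image_input, directory="imgs"):
    # Hypothetical helper: give each request its own file instead of sharing
    # "imgs/saved_image_demo.png" across requests.
    os.makedirs(directory, exist_ok=True)
    fd, path = tempfile.mkstemp(suffix=".png", dir=directory)
    os.close(fd)
    image_input.save(path)
    return path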
@@ -121,8 +126,20 @@ async def process_image(
     try:
         contents = await image_file.read()
         image_input = Image.open(io.BytesIO(contents)).convert("RGB")
+
+        # Add debug logging
+        print(f"Processing image: {image_file.filename}")
+        print(f"Image size: {image_input.size}")
+
+        response = process(image_input, box_threshold, iou_threshold)
+
+        # Validate response
+        if not response.image:
+            raise ValueError("Empty image in response")
+
+        return response
+
     except Exception as e:
-
-
-
-        return response
+        import traceback
+        traceback.print_exc()  # This will show full error in logs
+        raise HTTPException(status_code=500, detail=str(e))
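The handler change above logs with print() and traceback.print_exc(), which ends up in the Space's container logs, and converts any failure into an HTTP 500 whose detail field carries str(e). A minimal sketch of the same shape using the logging module instead of print(); the logger name and the standalone function are illustrative, and process() plus the response.image check refer to what main.py already defines:

import io
import logging

from fastapi import HTTPException
from PIL import Image

logger = logging.getLogger("icon_detect_demo")  # illustrative name


async def process_image_logged(image_file, box_threshold: float, iou_threshold: float):
    # Same control flow as the handler in the diff, with logging instead of print().
    try:
        contents = await image_file.read()
        image_input = Image.open(io.BytesIO(contents)).convert("RGB")
        logger.info("Processing image %s, size %s", image_file.filename, image_input.size)

        response = process(image_input, box_threshold, iou_threshold)  # process() from main.py
        if not response.image:
            raise ValueError("Empty image in response")
        return response
    except Exception as e:
        # logger.exception() records the message plus the full traceback,
        # covering what traceback.print_exc() does in the diff.
        logger.exception("process_image failed")
        raise HTTPException(status_code=500, detail=str(e))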
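For completeness, a hypothetical way to exercise the endpoint after this change. The route path, port, and the transport of box_threshold / iou_threshold as form fields are assumptions (the route declaration is not part of this diff); only the image_file field name comes from the handler's parameter:

import requests

# Hypothetical client call: URL, port, and how the two thresholds are sent
# are assumptions, not taken from the Space's code.
with open("screenshot.png", "rb") as f:
    resp = requests.post(
        "http://localhost:7860/process_image",
        files={"image_file": ("screenshot.png", f, "image/png")},
        data={"box_threshold": 0.05, "iou_threshold": 0.1},
    )

print(resp.status_code)
# On failure the handler now returns HTTP 500 with {"detail": "<error message>"}.
print(resp.json())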