DawnC committed on
Commit 4f8052f
1 Parent(s): 63b2431

Update app.py

Files changed (1)
  1. app.py +64 -22
app.py CHANGED
@@ -13,6 +13,10 @@ from urllib.parse import quote
 from ultralytics import YOLO
 import asyncio
 import traceback
+import logging
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
 
 
 # Download the YOLOv8 pretrained model
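For context, a minimal sketch of the logging pattern this hunk introduces: configure the root logger once at import time, then log through a module-level logger. The helper below is hypothetical and only illustrates the pattern; it is not part of app.py.

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def safe_divide(a, b):
    # Hypothetical helper, for illustration only (not part of app.py).
    try:
        result = a / b
        logger.debug(f"safe_divide({a}, {b}) = {result}")
        return result
    except ZeroDivisionError as e:
        logger.error(f"Error in safe_divide: {str(e)}")
        return 0

safe_divide(10, 2)  # emitted at DEBUG level because of basicConfig above
safe_divide(1, 0)   # emitted at ERROR level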
@@ -152,29 +156,67 @@ def format_description(description, breed):
 async def predict_single_dog(image):
     return await asyncio.to_thread(_predict_single_dog, image)
 
-def _predict_single_dog(image):
-    image_tensor = preprocess_image(image)
-    with torch.no_grad():
-        output = model(image_tensor)
-        logits = output[0] if isinstance(output, tuple) else output
-        probabilities = F.softmax(logits, dim=1)
-        topk_probs, topk_indices = torch.topk(probabilities, k=3)
-        top1_prob = topk_probs[0][0].item()
-        topk_breeds = [dog_breeds[idx.item()] for idx in topk_indices[0]]
-        topk_probs_percent = [f"{prob.item() * 100:.2f}%" for prob in topk_probs[0]]
-    return top1_prob, topk_breeds, topk_probs_percent
+# def _predict_single_dog(image):
+#     image_tensor = preprocess_image(image)
+#     with torch.no_grad():
+#         output = model(image_tensor)
+#         logits = output[0] if isinstance(output, tuple) else output
+#         probabilities = F.softmax(logits, dim=1)
+#         topk_probs, topk_indices = torch.topk(probabilities, k=3)
+#         top1_prob = topk_probs[0][0].item()
+#         topk_breeds = [dog_breeds[idx.item()] for idx in topk_indices[0]]
+#         topk_probs_percent = [f"{prob.item() * 100:.2f}%" for prob in topk_probs[0]]
+#     return top1_prob, topk_breeds, topk_probs_percent
+
+
+# async def detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.4):
+#     results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
+#     dogs = []
+#     for box in results.boxes:
+#         if box.cls == 16:  # The dog class in the COCO dataset is 16
+#             xyxy = box.xyxy[0].tolist()
+#             confidence = box.conf.item()
+#             cropped_image = image.crop((xyxy[0], xyxy[1], xyxy[2], xyxy[3]))
+#             dogs.append((cropped_image, confidence, xyxy))
+#     return dogs
 
 
 async def detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.4):
-    results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
-    dogs = []
-    for box in results.boxes:
-        if box.cls == 16:  # The dog class in the COCO dataset is 16
-            xyxy = box.xyxy[0].tolist()
-            confidence = box.conf.item()
-            cropped_image = image.crop((xyxy[0], xyxy[1], xyxy[2], xyxy[3]))
-            dogs.append((cropped_image, confidence, xyxy))
-    return dogs
+    try:
+        results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
+        dogs = []
+        for box in results.boxes:
+            if box.cls == 16:  # COCO dataset class for dog is 16
+                xyxy = box.xyxy[0].tolist()
+                confidence = box.conf.item()
+                cropped_image = image.crop((xyxy[0], xyxy[1], xyxy[2], xyxy[3]))
+                dogs.append((cropped_image, confidence, xyxy))
+
+        # If no dogs are detected, use the whole image
+        if not dogs:
+            logger.info("No dogs detected, using the whole image.")
+            dogs = [(image, 1.0, [0, 0, image.width, image.height])]
+
+        return dogs
+    except Exception as e:
+        logger.error(f"Error in detect_multiple_dogs: {str(e)}")
+        return [(image, 1.0, [0, 0, image.width, image.height])]
+
+async def predict_single_dog(image):
+    try:
+        image_tensor = preprocess_image(image)
+        with torch.no_grad():
+            output = model(image_tensor)
+            logits = output[0] if isinstance(output, tuple) else output
+            probabilities = F.softmax(logits, dim=1)
+            topk_probs, topk_indices = torch.topk(probabilities, k=3)
+            top1_prob = topk_probs[0][0].item()
+            topk_breeds = [dog_breeds[idx.item()] for idx in topk_indices[0]]
+            topk_probs_percent = [f"{prob.item() * 100:.2f}%" for prob in topk_probs[0]]
+            return top1_prob, topk_breeds, topk_probs_percent
+    except Exception as e:
+        logger.error(f"Error in predict_single_dog: {str(e)}")
+        return 0, ["Unknown"], ["0%"]
 
 
 async def process_single_dog(image):
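A hedged usage sketch of how the two rewritten coroutines compose: detect dog crops first (falling back to the whole image), then classify each crop. The driver function and file name below are illustrative assumptions; the real predict() in app.py additionally handles annotation, Gradio state, and UI updates.

import asyncio
from PIL import Image

async def classify_all_dogs(image_path):
    # Illustrative only; assumes detect_multiple_dogs and predict_single_dog
    # from this commit (and the models they rely on) are already in scope.
    image = Image.open(image_path).convert("RGB")
    dogs = await detect_multiple_dogs(image)  # falls back to the whole image on no detection/error
    results = []
    for cropped_image, confidence, box in dogs:
        top1_prob, breeds, probs = await predict_single_dog(cropped_image)
        results.append({
            "box": box,                    # [x1, y1, x2, y2] from YOLO (or the full image)
            "detection_conf": confidence,  # 1.0 when the whole-image fallback is used
            "top_breeds": breeds,          # top-3 breed names, or ["Unknown"] on error
            "breed_probs": probs,          # formatted percentage strings
        })
    return results

# Example call (hypothetical file name):
# asyncio.run(classify_all_dogs("dog.jpg"))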
@@ -434,8 +476,8 @@ async def predict(image):
         return final_explanation, annotated_image, [], gr.update(visible=False), initial_state
 
     except Exception as e:
-        error_msg = f"An error occurred: {str(e)}"
-        print(error_msg)  # Add log output
+        error_msg = f"An error occurred: {str(e)}\n\nTraceback:\n{traceback.format_exc()}"
+        logger.error(error_msg)
         return error_msg, None, [], gr.update(visible=False), None
 
 def show_details(choice, previous_output, initial_state):
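For reference, a minimal self-contained sketch of the error-reporting format this hunk switches to: the exception message plus the full traceback, sent through the module logger instead of print(). The failing function below is hypothetical.

import logging
import traceback

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def run_prediction_step():
    # Hypothetical stand-in for the body of predict(); raises to demonstrate the format.
    raise ValueError("unsupported image mode")

try:
    run_prediction_step()
except Exception as e:
    # Same format as the commit: exception text plus the full traceback.
    error_msg = f"An error occurred: {str(e)}\n\nTraceback:\n{traceback.format_exc()}"
    logger.error(error_msg)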
 