owaiskha9654 committed
Commit dbe3b60 · 1 Parent(s): bf0e3ea

Update app.py

Files changed (1)
  1. app.py +27 -16
app.py CHANGED
@@ -6,31 +6,42 @@ from PIL import Image
 REPO_ID = "owaiskha9654/Yolov7_Custom_Object_Detection"
 FILENAME = "best.pt"
 
-yolov5_weights = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
+yolov7_custom_weights = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
 
-model = torch.hub.load('WongKinYiu/yolov7:main', model='custom', path_or_model=yolov5_weights, force_reload=True)  # local repo
+model = torch.hub.load('WongKinYiu/yolov7:main', model='custom', path_or_model=yolov7_custom_weights, force_reload=True)  # local repo
 
-def object_detection(im, size=640):
-    results = model(im)   # inference
-    #results.print()      # print results to screen
-    #results.show()       # display results
-    #results.save()       # save as results1.jpg, results2.jpg... etc.
-    results.render()      # updates results.imgs with boxes and labels
+def object_detection(im, size=416):
+    results = model(im)
+    results.render()
     return Image.fromarray(results.imgs[0])
 
 title = "Yolov7 Custom"
-description = """This model is a small demo based on an analysis of only about 60 images. For more reliable and more general results, more examples (images) are needed.
-"""
 
-image = gr.inputs.Image(shape=(640, 640), image_mode="RGB", source="upload", label="Image", optional=False)
+image = gr.inputs.Image(shape=(416, 416), image_mode="RGB", source="upload", label="Upload Image", optional=False)
 outputs = gr.outputs.Image(type="pil", label="Output Image")
 
+Custom_description = "<center>Custom Training Performed on Kaggle <a href='https://www.kaggle.com/code/owaiskhan9654/training-yolov7-on-kaggle-on-custom-dataset/notebook' style='text-decoration: underline' target='_blank'>Link</a></center><br><center>Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors</center><br><b>1st</b> class is for Person Detection<br><b>2nd</b> class is for Car Detection"
+
+Footer = (
+    "<center>Model Trained by: Owais Ahmad, Data Scientist at <b>Thoucentric</b> <a href=\"https://www.linkedin.com/in/owaiskhan9654/\">Visit Profile</a><br></center>"
+
+    "<center>Model Training Kaggle Kernel <a href=\"https://www.kaggle.com/code/owaiskhan9654/training-yolov7-on-kaggle-on-custom-dataset/notebook\">Link</a><br></center>"
+
+    "<center>Kaggle Profile <a href=\"https://www.kaggle.com/owaiskhan9654\">Link</a><br></center>"
+
+    "<center>HuggingFace🤗 Model Deployed Repository <a href=\"https://huggingface.co/owaiskha9654/Yolov7_Custom_Object_Detection\">Link</a><br></center>"
+)
+
+examples1 = [["Image1.jpeg"], ["Image2.jpeg"], ["Image3.jpeg"], ["Image4.jpeg"], ["Image5.jpeg"], ["Image6.jpeg"], ["horses.jpeg"], ["horses.jpeg"]]
+
+Top_Title = "<center>Yolov7 🚀 Custom Trained by <a href='https://www.linkedin.com/in/owaiskhan9654/' style='text-decoration: underline' target='_blank'>Owais Ahmad</a> 🚗Car and 👦Person Detection</center>"
+
+
 gr.Interface(
     fn=object_detection,
     inputs=image,
     outputs=outputs,
-    title=title,
-    description=description,
-    examples=[["car-person-2.jpg"], ["car-person-2.jpg"]],
-    cache_examples=False,
-).launch()
+    title=Top_Title,
+    description=Custom_description,
+    article=Footer,
+    examples=[["car-person-2.jpg"], ["car-person-2.jpg"]]).launch()
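
For reference, below is a minimal sketch of how app.py reads once this commit is applied, assembled from the hunk above. The import block is not part of the diff, so the gradio, torch, and hf_hub_download imports are assumptions inferred from the names used in the file (only "from PIL import Image" appears as context in the hunk header), and the long HTML strings (Custom_description, Footer, Top_Title) are elided here; their full text is in the hunk.

# app.py after this commit (reconstructed sketch; imports assumed, HTML strings elided)
import gradio as gr                            # assumed: the app builds a gr.Interface
import torch                                   # assumed: needed for torch.hub.load
from huggingface_hub import hf_hub_download    # assumed: used to fetch best.pt
from PIL import Image                          # shown as context in the hunk header

REPO_ID = "owaiskha9654/Yolov7_Custom_Object_Detection"
FILENAME = "best.pt"

# Download the custom-trained YOLOv7 weights from the Hugging Face Hub.
yolov7_custom_weights = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)

# Load the weights through the WongKinYiu/yolov7 torch.hub 'custom' entry point.
model = torch.hub.load('WongKinYiu/yolov7:main', model='custom',
                       path_or_model=yolov7_custom_weights, force_reload=True)

def object_detection(im, size=416):
    results = model(im)                  # run inference on the uploaded image
    results.render()                     # draw boxes and labels onto results.imgs
    return Image.fromarray(results.imgs[0])

title = "Yolov7 Custom"                  # retained from the old version; the Interface now uses Top_Title

image = gr.inputs.Image(shape=(416, 416), image_mode="RGB", source="upload",
                        label="Upload Image", optional=False)
outputs = gr.outputs.Image(type="pil", label="Output Image")

Custom_description = "..."               # full HTML description string from the hunk above
Footer = "..."                           # full HTML footer string from the hunk above
Top_Title = "..."                        # full HTML title string from the hunk above

# Defined in the commit but not passed to the Interface, which still uses
# the two car-person-2.jpg examples.
examples1 = [["Image1.jpeg"], ["Image2.jpeg"], ["Image3.jpeg"], ["Image4.jpeg"],
             ["Image5.jpeg"], ["Image6.jpeg"], ["horses.jpeg"], ["horses.jpeg"]]

gr.Interface(
    fn=object_detection,
    inputs=image,
    outputs=outputs,
    title=Top_Title,
    description=Custom_description,
    article=Footer,
    examples=[["car-person-2.jpg"], ["car-person-2.jpg"]],
).launch()

The gr.inputs / gr.outputs classes and their shape and optional arguments follow the older Gradio API that the app already uses, so this sketch assumes a Gradio version in which those modules are still available.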