merve committed
Commit 173e298
1 Parent(s): 6ec7588

Update app.py

Files changed (1):
  1. app.py +3 -3
app.py CHANGED
@@ -5,8 +5,8 @@ import gradio as gr
 
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
-owl_model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble").to("cuda")
-owl_processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
+owl_model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16").to("cuda")
+owl_processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16")
 
 dino_processor = AutoProcessor.from_pretrained("IDEA-Research/grounding-dino-base")
 dino_model = AutoModelForZeroShotObjectDetection.from_pretrained("IDEA-Research/grounding-dino-base").to("cuda")
@@ -76,7 +76,7 @@ demo = gr.Interface(
     inputs=[gr.Image(label="Input Image"), gr.Textbox(label="Candidate Labels"), owl_threshold, dino_threshold],
     outputs=[owl_output, dino_output],
     title="OWLv2 ⚔ Grounding DINO",
-    description="Compare two state-of-the-art zero-shot object detection models [OWLv2](https://huggingface.co/google/owlv2-base-patch16-ensemble) and [Grounding DINO](https://huggingface.co/IDEA-Research/grounding-dino-base) in this Space. Simply enter an image and the objects you want to find with comma, or try one of the examples. Play with the threshold to filter out low confidence predictions in each model.",
+    description="Compare two state-of-the-art zero-shot object detection models [OWLv2](https://huggingface.co/google/owlv2-base-patch16) and [Grounding DINO](https://huggingface.co/IDEA-Research/grounding-dino-base) in this Space. Simply enter an image and the objects you want to find with comma, or try one of the examples. Play with the threshold to filter out low confidence predictions in each model.",
     examples=[["./bee.jpg", "bee, flower", 0.16, 0.12], ["./cats.png", "cat, fishnet", 0.16, 0.12]]
 )
 demo.launch(debug=True)
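
For context, this is roughly how the two checkpoints touched by this commit are queried through transformers. A minimal sketch, not code from this Space: the checkpoint names come from the diff above, the 0.16/0.12 thresholds mirror the Space's example defaults, and the post-processing calls (post_process_object_detection, post_process_grounded_object_detection) are the standard transformers APIs for these model classes, which is an assumption about what app.py does between these lines.

# Minimal sketch (assumptions noted above); runs on CPU or GPU.
import torch
from PIL import Image
from transformers import (
    Owlv2Processor, Owlv2ForObjectDetection,
    AutoProcessor, AutoModelForZeroShotObjectDetection,
)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
image = Image.open("bee.jpg")  # any RGB image

# OWLv2 takes one list of label queries per image.
owl_processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16")
owl_model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16").to(device)
owl_inputs = owl_processor(text=[["bee", "flower"]], images=image, return_tensors="pt").to(device)
with torch.no_grad():
    owl_outputs = owl_model(**owl_inputs)
target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
owl_results = owl_processor.post_process_object_detection(
    owl_outputs, threshold=0.16, target_sizes=target_sizes
)[0]

# Grounding DINO takes the queries as one lowercase, period-separated string.
dino_processor = AutoProcessor.from_pretrained("IDEA-Research/grounding-dino-base")
dino_model = AutoModelForZeroShotObjectDetection.from_pretrained(
    "IDEA-Research/grounding-dino-base"
).to(device)
dino_inputs = dino_processor(images=image, text="a bee. a flower.", return_tensors="pt").to(device)
with torch.no_grad():
    dino_outputs = dino_model(**dino_inputs)
dino_results = dino_processor.post_process_grounded_object_detection(
    dino_outputs, dino_inputs.input_ids,
    box_threshold=0.12, text_threshold=0.12, target_sizes=[image.size[::-1]]
)[0]

print(owl_results["boxes"].shape, dino_results["boxes"].shape)

Note that app.py computes a device variable but still hardcodes .to("cuda"); the sketch uses the computed device so it also runs without a GPU.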