Shahabmoin committed on
Commit
8c4efdd
·
verified ·
1 Parent(s): de9f3f9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -11,7 +11,7 @@ model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
11
  # Function to make predictions from the image
12
  def predict_image_description(image):
13
  # Preprocess the image and generate text inputs
14
- inputs = processor(text=["a photo of a cat", "a photo of a dog", "a photo of a car", "a photo of a tree", "a photo of a house"],
15
  images=image,
16
  return_tensors="pt",
17
  padding=True)
@@ -23,7 +23,7 @@ def predict_image_description(image):
23
 
24
  # Return top 3 predictions
25
  top_3_probabilities, top_3_indices = torch.topk(probs, 3)
26
- labels = ["a cat", "a dog", "a car", "a tree", "a house"]
27
 
28
  predictions = []
29
  for i in range(3):
 
11
  # Function to make predictions from the image
12
  def predict_image_description(image):
13
  # Preprocess the image and generate text inputs
14
+ inputs = processor(text=["a photo of an animal", "a photo of a human", "a photo of a car", "a photo of a tree", "a photo of a house"],
15
  images=image,
16
  return_tensors="pt",
17
  padding=True)
 
23
 
24
  # Return top 3 predictions
25
  top_3_probabilities, top_3_indices = torch.topk(probs, 3)
26
+ labels = ["an animal", "a human", "a car", "a tree", "a house"]
27
 
28
  predictions = []
29
  for i in range(3):