Update README.md
Browse files
README.md
CHANGED
@@ -66,23 +66,26 @@ The model achieved high accuracy on a held-out validation set, indicating strong
|
|
66 |
To use the model for inference, you can load it using the `transformers` library from Hugging Face:
|
67 |
|
68 |
```python
|
69 |
-
|
70 |
from PIL import Image
|
71 |
-
import
|
72 |
|
73 |
-
|
74 |
-
|
75 |
-
image = Image.open(requests.get(url, stream=True).raw)
|
76 |
|
|
|
77 |
model = AutoModelForImageClassification.from_pretrained("Falconsai/brand_identification")
|
78 |
processor = ViTImageProcessor.from_pretrained("Falconsai/brand_identification")
|
|
|
|
|
79 |
with torch.no_grad():
|
80 |
inputs = processor(images=image, return_tensors="pt")
|
81 |
outputs = model(**inputs)
|
82 |
logits = outputs.logits
|
83 |
|
84 |
predicted_label = logits.argmax(-1).item()
|
85 |
-
model.config.id2label[predicted_label]
|
|
|
86 |
```
|
87 |
### Companies Identified:
|
88 |
|
|
|
66 |
To use the model for inference, you can load it using the `transformers` library from Hugging Face:
|
67 |
|
68 |
```python
|
69 |
+
import torch
|
70 |
from PIL import Image
|
71 |
+
from transformers import AutoModelForImageClassification, ViTImageProcessor
|
72 |
|
73 |
+
image = Image.open("<path_to_image>")
|
74 |
+
image = image.convert("RGB") # Ensure image is in RGB format
|
|
|
75 |
|
76 |
+
# Load model and processor
|
77 |
model = AutoModelForImageClassification.from_pretrained("Falconsai/brand_identification")
|
78 |
processor = ViTImageProcessor.from_pretrained("Falconsai/brand_identification")
|
79 |
+
|
80 |
+
# Preprocess image and make predictions
|
81 |
with torch.no_grad():
|
82 |
inputs = processor(images=image, return_tensors="pt")
|
83 |
outputs = model(**inputs)
|
84 |
logits = outputs.logits
|
85 |
|
86 |
predicted_label = logits.argmax(-1).item()
|
87 |
+
print(model.config.id2label[predicted_label])
|
88 |
+
|
89 |
```
|
90 |
### Companies Identified:
|
91 |
|