workwithHasnain committed 438574d (parent: 7b405e8): Update README.md

README.md (updated):
---
library_name: transformers
license: apache-2.0
pipeline_tag: image-to-text
---

# BLIP-Image-to-recipe

# Inference code

```python
import requests
import torch
from PIL import Image
from transformers import BlipForConditionalGeneration, AutoProcessor

# Run on the GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Fetch an example food image.
img_url = 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSQuFg4LTHUattLGPU0kLzYpBGHRtuqgJY8Gho3uZe_cg&s'
image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

# Load the fine-tuned model and the base BLIP processor.
model = BlipForConditionalGeneration.from_pretrained("Fatehmujtaba/BLIP-Image-to-recipe").to(device)
processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

# Preprocess the image and generate the recipe text.
inputs = processor(images=image, return_tensors="pt").to(device)
pixel_values = inputs.pixel_values
generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(generated_caption)
```
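
Recipes tend to run longer than one-line captions, so the 50-token limit above can truncate the output. Below is a minimal sketch of an alternative decoding call, assuming the same `model`, `processor`, and `pixel_values` as in the snippet above; the `max_length` and `num_beams` values are illustrative choices, not settings published with this model.

```python
# Illustrative decoding settings (not from the model authors):
# a larger token budget plus beam search, which often helps with
# longer multi-sentence output such as a full recipe.
generated_ids = model.generate(
    pixel_values=pixel_values,
    max_length=128,  # assumed budget; tune for your recipes
    num_beams=4,     # beam search instead of greedy decoding
)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```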

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->