Nepjune committed on
Commit
6d97bc1
·
verified ·
1 Parent(s): 99eef42

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -0
app.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Dependencies: transformers, gradio.
# On Hugging Face Spaces declare them in requirements.txt; locally run
# `pip install transformers gradio`. The original `!pip install ...` lines
# were IPython notebook magics — they are a SyntaxError in a plain .py file,
# so they are kept here only as comments:
#   !pip install transformers
#   !pip install gradio

from transformers import pipeline

# BLIP-large image-captioning model; the weights are downloaded on first run.
image_captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")

# Set up prerequisites for the image-captioning app user interface.
import os
import io
import IPython.display
from PIL import Image
import base64

import gradio as gr
14
def image_to_base64_str(pil_image):
    """Serialize *pil_image* to a base64-encoded PNG string (UTF-8 text)."""
    # Render the image into an in-memory PNG buffer, then base64-encode it.
    buffer = io.BytesIO()
    pil_image.save(buffer, format='PNG')
    png_bytes = buffer.getvalue()
    return base64.b64encode(png_bytes).decode('utf-8')
19
def captioner(image):
    """Generate a text caption for an uploaded image.

    Parameters
    ----------
    image : PIL.Image.Image
        Image supplied by the Gradio UI (``type="pil"`` input component).

    Returns
    -------
    str
        Caption produced by the BLIP image-to-text pipeline.
    """
    # The transformers image-to-text pipeline accepts a PIL image directly.
    # The previous base64 round-trip (PIL -> PNG bytes -> base64 str) relied
    # on transformers' best-effort base64 string decoding and copied the
    # image data needlessly; passing the PIL image is simpler and robust.
    result = image_captioner(image)
    return result[0]['generated_text']
23
# Shut down any Gradio servers left running from a previous session.
gr.close_all()

# One image-upload component in, one read-only caption textbox out.
image_input = gr.Image(label="Upload image", type="pil")
caption_output = gr.Textbox(label="Caption")

# Wire the captioning function into a simple Gradio interface.
ImageCaptionApp = gr.Interface(
    fn=captioner,
    inputs=[image_input],
    outputs=[caption_output],
    title="Image Captioning with BLIP",
    description="Caption any image using the BLIP model",
    allow_flagging="never",
)

# Start the web server (blocks until the app is stopped).
ImageCaptionApp.launch()