Commit · aa464d7 · 1 Parent(s): d242cc5
Update app.py
app.py CHANGED
@@ -7,13 +7,14 @@ CAPTION_MODELS = {
     'blip-large': 'Salesforce/blip-image-captioning-large',
     'vit-gpt2-coco-en': 'ydshieh/vit-gpt2-coco-en',
     'blip2-2.7b-fp16': 'Mediocreatmybest/blip2-opt-2.7b-fp16-sharded',
+    'blip2-2.7b': 'Salesforce/blip2-opt-2.7b',
 }
 
 # Create a dictionary to store loaded models
 loaded_models = {}
 
 # Simple caption creation
-def caption_image(model_choice, image_input, url_input):
+def caption_image(model_choice, image_input, url_input, load_in_8bit):
     if image_input is not None:
         input_data = image_input
     else:
@@ -23,10 +24,11 @@ def caption_image(model_choice, image_input, url_input):
     if model_choice in loaded_models:
         captioner = loaded_models[model_choice]
     else:
+        model_kwargs = {"load_in_8bit": load_in_8bit} if load_in_8bit else {}
         captioner = pipeline(task="image-to-text",
                              model=CAPTION_MODELS[model_choice],
                              max_new_tokens=30,
-                             device_map="cpu", use_fast=True
+                             device_map="cpu", model_kwargs=model_kwargs, use_fast=True
                              )
         # Store the loaded model
         loaded_models[model_choice] = captioner
@@ -34,12 +36,13 @@ def caption_image(model_choice, image_input, url_input):
     caption = captioner(input_data)[0]['generated_text']
     return str(caption).strip()
 
-def launch(model_choice, image_input, url_input):
-    return caption_image(model_choice, image_input, url_input)
+def launch(model_choice, image_input, url_input, load_in_8bit):
+    return caption_image(model_choice, image_input, url_input, load_in_8bit)
 
 model_dropdown = gr.Dropdown(choices=list(CAPTION_MODELS.keys()), label='Select Caption Model')
 image_input = gr.Image(type="pil", label="Input Image")
 url_input = gr.Text(label="Input URL")
+load_in_8bit = gr.Checkbox(label="Load model in 8bit")
 
-iface = gr.Interface(launch, inputs=[model_dropdown, image_input, url_input], outputs="text")
-iface.launch()
+iface = gr.Interface(launch, inputs=[model_dropdown, image_input, url_input, load_in_8bit], outputs="text")
+iface.launch()
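
For reference, a minimal standalone sketch (not code from this commit) of the pattern the change wires up: the checkbox value is passed through model_kwargs, which the transformers pipeline forwards to from_pretrained, where load_in_8bit is actually consumed (it requires the bitsandbytes package and, in practice, a CUDA device). The model id and image URL below are illustrative assumptions, not values taken from the Space.

# Hedged sketch, assuming transformers, Pillow, and requests are installed;
# bitsandbytes (and effectively a GPU) is needed when load_in_8bit=True.
import requests
from PIL import Image
from transformers import pipeline

def build_captioner(model_id, load_in_8bit=False):
    # model_kwargs is forwarded by the pipeline to from_pretrained, which is
    # where the 8-bit quantization flag is consumed.
    model_kwargs = {"load_in_8bit": load_in_8bit} if load_in_8bit else {}
    return pipeline(task="image-to-text",
                    model=model_id,
                    max_new_tokens=30,
                    model_kwargs=model_kwargs)

if __name__ == "__main__":
    # Model id copied from the CAPTION_MODELS dict; the URL is an arbitrary example.
    captioner = build_captioner("Salesforce/blip-image-captioning-large")
    image = Image.open(requests.get("https://example.com/cat.jpg", stream=True).raw)
    print(captioner(image)[0]["generated_text"].strip())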
|