Update app.py #3
opened by bilgeyucel

app.py CHANGED
@@ -5,7 +5,6 @@ from haystack.components.generators import HuggingFaceTGIGenerator
 from haystack.components.builders.prompt_builder import PromptBuilder
 from haystack import Pipeline
 from haystack.utils import Secret
-
 from image_captioner import ImageCaptioner
 
 description = """
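`image_captioner.py` itself is not part of this diff. A minimal sketch of what a custom Haystack 2.x `ImageCaptioner` component could look like, assuming it wraps the `transformers` image-to-text pipeline; the input/output socket names (`image_file_paths`, `captions`) are assumptions, not taken from the repository:

```python
# Hypothetical sketch of image_captioner.py (not shown in this diff).
from typing import List

from haystack import component
from transformers import pipeline


@component
class ImageCaptioner:
    """Generates a text caption for each input image with a Hugging Face image-to-text model."""

    def __init__(self, model_name: str = "Salesforce/blip-image-captioning-base"):
        self.model_name = model_name
        self.captioner = pipeline("image-to-text", model=model_name)

    @component.output_types(captions=List[str])
    def run(self, image_file_paths: List[str]):
        # Each call returns a list like [{"generated_text": "a whale jumping out of the water"}]
        captions = [self.captioner(path)[0]["generated_text"] for path in image_file_paths]
        return {"captions": captions}
```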
@@ -16,7 +15,7 @@ description = """
 * Choose your model
 * ✨ Captionate! ✨
 
-It uses [Salesforce/blip-image-captioning-base](https://huggingface.co/Salesforce/blip-image-captioning-base) model for
+It uses [Salesforce/blip-image-captioning-base](https://huggingface.co/Salesforce/blip-image-captioning-base) model for image-to-text caption generation task.
 
 For Instagrammable captions, `mistralai/Mistral-7B-Instruct-v0.2` performs best, but try different models to see how they react to the same prompt.
 
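The `prompt_template` fed to `PromptBuilder` sits above the next hunk (its context line ends with "Instagram Caption:"). A plausible sketch, assuming the BLIP captions are injected through a Jinja2 variable named `captions`:

```python
# Hypothetical prompt_template; the actual string lives outside the changed lines.
prompt_template = """
You will receive the description of an image. Write a short, catchy Instagram caption for it.

Image description:
{% for caption in captions %}
{{ caption }}
{% endfor %}

Instagram Caption:
"""
```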
@@ -34,11 +33,10 @@ Instagram Caption:
 hf_api_key = os.environ["HF_API_KEY"]
 
 def generate_caption(image_file_path, model_name):
-    image_to_text = ImageCaptioner(
-        model_name="Salesforce/blip-image-captioning-base",
-    )
+    image_to_text = ImageCaptioner(model_name="Salesforce/blip-image-captioning-base")
     prompt_builder = PromptBuilder(template=prompt_template)
     generator = HuggingFaceTGIGenerator(model=model_name, token=Secret.from_token(hf_api_key), generation_kwargs={"max_new_tokens":100})
+
     captioning_pipeline = Pipeline()
     captioning_pipeline.add_component("image_to_text", image_to_text)
     captioning_pipeline.add_component("prompt_builder", prompt_builder)
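The component connections and the pipeline `run` call fall outside this hunk. A minimal sketch of how the rest of `generate_caption` presumably wires and runs the pipeline; the `captions` and `image_file_paths` socket names are assumptions, while `prompt` and `replies` are the standard sockets of `PromptBuilder` and `HuggingFaceTGIGenerator`:

```python
# Hypothetical tail of generate_caption() (these lines are outside the diff hunks).
def generate_caption(image_file_path, model_name):
    ...
    captioning_pipeline.add_component("generator", generator)

    # Route the BLIP caption(s) into the prompt, then the rendered prompt into the TGI generator.
    captioning_pipeline.connect("image_to_text.captions", "prompt_builder.captions")
    captioning_pipeline.connect("prompt_builder.prompt", "generator.prompt")

    result = captioning_pipeline.run({"image_to_text": {"image_file_paths": [image_file_path]}})
    return result["generator"]["replies"][0]
```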
@@ -55,7 +53,11 @@ with gr.Blocks(theme="soft") as demo:
     with gr.Row():
         image = gr.Image(type="filepath")
         with gr.Column():
-            model_name = gr.Dropdown(
+            model_name = gr.Dropdown(
+                ["mistralai/Mistral-7B-Instruct-v0.2","OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", "tiiuae/falcon-7b-instruct", "tiiuae/falcon-7b", "HuggingFaceH4/starchat-beta", "bigscience/bloom", "google/flan-t5-xxl"],
+                value="mistralai/Mistral-7B-Instruct-v0.2",
+                label="Choose your model!"
+            )
             gr.Examples(["./whale.png", "./rainbow.jpeg", "./selfie.png"], inputs=image, label="Click on any example")
             submit_btn = gr.Button("✨ Captionate ✨")
             caption = gr.Textbox(label="Caption", show_copy_button=True)
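The event wiring that ties this UI to `generate_caption` is also outside the changed lines. A minimal sketch, assuming the button simply maps the uploaded image path and the selected model into the function:

```python
# Hypothetical event wiring (outside the changed lines).
with gr.Blocks(theme="soft") as demo:
    ...
    submit_btn.click(fn=generate_caption, inputs=[image, model_name], outputs=caption)

demo.launch()
```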