File size: 1,636 Bytes
6a4e454
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eed61fa
6a4e454
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
import gradio as gr
from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor
import spaces
import torch

# Load the fine-tuned PaliGemma captioning checkpoint and its processor once at
# import time so every request reuses the same weights.
# NOTE(review): `.to("cuda")` assumes a CUDA device exists — this will raise on
# CPU-only hosts; on ZeroGPU Spaces the @spaces.GPU decorator provides the GPU.
model = PaliGemmaForConditionalGeneration.from_pretrained("gokaygokay/SDXL-Captioner").to("cuda").eval()
# The processor bundles the image preprocessor and the text tokenizer for this checkpoint.
processor = PaliGemmaProcessor.from_pretrained("gokaygokay/SDXL-Captioner")

@spaces.GPU
def create_captions_rich(image):
    """Generate an English caption for *image* with the fine-tuned PaliGemma model.

    The image is preprocessed by the module-level ``processor``, passed through
    greedy (non-sampled) generation, and only the newly generated tokens — i.e.
    everything after the prompt — are decoded into the returned string.
    """
    task_prompt = "caption en"
    inputs = processor(text=task_prompt, images=image, return_tensors="pt").to("cuda")
    # Length of the tokenized prompt; used below to strip it from the output.
    prompt_len = inputs["input_ids"].shape[-1]

    with torch.inference_mode():
        output_ids = model.generate(
            **inputs,
            repetition_penalty=1.10,
            max_new_tokens=256,
            do_sample=False,
        )

    # Decode only the generated continuation, dropping special tokens.
    generated_tokens = output_ids[0][prompt_len:]
    return processor.decode(generated_tokens, skip_special_tokens=True)

# CSS applied to the Blocks app; constrains the element with id "mkd".
css = """
  #mkd {
    height: 500px; 
    overflow: auto; 
    border: 1px solid #ccc; 
  }
"""

# Build the Gradio UI: one tab with an image input, a submit button, and a
# text output wired to create_captions_rich.
with gr.Blocks(css=css) as demo:
  # Bug fix: the original header string used opening tags (<center><h1>) where
  # closing tags were intended, emitting malformed HTML to the browser.
  gr.HTML("<h1><center>PaliGemma Fine-tuned for SDXL prompt generation</center></h1>")
  with gr.Tab(label="PaliGemma SDXL Captioner"):
    with gr.Row():
      with gr.Column():
        input_img = gr.Image(label="Input Picture")
        submit_btn = gr.Button(value="Submit")
      output = gr.Text(label="Caption")

    # Clickable example images; fn/outputs let Gradio cache or run captions
    # for the examples on demand.
    gr.Examples(
      [["image1.jpg"], ["image2.jpg"], ["image3.png"], ["image4.jpg"], ["image5.jpg"], ["image6.PNG"]],
      inputs=[input_img],
      outputs=[output],
      fn=create_captions_rich,
      label='Try captioning on examples'
    )

    submit_btn.click(create_captions_rich, [input_img], [output])

demo.launch(debug=True)