# HuggingFace Space: advertisement-builder Gradio app (status page header removed;
# the lines "Spaces: / Running / Running" were web-scrape residue, not code).
"""Gradio app for building simple advertisements.

Three tools are exposed in tabs:
  * Banner / "Create your Banner"  — text-to-image via ``Banner.TextImage``.
  * Banner / "Edit your Banner"    — masked image editing via ``Banner.Image2Image``.
  * Banner / "Upgrade your Banner" — upscale/refine via ``Banner.Image2Image_2``.
  * Video                          — image-to-video via ``Video.Video``.

``Banner`` and ``Video`` are project-local modules; their callables receive the
component values listed in each ``.click`` registration, in order.
"""

import gradio as gr
import numpy as np
import os

from huggingface_hub import login
from options import Banner, Video

# Authenticate with the Hugging Face Hub using a token injected through the
# environment (e.g. a Space secret). Skip login entirely when no token is
# present so the app can still start locally without credentials —
# login(token=None) would otherwise fall back to interactive prompting.
_hf_token = os.getenv("TOKEN")
if _hf_token:
    login(token=_hf_token)

MAX_SEED = np.iinfo(np.int32).max  # largest 32-bit seed the backends accept
MAX_IMAGE_SIZE = 2048  # upper bound (pixels) for generated image dimensions

with gr.Blocks() as demo:
    gr.Markdown("# Create your own Advertisement")

    with gr.Tab("Banner"):
        gr.Markdown("# Take your banner to the next LEVEL!")

        # ------------------------------------------------------------------
        # Tab 1: text-to-image banner generation
        # ------------------------------------------------------------------
        with gr.TabItem("Create your Banner"):
            textInput = gr.Textbox(label="Enter the text to get a good start")
            with gr.Accordion("Advanced Settings", open=False):
                with gr.Row():
                    width = gr.Slider(
                        label="Width",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=8,
                        value=1024,
                    )
                    # NOTE(review): step=32 differs from Width's step=8 —
                    # looks accidental, but kept as-is; confirm with the author.
                    height = gr.Slider(
                        label="Height",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=1024,
                    )
                guidance_scale = gr.Slider(
                    label="Guidance Scale",
                    minimum=1,
                    maximum=15,
                    step=0.1,
                    value=3.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of Inference Steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=28,
                )
            submit = gr.Button("Submit")
            # Output component named explicitly (it was previously created
            # inline in the click() call at this same layout position).
            banner_output = gr.Image()
            submit.click(
                fn=Banner.TextImage,
                inputs=[textInput, width, height, guidance_scale, num_inference_steps],
                outputs=banner_output,
            )

        # ------------------------------------------------------------------
        # Tab 2: mask-based editing of an existing banner
        # ------------------------------------------------------------------
        with gr.TabItem("Edit your Banner"):
            input_image_editor_component = gr.ImageEditor(
                label='Image',
                type='pil',
                sources=["upload", "webcam"],
                image_mode='RGB',
                layers=False,
                # Fixed white brush: the painted region is the edit mask.
                brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"),
            )
            edit_prompt = gr.Textbox(label="Enter the text to get a good start")
            edit_output = gr.Image()
            edit_btn = gr.Button()
            edit_btn.click(
                Banner.Image2Image,
                [edit_prompt, input_image_editor_component],
                edit_output,
            )

        # ------------------------------------------------------------------
        # Tab 3: upscale / refine an existing banner
        # ------------------------------------------------------------------
        with gr.TabItem("Upgrade your Banner"):
            upgrade_image = gr.Image()
            upgrade_prompt = gr.Textbox(label="Enter the text to get a good start")
            upgrade_btn = gr.Button()
            size = gr.Slider(
                label="Size",
                minimum=256,
                maximum=MAX_IMAGE_SIZE,
                step=8,
                value=1024,
            )
            # Fix: this tab previously wired in the "Number of Inference Steps"
            # slider from the *Create* tab, which is invisible from here. Give
            # the tab its own slider with the same range and default so
            # Banner.Image2Image_2 still receives the same value by default.
            upgrade_steps = gr.Slider(
                label="Number of Inference Steps",
                minimum=1,
                maximum=50,
                step=1,
                value=28,
            )
            upgrade_output = gr.Image()
            upgrade_btn.click(
                Banner.Image2Image_2,
                [upgrade_prompt, upgrade_image, size, upgrade_steps],
                upgrade_output,
            )

    # ----------------------------------------------------------------------
    # Video tab: turn a still image into a video clip
    # ----------------------------------------------------------------------
    with gr.Tab("Video"):
        gr.Markdown("# Create your own Video")
        video_source = gr.Image()
        video_btn = gr.Button()
        video_output = gr.Video()
        video_btn.click(Video.Video, video_source, video_output)

demo.launch()