aiqtech committed
Commit a15af69
1 Parent(s): 1bdf675

Upload app (16).py

Files changed (1)
  1. app (16).py +121 -0
app (16).py ADDED
@@ -0,0 +1,121 @@
import random

import gradio as gr
import numpy as np
import torch
import spaces
from diffusers import FluxPipeline
from PIL import Image
from diffusers.utils import export_to_gif

HEIGHT = 256
WIDTH = 1024
MAX_SEED = np.iinfo(np.int32).max

# Load FLUX.1-dev in bfloat16 and move it to the GPU when available
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16
).to(device)

def split_image(input_image, num_splits=4):
    # Create a list to store the output images
    output_images = []

    # Split the 1024x256 strip into four 256x256 sections, left to right
    for i in range(num_splits):
        left = i * 256
        right = (i + 1) * 256
        box = (left, 0, right, 256)
        output_images.append(input_image.crop(box))

    return output_images

@spaces.GPU(duration=190)
def predict(prompt, seed=42, randomize_seed=False, guidance_scale=5.0, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
    # Ask the model for a single wide image containing four consecutive frames
    prompt_template = f"""
    A side by side 4 frame image showing consecutive stills from a looped gif moving from left to right.
    The gif is of {prompt}.
    """

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    image = pipe(
        prompt=prompt_template,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=1,
        generator=torch.Generator("cpu").manual_seed(seed),
        height=HEIGHT,
        width=WIDTH
    ).images[0]

    # Split the strip into frames, export them as a GIF, and return path, strip, and seed
    return export_to_gif(split_image(image, 4), "flux.gif", fps=4), image, seed

css = """
footer {
    visibility: hidden;
}
"""

examples = [
    "a cat waving its paws in the air",
    "a panda moving their hips from side to side",
    "a flower going through the process of blooming"
]

with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
    with gr.Column(elem_id="col-container"):

        with gr.Row():
            prompt = gr.Text(label="Prompt", show_label=False, max_lines=1, placeholder="Enter your prompt")
            submit = gr.Button("Submit", scale=0)

        output = gr.Image(label="GIF", show_label=False)
        output_stills = gr.Image(label="stills", show_label=False, elem_id="stills")

        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )

            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance Scale",
                    minimum=1,
                    maximum=15,
                    step=0.1,
                    value=3.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=28,
                )

        gr.Examples(
            examples=examples,
            fn=predict,
            inputs=[prompt],
            outputs=[output, output_stills, seed],
            cache_examples="lazy"
        )

        gr.on(
            triggers=[submit.click, prompt.submit],
            fn=predict,
            inputs=[prompt, seed, randomize_seed, guidance_scale, num_inference_steps],
            outputs=[output, output_stills, seed]
        )

demo.launch()
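
The app renders one 1024x256 strip containing four consecutive stills, slices it into 256x256 frames with split_image, and writes them out with diffusers' export_to_gif. Below is a minimal sketch of that post-processing step in isolation; it assumes a blank PIL strip as a stand-in for the FLUX output and uses "test.gif" as a placeholder output path.

from PIL import Image
from diffusers.utils import export_to_gif

# Blank 1024x256 strip standing in for the generated image
strip = Image.new("RGB", (1024, 256), color="white")

# Crop four 256x256 frames, left to right, mirroring split_image above
frames = [strip.crop((i * 256, 0, (i + 1) * 256, 256)) for i in range(4)]

# Write the frames as a looping GIF at 4 fps; export_to_gif returns the file path
gif_path = export_to_gif(frames, "test.gif", fps=4)

At 4 fps the four frames play in about one second per loop, which suits the short, repetitive motions in the example prompts.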