Deddy committed on
Commit
da8aae8
1 Parent(s): 97c54f6

Delete app_backup.py

Browse files
Files changed (1) hide show
  1. app_backup.py +0 -165
app_backup.py DELETED
@@ -1,165 +0,0 @@
1
- import gradio as gr
2
- import numpy as np
3
- import random
4
- import spaces
5
- import torch
6
- import time
7
- from diffusers import DiffusionPipeline
8
- from custom_pipeline import FLUXPipelineWithIntermediateOutputs
9
-
10
# Constants
MAX_SEED = np.iinfo(np.int32).max  # upper bound when randomizing seeds (int32 max)
MAX_IMAGE_SIZE = 2048              # ceiling for the width/height sliders
DEFAULT_WIDTH = 1024
DEFAULT_HEIGHT = 1024
DEFAULT_INFERENCE_STEPS = 1        # slider default; generate_image itself defaults to 2

# Device and model setup
# Loads FLUX.1-schnell once at import time onto CUDA in fp16.
# NOTE(review): this assumes a CUDA device is present — confirm deployment target.
dtype = torch.float16
pipe = FLUXPipelineWithIntermediateOutputs.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
).to("cuda")
torch.cuda.empty_cache()  # free any allocator slack left over from model loading
23
-
24
# Inference function
@spaces.GPU(duration=25)
def generate_image(prompt, seed=42, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, randomize_seed=False, num_inference_steps=2, progress=gr.Progress(track_tqdm=True)):
    """Stream generated images for *prompt*, yielding (image, seed, latency).

    Runs the modified FLUX.1-schnell pipeline and yields each intermediate
    image it produces, together with the seed actually used and a latency
    string measured from the start of the call.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Seed may arrive as a float from the gr.Number widget; normalize to int.
    rng = torch.Generator().manual_seed(int(float(seed)))

    started = time.time()

    # Only generate the last image in the sequence
    image_stream = pipe.generate_images(
        prompt=prompt,
        guidance_scale=0,  # as Flux schnell is guidance free
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=rng,
    )
    for frame in image_stream:
        elapsed = time.time() - started
        yield frame, seed, f"Latency: {elapsed:.2f} seconds"
44
-
45
# Example prompts
# Shown in the "Inspiration Gallery" gr.Examples widget; each entry is fed
# to generate_image as the `prompt` input.
examples = [
    "a tiny astronaut hatching from an egg on the moon",
    "a cute white cat holding a sign that says hello world",
    "an anime illustration of a wiener schnitzel",
    # Fixed typo in user-facing prompt: "Create mage of" -> "Create image of"
    "Create image of Modern house in minecraft style",
    "Imagine steve jobs as Star Wars movie character",
    "Lion",
    "Photo of a young woman with long, wavy brown hair tied in a bun and glasses. She has a fair complexion and is wearing subtle makeup, emphasizing her eyes and lips. She is dressed in a black top. The background appears to be an urban setting with a building facade, and the sunlight casts a warm glow on her face.",
]
55
-
56
# --- Gradio UI ---
# Layout: an image output plus a prompt box with Generate/Enhance buttons,
# an advanced-options area (realtime toggle, seed, size, steps), and an
# examples gallery. Event wiring below drives generate_image.
with gr.Blocks() as demo:
    with gr.Column(elem_id="app-container"):
        gr.Markdown("# 🎨 Realtime FLUX Image Generator")
        gr.Markdown("Generate stunning images in real-time with Modified Flux.Schnell pipeline.")
        gr.Markdown("<span style='color: red;'>Note: Sometimes it stucks or stops generating images (I don't know why). In that situation just refresh the site.</span>")

        with gr.Row():
            # NOTE(review): gradio documents `scale` as an int — confirm 2.5 is honored.
            with gr.Column(scale=2.5):
                result = gr.Image(label="Generated Image", show_label=False, interactive=False)
            with gr.Column(scale=1):
                prompt = gr.Text(
                    label="Prompt",
                    placeholder="Describe the image you want to generate...",
                    lines=3,
                    show_label=False,
                    container=False,
                )
                generateBtn = gr.Button("🖼️ Generate Image")
                enhanceBtn = gr.Button("🚀 Enhance Image")

        # NOTE(review): gr.Column does not accept a label positionally; this
        # was likely intended to be gr.Accordion("Advanced Options") — confirm.
        with gr.Column("Advanced Options"):
            with gr.Row():
                realtime = gr.Checkbox(label="Realtime Toggler", info="If TRUE then uses more GPU but create image in realtime.", value=False)
                latency = gr.Text(label="Latency")
            with gr.Row():
                seed = gr.Number(label="Seed", value=42)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=False)
            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_WIDTH)
                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_HEIGHT)
                num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=4, step=1, value=DEFAULT_INFERENCE_STEPS)

        with gr.Row():
            gr.Markdown("### 🌟 Inspiration Gallery")
        with gr.Row():
            gr.Examples(
                examples=examples,
                fn=generate_image,
                inputs=[prompt],
                outputs=[result, seed, latency],
                cache_examples="lazy"
            )

    def enhance_image(*args):
        # Takes the first yielded (image, seed, latency) tuple from the
        # generate_image generator; with only (prompt, seed, width, height)
        # passed, generate_image falls back to its num_inference_steps=2 default.
        gr.Info("Enhancing Image") # currently just runs optimized pipeline for 2 steps. Further implementations later.
        return next(generate_image(*args))

    enhanceBtn.click(
        fn=enhance_image,
        inputs=[prompt, seed, width, height],
        outputs=[result, seed, latency],
        show_progress="hidden",
        api_name="Enhance",
        queue=False,
        concurrency_limit=None
    )

    generateBtn.click(
        fn=generate_image,
        inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
        outputs=[result, seed, latency],
        show_progress="full",
        api_name="RealtimeFlux",
        queue=False,
        concurrency_limit=None
    )

    def update_ui(realtime_enabled):
        # Hide the Generate button while realtime mode is on (generation is
        # then triggered by component .input events instead).
        # NOTE(review): prompt is always set interactive=True regardless of
        # the toggle — confirm that is intentional.
        return {
            prompt: gr.update(interactive=True),
            generateBtn: gr.update(visible=not realtime_enabled)
        }

    realtime.change(
        fn=update_ui,
        inputs=[realtime],
        outputs=[prompt, generateBtn],
        queue=False,
        concurrency_limit=None
    )

    def realtime_generation(*args):
        # args[0] is the realtime checkbox value; the rest match
        # generate_image's parameters. Yields nothing when realtime is off.
        # NOTE(review): returns None (implicitly) when realtime is disabled —
        # confirm gradio tolerates that for these outputs.
        if args[0]: # If realtime is enabled
            return next(generate_image(*args[1:]))

    # Pressing Enter in the prompt box always generates, independent of the
    # realtime toggle.
    prompt.submit(
        fn=generate_image,
        inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
        outputs=[result, seed, latency],
        show_progress="full",
        api_name=False,
        queue=False,
        concurrency_limit=None
    )

    # Realtime path: any edit to these components re-generates (last event wins).
    for component in [prompt, width, height, num_inference_steps]:
        component.input(
            fn=realtime_generation,
            inputs=[realtime, prompt, seed, width, height, randomize_seed, num_inference_steps],
            outputs=[result, seed, latency],
            show_progress="hidden",
            api_name=False,
            trigger_mode="always_last",
            queue=False,
            concurrency_limit=None
        )

# Launch the app
demo.launch()