aimersion committed
Commit 819389a · verified · 1 Parent(s): 6e8f9a4

Update app.py

Files changed (1)
  1. app.py +81 -37
app.py CHANGED
@@ -1,66 +1,110 @@

Previous version of app.py (66 lines):

import gradio as gr
import torch
from diffusers import DiffusionPipeline

# Check for CUDA availability
device = "cuda" if torch.cuda.is_available() else "cpu"
if device == "cpu":
    print("Warning: CUDA is not available. This model may not run correctly on CPU.")

# Load the model
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.float16)
pipe = pipe.to(device)

def infer(prompt, negative_prompt="", width=1024, height=1024, num_inference_steps=4, guidance_scale=7.5):
    try:
        # Generate the image
        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale
        ).images[0]

        return image
    except Exception as e:
        return str(e)

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Image Generation with FLUX.1-schnell")

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt")
            negative_prompt = gr.Textbox(label="Negative Prompt")
            generate_btn = gr.Button("Generate")

        image_output = gr.Image(label="Generated Image")

    with gr.Accordion("Advanced Settings", open=False):
        width = gr.Slider(minimum=256, maximum=1024, value=1024, step=32, label="Width")
        height = gr.Slider(minimum=256, maximum=1024, value=1024, step=32, label="Height")
        num_inference_steps = gr.Slider(minimum=1, maximum=50, value=4, step=1, label="Number of Inference Steps")
        guidance_scale = gr.Slider(minimum=0, maximum=20, value=7.5, step=0.1, label="Guidance Scale")

    generate_btn.click(
        infer,
        inputs=[prompt, negative_prompt, width, height, num_inference_steps, guidance_scale],
        outputs=[image_output]
    )

    gr.Examples(
        examples=[
            ["a cat sitting on a moon", "dog, low quality"],
            ["a futuristic city skyline", "old buildings, cars"],
        ],
        inputs=[prompt, negative_prompt],
        outputs=[image_output],
        fn=infer,
        cache_examples=True
    )

if __name__ == "__main__":
    demo.launch()
Updated version of app.py (110 lines):

import gradio as gr
import numpy as np
import random
import torch
import time
from diffusers import DiffusionPipeline

# Set the device and dtype
dtype = torch.float16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the diffusion pipeline
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048

def infer(prompt, negative_prompt="", seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, guidance_scale=7.5, progress=gr.Progress(track_tqdm=True)):
    start_time = time.time()

    if width > MAX_IMAGE_SIZE or height > MAX_IMAGE_SIZE:
        raise ValueError("Image size exceeds the maximum allowed dimensions.")

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)

    try:
        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=guidance_scale
        ).images[0]
    except Exception as e:
        print(f"Error generating image: {e}")
        return None, seed, f"Error: {str(e)}"

    if time.time() - start_time > 60:
        return None, seed, "Image generation took too long and was cancelled."

    return image, seed, None

examples = [
    ["a tiny astronaut hatching from an egg on the moon", "blurry, low quality"],
    ["a cat holding a sign that says hello world", "dog, text, writing"],
    ["an anime illustration of a wiener schnitzel", "realistic, photograph"],
]

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # Custom Image Creator
    12B param rectified flow transformer distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/) for 4 step generation
    [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1)]
    """)

    with gr.Row():
        with gr.Column(scale=2):
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Enter your prompt",
                lines=3
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                placeholder="Enter things to avoid in the image",
                lines=2
            )
            run_button = gr.Button("Generate Image", variant="primary")

        with gr.Column(scale=2):
            result = gr.Image(label="Generated Image")
            seed_output = gr.Number(label="Seed Used")

    with gr.Accordion("Advanced Settings", open=False):
        seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

        with gr.Row():
            width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
            height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)

        with gr.Row():
            num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=50, step=1, value=4)
            guidance_scale = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=20.0, step=0.5, value=7.5)

    gr.Examples(
        examples=examples,
        inputs=[prompt, negative_prompt],
        outputs=[result, seed_output],
        fn=infer,
        cache_examples=True
    )

    run_button.click(
        fn=infer,
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, num_inference_steps, guidance_scale],
        outputs=[result, seed_output]
    )

    gr.Markdown("""
    ## Save Your Image
    Right-click on the generated image and select 'Save image as' to download it.
    """)

if __name__ == "__main__":
    demo.launch()
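
For reference, a minimal standalone sketch of the seeded generation path this commit introduces, run outside the Gradio UI. It mirrors the updated script (FLUX.1-schnell in float16, a manually seeded torch.Generator, 4 inference steps); the prompt, seed value, and output filename are illustrative and not part of the commit.

# Standalone sketch of the seeded generation path added in this commit.
# Mirrors the updated app.py: FLUX.1-schnell in float16, a manually seeded
# torch.Generator, 4 inference steps. Prompt, seed, and filename are examples.
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.float16
).to(device)

seed = 42
generator = torch.Generator(device=device).manual_seed(seed)

image = pipe(
    prompt="a tiny astronaut hatching from an egg on the moon",
    width=1024,
    height=1024,
    num_inference_steps=4,
    guidance_scale=7.5,
    generator=generator,
).images[0]

image.save("flux_schnell_example.png")  # the pipeline returns a PIL image
print(f"Generated image with seed {seed}")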