Deadmon committed on
Commit 754b60e
1 Parent(s): 013bfbb

Update app.py

Files changed (1)
  1. app.py +32 -132
app.py CHANGED
@@ -1,133 +1,33 @@
-import os
 import torch
-import gradio as gr
-import numpy as np
-from PIL import Image
-from einops import rearrange
-import requests
-import spaces
-from huggingface_hub import login
-from gradio_imageslider import ImageSlider  # Import ImageSlider
-
-from image_datasets.canny_dataset import canny_processor, c_crop
-from src.flux.sampling import denoise_controlnet, get_noise, get_schedule, prepare, unpack
-from src.flux.util import load_ae, load_clip, load_t5, load_flow_model, load_controlnet, load_safetensors
-
-# Download and load the ControlNet model
-controlnet_model = 'InstantX/FLUX.1-dev-Controlnet-Union'
-
-# Source: https://github.com/XLabs-AI/x-flux.git
-name = "flux-dev"
-device = torch.device("cuda")
-offload = False
-is_schnell = name == "flux-schnell"
-
-model, ae, t5, clip, controlnet = None, None, None, None, None
-
-def load_models():
-    global model, ae, t5, clip, controlnet
-    t5 = load_t5(device, max_length=256 if is_schnell else 512)
-    clip = load_clip(device)
-    model = load_flow_model(name, device=device)
-    ae = load_ae(name, device=device)
-    controlnet = load_controlnet(controlnet_model, device).to(device).to(torch.bfloat16)
-
-load_models()
-
-def preprocess_image(image, target_width, target_height, crop=True):
-    if crop:
-        image = c_crop(image)  # Crop the image to square
-        original_width, original_height = image.size
-
-        # Resize to match the target size without stretching
-        scale = max(target_width / original_width, target_height / original_height)
-        resized_width = int(scale * original_width)
-        resized_height = int(scale * original_height)
-
-        image = image.resize((resized_width, resized_height), Image.LANCZOS)
-
-        # Center crop to match the target dimensions
-        left = (resized_width - target_width) // 2
-        top = (resized_height - target_height) // 2
-        image = image.crop((left, top, left + target_width, top + target_height))
-    else:
-        image = image.resize((target_width, target_height), Image.LANCZOS)
-
-    return image
-
-def preprocess_canny_image(image, target_width, target_height, crop=True):
-    image = preprocess_image(image, target_width, target_height, crop=crop)
-    image = canny_processor(image)
-    return image
-
-@spaces.GPU(duration=120)
-def generate_image(prompt, control_image, control_mode, num_steps=50, guidance=4, width=512, height=512, seed=42, random_seed=False):
-    if random_seed:
-        seed = np.random.randint(0, 10000)
-
-    if not os.path.isdir("./controlnet_results/"):
-        os.makedirs("./controlnet_results/")
-
-    torch_device = torch.device("cuda")
-
-    model.to(torch_device)
-    t5.to(torch_device)
-    clip.to(torch_device)
-    ae.to(torch_device)
-    controlnet.to(torch_device)
-
-    width = 16 * width // 16
-    height = 16 * height // 16
-    timesteps = get_schedule(num_steps, (width // 8) * (height // 8) // (16 * 16), shift=(not is_schnell))
-
-    processed_input = preprocess_image(control_image, width, height)
-    canny_processed = preprocess_canny_image(control_image, width, height)
-    controlnet_cond = torch.from_numpy((np.array(canny_processed) / 127.5) - 1)
-    controlnet_cond = controlnet_cond.permute(2, 0, 1).unsqueeze(0).to(torch.bfloat16).to(torch_device)
-
-    torch.manual_seed(seed)
-    with torch.no_grad():
-        x = get_noise(1, height, width, device=torch_device, dtype=torch.bfloat16, seed=seed)
-        inp_cond = prepare(t5=t5, clip=clip, img=x, prompt=prompt)
-
-        x = denoise_controlnet(model, **inp_cond, controlnet=controlnet, timesteps=timesteps, guidance=guidance, controlnet_cond=controlnet_cond, control_mode=control_modes.index(control_mode))
-
-        x = unpack(x.float(), height, width)
-        x = ae.decode(x)
-
-    x1 = x.clamp(-1, 1)
-    x1 = rearrange(x1[-1], "c h w -> h w c")
-    output_img = Image.fromarray((127.5 * (x1 + 1.0)).cpu().byte().numpy())
-
-    return [processed_input, output_img]  # Return both images for slider
-
-control_modes = [
-    "canny",
-    "tile",
-    "depth",
-    "blur",
-    "pose",
-    "gray",
-    "lq"
-]
-
-interface = gr.Interface(
-    fn=generate_image,
-    inputs=[
-        gr.Textbox(label="Prompt"),
-        gr.Image(type="pil", label="Control Image"),
-        gr.Dropdown(choices=control_modes, value="canny", label="Control Mode"),
-        gr.Slider(step=1, minimum=1, maximum=64, value=28, label="Num Steps"),
-        gr.Slider(minimum=0.1, maximum=10, value=4, label="Guidance"),
-        gr.Slider(minimum=128, maximum=2048, step=128, value=1024, label="Width"),
-        gr.Slider(minimum=128, maximum=2048, step=128, value=1024, label="Height"),
-        gr.Number(value=42, label="Seed"),
-        gr.Checkbox(label="Random Seed")
-    ],
-    outputs=ImageSlider(label="Before / After"),  # Use ImageSlider as the output
-    title="FLUX.1 Controlnet Canny",
-    description="Generate images using ControlNet and a text prompt.\n[[non-commercial license, Flux.1 Dev](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)]"
-)
-
-if __name__ == "__main__":
-    interface.launch(share=True)
+from diffusers.utils import load_image
+from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiControlNetModel
+
+base_model = 'black-forest-labs/FLUX.1-dev'
+controlnet_model_union = 'InstantX/FLUX.1-dev-Controlnet-Union'
+
+controlnet_union = FluxControlNetModel.from_pretrained(controlnet_model_union, torch_dtype=torch.bfloat16)
+controlnet = FluxMultiControlNetModel([controlnet_union])  # we always recommend loading via FluxMultiControlNetModel
+
+pipe = FluxControlNetPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch.bfloat16)
+pipe.to("cuda")
+
+prompt = 'A bohemian-style female travel blogger with sun-kissed skin and messy beach waves.'
+control_image_depth = load_image("https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Union/resolve/main/images/depth.jpg")
+control_mode_depth = 2
+
+control_image_canny = load_image("https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Union/resolve/main/images/canny.jpg")
+control_mode_canny = 0
+
+width, height = control_image_depth.size  # take the output size from the depth control image
+
+image = pipe(
+    prompt,
+    control_image=[control_image_depth, control_image_canny],
+    control_mode=[control_mode_depth, control_mode_canny],
+    width=width,
+    height=height,
+    controlnet_conditioning_scale=[0.2, 0.4],
+    num_inference_steps=24,
+    guidance_scale=3.5,
+    generator=torch.manual_seed(42),
+).images[0]
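
Note: the new script ends at `.images[0]` without persisting the result. A minimal follow-up sketch, assuming a local output path (the filename is illustrative, not part of the commit):

image.save("output.png")  # save the generated PIL image; the path is an assumption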