Deadmon committed
Commit e19c312
Parent(s): 213883e

Update app.py

Files changed (1):
  1. app.py +25 -16
app.py CHANGED
@@ -4,14 +4,19 @@ from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiControlNetModel
 import gradio as gr
 import spaces
 
+# Ensure that you're using the appropriate data type for your GPU
+torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+
 base_model = 'black-forest-labs/FLUX.1-dev'
 controlnet_model_union = 'InstantX/FLUX.1-dev-Controlnet-Union'
 
-controlnet_union = FluxControlNetModel.from_pretrained(controlnet_model_union, torch_dtype=torch.bfloat16)
-controlnet = FluxMultiControlNetModel([controlnet_union]) # we always recommend loading via FluxMultiControlNetModel
+controlnet_union = FluxControlNetModel.from_pretrained(controlnet_model_union, torch_dtype=torch_dtype)
+controlnet = FluxMultiControlNetModel([controlnet_union])
 
-pipe = FluxControlNetPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch.bfloat16)
-pipe.to("cuda")
+pipe = FluxControlNetPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch_dtype)
+
+# If you encounter issues with CUDA, you can run this on the CPU for debugging
+pipe.to("cuda" if torch.cuda.is_available() else "cpu")
 
 control_modes = [
     "canny",
@@ -46,17 +51,21 @@ def generate_image(prompt, control_image_depth, control_mode_depth_index, use_de
     width, height = control_image_canny.shape[:2]
     adjusted_width, adjusted_height = adjust_dimensions(width, height)
 
-    image = pipe(
-        prompt,
-        control_image=control_images,
-        control_mode=control_modes,
-        width=adjusted_width,
-        height=adjusted_height,
-        controlnet_conditioning_scale=conditioning_scales,
-        num_inference_steps=24,
-        guidance_scale=3.5,
-        generator=torch.manual_seed(42),
-    ).images[0]
+    try:
+        image = pipe(
+            prompt,
+            control_image=control_images,
+            control_mode=control_modes,
+            width=adjusted_width,
+            height=adjusted_height,
+            controlnet_conditioning_scale=conditioning_scales,
+            num_inference_steps=24,
+            guidance_scale=3.5,
+            generator=torch.manual_seed(42),
+        ).images[0]
+    except RuntimeError as e:
+        torch.cuda.empty_cache()
+        raise e
 
     return image
 
@@ -75,4 +84,4 @@ iface = gr.Interface(
     description="Generate an image using FluxControlNet with depth and canny control images.",
 )
 
-iface.launch(share=True)
+iface.launch(share=True)
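
For reference, the pattern this commit introduces (choose a dtype and device from CUDA availability, and release cached GPU memory when generation fails with a RuntimeError) can be exercised in isolation. The sketch below is illustrative only and is not part of app.py; the helper names pick_dtype_and_device and run_with_oom_cleanup are hypothetical.

import torch

def pick_dtype_and_device():
    # Mirrors the torch_dtype line added in this commit:
    # half precision on a CUDA GPU, float32 on CPU.
    if torch.cuda.is_available():
        return torch.float16, "cuda"
    return torch.float32, "cpu"

def run_with_oom_cleanup(fn, *args, **kwargs):
    # Mirrors the try/except RuntimeError block around pipe():
    # free cached CUDA memory before re-raising so a later call can retry.
    try:
        return fn(*args, **kwargs)
    except RuntimeError:
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        raise

if __name__ == "__main__":
    dtype, device = pick_dtype_and_device()
    print(f"dtype={dtype}, device={device}")

Re-raising after empty_cache(), as the committed except block does, keeps the Gradio request failing visibly rather than returning a stale or undefined image.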