kadirnar committed on
Commit
591a6d9
1 Parent(s): 4ea275b

Delete controlnet

controlnet/controlnet_canny.py DELETED
@@ -1,66 +0,0 @@
-from diffusers import (StableDiffusionControlNetPipeline,
-                       ControlNetModel, UniPCMultistepScheduler)
-
-from PIL import Image
-import numpy as np
-import torch
-import cv2
-
-
-def controlnet_canny(
-    image_path: str,
-    low_th: int,
-    high_th: int,
-):
-    image = Image.open(image_path)
-    image = np.array(image)
-
-    image = cv2.Canny(image, low_th, high_th)
-    image = image[:, :, None]
-    image = np.concatenate([image, image, image], axis=2)
-    image = Image.fromarray(image)
-
-    controlnet = ControlNetModel.from_pretrained(
-        "lllyasviel/sd-controlnet-canny",
-        torch_dtype=torch.float16
-    )
-    return controlnet, image
-
-
-def stable_diffusion_controlnet_canny(
-    stable_model_path: str,
-    image_path: str,
-    prompt: str,
-    negative_prompt: str,
-    num_samples: int,
-    guidance_scale: int,
-    num_inference_step: int,
-    low_th: int,
-    high_th: int
-):
-
-    controlnet, image = controlnet_canny(
-        image_path=image_path,
-        low_th=low_th,
-        high_th=high_th
-    )
-    pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        pretrained_model_name_or_path=stable_model_path,
-        controlnet=controlnet,
-        safety_checker=None,
-        torch_dtype=torch.float16,
-    )
-    pipe.to("cuda")
-    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-    pipe.enable_xformers_memory_efficient_attention()
-
-    output = pipe(
-        prompt=prompt,
-        image=image,
-        negative_prompt=negative_prompt,
-        num_images_per_prompt=num_samples,
-        num_inference_steps=num_inference_step,
-        guidance_scale=guidance_scale,
-    ).images
-
-    return output
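
For reference, here is how the deleted Canny helper was called before this commit. A minimal sketch, not part of the repository: the import path assumes the repo root is on PYTHONPATH, and the model id, file names, and argument values are hypothetical. The other deleted modules below follow the same call pattern, minus the two Canny thresholds.

from controlnet.controlnet_canny import stable_diffusion_controlnet_canny

images = stable_diffusion_controlnet_canny(
    stable_model_path="runwayml/stable-diffusion-v1-5",  # assumed SD 1.5 base model
    image_path="input.png",                              # hypothetical local file
    prompt="a watercolor painting of a house",
    negative_prompt="low quality, blurry",
    num_samples=1,
    guidance_scale=7,        # annotated as int in the deleted code
    num_inference_step=30,
    low_th=100,              # Canny hysteresis thresholds
    high_th=200,
)
images[0].save("output.png")  # requires a CUDA GPU and xformers, per pipe.to("cuda")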
controlnet/controlnet_depth.py DELETED
@@ -1,59 +0,0 @@
-from diffusers import (StableDiffusionControlNetPipeline,
-                       ControlNetModel, UniPCMultistepScheduler,
-                       DDIMScheduler)
-
-from transformers import pipeline
-from PIL import Image
-import numpy as np
-import torch
-
-
-def controlnet_depth(image_path: str):
-    depth_estimator = pipeline('depth-estimation')
-
-    image = Image.open(image_path)
-    image = depth_estimator(image)['depth']
-    image = np.array(image)
-    image = image[:, :, None]
-    image = np.concatenate([image, image, image], axis=2)
-    image = Image.fromarray(image)
-
-    controlnet = ControlNetModel.from_pretrained(
-        "fusing/stable-diffusion-v1-5-controlnet-depth", torch_dtype=torch.float16
-    )
-
-    return controlnet, image
-
-def stable_diffusion_controlnet_depth(
-    stable_model_path: str,
-    image_path: str,
-    prompt: str,
-    negative_prompt: str,
-    num_samples: int,
-    guidance_scale: int,
-    num_inference_step: int,
-):
-
-    controlnet, image = controlnet_depth(image_path=image_path)
-
-    pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        pretrained_model_name_or_path=stable_model_path,
-        controlnet=controlnet,
-        safety_checker=None,
-        torch_dtype=torch.float16
-    )
-
-    pipe.to("cuda")
-    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-    pipe.enable_xformers_memory_efficient_attention()
-
-    output = pipe(
-        prompt=prompt,
-        image=image,
-        negative_prompt=negative_prompt,
-        num_images_per_prompt=num_samples,
-        num_inference_steps=num_inference_step,
-        guidance_scale=guidance_scale,
-    ).images
-
-    return output
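
Note that DDIMScheduler is imported by this file but never used; the scheduler swap is hard-coded to UniPC. A minimal sketch of what the unused import was presumably for (my assumption, not in the deleted code): building the same pipeline but with DDIM instead. The base model id is assumed, not named anywhere in this commit.

import torch
from diffusers import (StableDiffusionControlNetPipeline,
                       ControlNetModel, DDIMScheduler)

controlnet = ControlNetModel.from_pretrained(
    "fusing/stable-diffusion-v1-5-controlnet-depth", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed base checkpoint
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
)
# Schedulers are interchangeable via their shared config.
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)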
controlnet/controlnet_hed.py DELETED
@@ -1,54 +0,0 @@
-from diffusers import (StableDiffusionControlNetPipeline,
-                       ControlNetModel, UniPCMultistepScheduler)
-
-from controlnet_aux import HEDdetector
-from PIL import Image
-import torch
-
-
-def controlnet_hed(image_path: str):
-    hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')
-
-    image = Image.open(image_path)
-    image = hed(image)
-
-    controlnet = ControlNetModel.from_pretrained(
-        "fusing/stable-diffusion-v1-5-controlnet-hed",
-        torch_dtype=torch.float16
-    )
-    return controlnet, image
-
-
-def stable_diffusion_controlnet_hed(
-    stable_model_path: str,
-    image_path: str,
-    prompt: str,
-    negative_prompt: str,
-    num_samples: int,
-    guidance_scale: int,
-    num_inference_step: int,
-):
-
-    controlnet, image = controlnet_hed(image_path=image_path)
-
-    pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        pretrained_model_name_or_path=stable_model_path,
-        controlnet=controlnet,
-        safety_checker=None,
-        torch_dtype=torch.float16
-    )
-
-    pipe.to("cuda")
-    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-    pipe.enable_xformers_memory_efficient_attention()
-
-    output = pipe(
-        prompt=prompt,
-        image=image,
-        negative_prompt=negative_prompt,
-        num_images_per_prompt=num_samples,
-        num_inference_steps=num_inference_step,
-        guidance_scale=guidance_scale,
-    ).images
-
-    return output
controlnet/controlnet_mlsd.py DELETED
@@ -1,54 +0,0 @@
-from diffusers import (StableDiffusionControlNetPipeline,
-                       ControlNetModel, UniPCMultistepScheduler)
-
-from controlnet_aux import MLSDdetector
-from PIL import Image
-import torch
-
-
-def controlnet_mlsd(image_path: str):
-    mlsd = MLSDdetector.from_pretrained('lllyasviel/ControlNet')
-
-    image = Image.open(image_path)
-    image = mlsd(image)
-
-    controlnet = ControlNetModel.from_pretrained(
-        "fusing/stable-diffusion-v1-5-controlnet-mlsd",
-        torch_dtype=torch.float16
-    )
-
-    return controlnet, image
-
-def stable_diffusion_controlnet_mlsd(
-    stable_model_path: str,
-    image_path: str,
-    prompt: str,
-    negative_prompt: str,
-    num_samples: int,
-    guidance_scale: int,
-    num_inference_step: int,
-):
-
-    controlnet, image = controlnet_mlsd(image_path=image_path)
-
-    pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        pretrained_model_name_or_path=stable_model_path,
-        controlnet=controlnet,
-        safety_checker=None,
-        torch_dtype=torch.float16
-    )
-
-    pipe.to("cuda")
-    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-    pipe.enable_xformers_memory_efficient_attention()
-
-    output = pipe(
-        prompt=prompt,
-        image=image,
-        negative_prompt=negative_prompt,
-        num_images_per_prompt=num_samples,
-        num_inference_steps=num_inference_step,
-        guidance_scale=guidance_scale,
-    ).images
-
-    return output
controlnet/controlnet_pose.py DELETED
@@ -1,55 +0,0 @@
-from diffusers import (StableDiffusionControlNetPipeline,
-                       ControlNetModel, UniPCMultistepScheduler)
-
-from controlnet_aux import OpenposeDetector
-
-from PIL import Image
-import torch
-
-
-def controlnet_pose(image_path: str):
-    openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
-
-    image = Image.open(image_path)
-    image = openpose(image)
-
-    controlnet = ControlNetModel.from_pretrained(
-        "fusing/stable-diffusion-v1-5-controlnet-openpose",
-        torch_dtype=torch.float16
-    )
-
-    return controlnet, image
-
-def stable_diffusion_controlnet_pose(
-    stable_model_path: str,
-    image_path: str,
-    prompt: str,
-    negative_prompt: str,
-    num_samples: int,
-    guidance_scale: int,
-    num_inference_step: int,
-):
-
-    controlnet, image = controlnet_pose(image_path=image_path)
-
-    pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        pretrained_model_name_or_path=stable_model_path,
-        controlnet=controlnet,
-        safety_checker=None,
-        torch_dtype=torch.float16
-    )
-
-    pipe.to("cuda")
-    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-    pipe.enable_xformers_memory_efficient_attention()
-
-    output = pipe(
-        prompt=prompt,
-        image=image,
-        negative_prompt=negative_prompt,
-        num_images_per_prompt=num_samples,
-        num_inference_steps=num_inference_step,
-        guidance_scale=guidance_scale,
-    ).images
-
-    return output
controlnet/controlnet_scribble.py DELETED
@@ -1,54 +0,0 @@
-from diffusers import (StableDiffusionControlNetPipeline,
-                       ControlNetModel, UniPCMultistepScheduler)
-
-from controlnet_aux import HEDdetector
-
-from PIL import Image
-import torch
-
-
-def controlnet_scribble(image_path: str):
-    hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')
-
-    image = Image.open(image_path)
-    image = hed(image, scribble=True)
-
-    controlnet = ControlNetModel.from_pretrained(
-        "fusing/stable-diffusion-v1-5-controlnet-scribble", torch_dtype=torch.float16
-    )
-
-    return controlnet, image
-
-def stable_diffusion_controlnet_scribble(
-    stable_model_path: str,
-    image_path: str,
-    prompt: str,
-    negative_prompt: str,
-    num_samples: int,
-    guidance_scale: int,
-    num_inference_step: int,
-):
-
-    controlnet, image = controlnet_scribble(image_path=image_path)
-
-    pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        pretrained_model_name_or_path=stable_model_path,
-        controlnet=controlnet,
-        safety_checker=None,
-        torch_dtype=torch.float16
-    )
-
-    pipe.to("cuda")
-    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-    pipe.enable_xformers_memory_efficient_attention()
-
-    output = pipe(
-        prompt=prompt,
-        image=image,
-        negative_prompt=negative_prompt,
-        num_images_per_prompt=num_samples,
-        num_inference_steps=num_inference_step,
-        guidance_scale=guidance_scale,
-    ).images
-
-    return output
controlnet/controlnet_seg.py DELETED
@@ -1,113 +0,0 @@
-from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
-import torch
-from diffusers import (StableDiffusionControlNetPipeline,
-                       ControlNetModel, UniPCMultistepScheduler)
-
-
-from PIL import Image
-import numpy as np
-import torch
-
-
-def ade_palette():
-    """ADE20K palette that maps each class to RGB values."""
-    return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
-            [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
-            [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
-            [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
-            [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
-            [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
-            [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
-            [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
-            [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
-            [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
-            [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
-            [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
-            [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
-            [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
-            [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
-            [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
-            [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
-            [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
-            [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
-            [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
-            [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
-            [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
-            [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
-            [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
-            [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
-            [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
-            [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
-            [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
-            [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
-            [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
-            [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
-            [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
-            [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
-            [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
-            [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
-            [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
-            [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
-            [102, 255, 0], [92, 0, 255]]
-
-
-def controlnet_seg(image_path: str):
-    image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
-    image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
-
-    image = Image.open(image_path).convert('RGB')
-    pixel_values = image_processor(image, return_tensors="pt").pixel_values
-
-    with torch.no_grad():
-        outputs = image_segmentor(pixel_values)
-
-    seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
-
-    color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
-    palette = np.array(ade_palette())
-
-    for label, color in enumerate(palette):
-        color_seg[seg == label, :] = color
-
-    color_seg = color_seg.astype(np.uint8)
-    image = Image.fromarray(color_seg)
-    controlnet = ControlNetModel.from_pretrained(
-        "fusing/stable-diffusion-v1-5-controlnet-seg", torch_dtype=torch.float16
-    )
-
-    return controlnet, image
-
-
-def stable_diffusion_controlnet_seg(
-    stable_model_path: str,
-    image_path: str,
-    prompt: str,
-    negative_prompt: str,
-    num_samples: int,
-    guidance_scale: int,
-    num_inference_step: int,
-):
-
-    controlnet, image = controlnet_seg(image_path=image_path)
-
-    pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        pretrained_model_name_or_path=stable_model_path,
-        controlnet=controlnet,
-        safety_checker=None,
-        torch_dtype=torch.float16
-    )
-
-    pipe.to("cuda")
-    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-    pipe.enable_xformers_memory_efficient_attention()
-
-    output = pipe(
-        prompt=prompt,
-        image=image,
-        negative_prompt=negative_prompt,
-        num_images_per_prompt=num_samples,
-        num_inference_steps=num_inference_step,
-        guidance_scale=guidance_scale,
-    ).images
-
-    return output
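
Note: the deleted file named its preprocessing helper controlnet_mlsd, an apparent copy-paste slip from the MLSD module; it is shown above as controlnet_seg, which is what the function actually does. The only non-obvious step in that helper is the palette loop, which colorizes the class-id map one label at a time via a boolean mask. A self-contained sketch with a synthetic 2x2 label map (values invented for illustration):

import numpy as np

seg = np.array([[0, 3],
                [3, 0]])                           # hypothetical map of ADE20K class ids

palette = np.array([[120, 120, 120], [180, 120, 120],
                    [6, 230, 230], [80, 50, 50]])  # first four ADE20K colors

color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
for label, color in enumerate(palette):
    # seg == label selects every pixel of this class; the RGB triple broadcasts in.
    color_seg[seg == label, :] = color

print(color_seg[0, 0])  # [120 120 120], the color for class 0
print(color_seg[0, 1])  # [ 80  50  50], the color for class 3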