Commit cee5490 by patrickvonplaten (1 parent: 912b6df)
benchmark/if.py ADDED
@@ -0,0 +1,32 @@
+ #!/usr/bin/env python3
+ from diffusers import DiffusionPipeline
+ import torch
+
+ run_compile = True  # Set True / False
+
+ pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-M-v1.0", variant="fp16", text_encoder=None, torch_dtype=torch.float16)
+ pipe.to("cuda")
+ pipe_2 = DiffusionPipeline.from_pretrained("DeepFloyd/IF-II-M-v1.0", variant="fp16", text_encoder=None, torch_dtype=torch.float16)
+ pipe_2.to("cuda")
+ pipe_3 = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16)
+ pipe_3.to("cuda")
+
+
+ pipe.unet.to(memory_format=torch.channels_last)
+ pipe_2.unet.to(memory_format=torch.channels_last)
+ pipe_3.unet.to(memory_format=torch.channels_last)
+
+ if run_compile:
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+     pipe_2.unet = torch.compile(pipe_2.unet, mode="reduce-overhead", fullgraph=True)
+     pipe_3.unet = torch.compile(pipe_3.unet, mode="reduce-overhead", fullgraph=True)
+
+ prompt = "the blue hulk"
+
+ # Dummy T5-sized embeddings stand in for the text encoder (loaded with text_encoder=None).
+ prompt_embeds = torch.randn((1, 2, 4096), dtype=torch.float16)
+ neg_prompt_embeds = torch.randn((1, 2, 4096), dtype=torch.float16)
+
+ for _ in range(3):
+     image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=neg_prompt_embeds, output_type="pt").images
+     image_2 = pipe_2(image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=neg_prompt_embeds, output_type="pt").images
+     image_3 = pipe_3(prompt=prompt, image=image_2, noise_level=100).images
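The script loops three times but records no timings itself. A minimal timing sketch, not part of the commit, that could wrap any of the stage calls above; `timed` and `n_runs` are hypothetical names introduced here. The explicit `torch.cuda.synchronize()` matters because pipeline calls return once the CUDA work is enqueued, not when it finishes:

```python
import time
import torch

def timed(label, fn, n_runs=3):
    fn()  # warmup; with torch.compile this first call includes compilation
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(n_runs):
        fn()
    torch.cuda.synchronize()
    print(f"{label}: {(time.perf_counter() - start) / n_runs:.3f} s / run")

# Hypothetical usage with the objects defined above:
# timed("IF stage 1", lambda: pipe(prompt_embeds=prompt_embeds,
#       negative_prompt_embeds=neg_prompt_embeds, output_type="pt"))
```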
benchmark/if_no_compile.py ADDED
@@ -0,0 +1,32 @@
+ #!/usr/bin/env python3
+ from diffusers import DiffusionPipeline
+ import torch
+
+ run_compile = False  # Set True / False
+
+ pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-M-v1.0", variant="fp16", text_encoder=None, torch_dtype=torch.float16)
+ pipe.to("cuda")
+ pipe_2 = DiffusionPipeline.from_pretrained("DeepFloyd/IF-II-M-v1.0", variant="fp16", text_encoder=None, torch_dtype=torch.float16)
+ pipe_2.to("cuda")
+ pipe_3 = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16)
+ pipe_3.to("cuda")
+
+
+ pipe.unet.to(memory_format=torch.channels_last)
+ pipe_2.unet.to(memory_format=torch.channels_last)
+ pipe_3.unet.to(memory_format=torch.channels_last)
+
+ if run_compile:
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+     pipe_2.unet = torch.compile(pipe_2.unet, mode="reduce-overhead", fullgraph=True)
+     pipe_3.unet = torch.compile(pipe_3.unet, mode="reduce-overhead", fullgraph=True)
+
+ prompt = "the blue hulk"
+
+ # Dummy T5-sized embeddings stand in for the text encoder (loaded with text_encoder=None).
+ prompt_embeds = torch.randn((1, 2, 4096), dtype=torch.float16)
+ neg_prompt_embeds = torch.randn((1, 2, 4096), dtype=torch.float16)
+
+ for _ in range(3):
+     image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=neg_prompt_embeds, output_type="pt").images
+     image_2 = pipe_2(image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=neg_prompt_embeds, output_type="pt").images
+     image_3 = pipe_3(prompt=prompt, image=image_2, noise_level=100).images
benchmark/sd_controlnet.py ADDED
@@ -0,0 +1,34 @@
+ #!/usr/bin/env python3
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
+ import requests
+ import torch
+ from PIL import Image
+ from io import BytesIO
+
+ url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+
+ response = requests.get(url)
+ init_image = Image.open(BytesIO(response.content)).convert("RGB")
+ init_image = init_image.resize((512, 512))
+
+ path = "runwayml/stable-diffusion-v1-5"
+
+ run_compile = True  # Set True / False
+ controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(
+     path, controlnet=controlnet, torch_dtype=torch.float16
+ )
+
+ pipe = pipe.to("cuda:0")
+ pipe.unet.to(memory_format=torch.channels_last)
+ pipe.controlnet.to(memory_format=torch.channels_last)
+
+ if run_compile:
+     print("Run torch compile")
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+     pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True)
+
+ prompt = "ghibli style, a fantasy landscape with castles"
+
+ for _ in range(3):
+     image = pipe(prompt=prompt, image=init_image).images[0]
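This benchmark feeds the raw sketch straight in as the conditioning image, which is fine for measuring speed. For output quality, `sd-controlnet-canny` expects a Canny edge map as conditioning; a sketch of that preprocessing step, assuming `opencv-python` and `numpy` are available (the 100/200 thresholds are arbitrary choices here, not from the commit):

```python
import cv2
import numpy as np
from PIL import Image

# Turn the PIL image into the 3-channel edge map this ControlNet was trained on.
arr = np.array(init_image)
edges = cv2.Canny(arr, 100, 200)          # single-channel edge map
edges = np.stack([edges] * 3, axis=-1)    # replicate to 3 channels
canny_image = Image.fromarray(edges)

# image = pipe(prompt=prompt, image=canny_image).images[0]
```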
benchmark/sd_controlnet_no_compile.py ADDED
@@ -0,0 +1,34 @@
+ #!/usr/bin/env python3
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
+ import requests
+ import torch
+ from PIL import Image
+ from io import BytesIO
+
+ url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+
+ response = requests.get(url)
+ init_image = Image.open(BytesIO(response.content)).convert("RGB")
+ init_image = init_image.resize((512, 512))
+
+ path = "runwayml/stable-diffusion-v1-5"
+
+ run_compile = False  # Set True / False
+ controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(
+     path, controlnet=controlnet, torch_dtype=torch.float16
+ )
+
+ pipe = pipe.to("cuda:0")
+ pipe.unet.to(memory_format=torch.channels_last)
+ pipe.controlnet.to(memory_format=torch.channels_last)
+
+ if run_compile:
+     print("Run torch compile")
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+     pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True)
+
+ prompt = "ghibli style, a fantasy landscape with castles"
+
+ for _ in range(3):
+     image = pipe(prompt=prompt, image=init_image).images[0]
benchmark/sd_img2img.py ADDED
@@ -0,0 +1,29 @@
+ #!/usr/bin/env python3
+ from diffusers import StableDiffusionImg2ImgPipeline
+ import requests
+ import torch
+ from PIL import Image
+ from io import BytesIO
+
+ url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+
+ response = requests.get(url)
+ init_image = Image.open(BytesIO(response.content)).convert("RGB")
+ init_image = init_image.resize((512, 512))
+
+ path = "runwayml/stable-diffusion-v1-5"
+
+ run_compile = True  # Set True / False
+
+ pipe = StableDiffusionImg2ImgPipeline.from_pretrained(path, torch_dtype=torch.float16)
+ pipe = pipe.to("cuda:0")
+ pipe.unet.to(memory_format=torch.channels_last)
+
+ if run_compile:
+     print("Run torch compile")
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+ prompt = "ghibli style, a fantasy landscape with castles"
+
+ for _ in range(3):
+     image = pipe(prompt=prompt, image=init_image).images[0]
benchmark/sd_img2img_no_compile.py ADDED
@@ -0,0 +1,29 @@
+ #!/usr/bin/env python3
+ from diffusers import StableDiffusionImg2ImgPipeline
+ import requests
+ import torch
+ from PIL import Image
+ from io import BytesIO
+
+ url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+
+ response = requests.get(url)
+ init_image = Image.open(BytesIO(response.content)).convert("RGB")
+ init_image = init_image.resize((512, 512))
+
+ path = "runwayml/stable-diffusion-v1-5"
+
+ run_compile = False  # Set True / False
+
+ pipe = StableDiffusionImg2ImgPipeline.from_pretrained(path, torch_dtype=torch.float16)
+ pipe = pipe.to("cuda:0")
+ pipe.unet.to(memory_format=torch.channels_last)
+
+ if run_compile:
+     print("Run torch compile")
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+ prompt = "ghibli style, a fantasy landscape with castles"
+
+ for _ in range(3):
+     image = pipe(prompt=prompt, image=init_image).images[0]
benchmark/sd_inpaint.py ADDED
@@ -0,0 +1,36 @@
+ #!/usr/bin/env python3
+ from diffusers import StableDiffusionInpaintPipeline
+ import requests
+ import torch
+ from PIL import Image
+ from io import BytesIO
+
+ url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+
+ def download_image(url):
+     response = requests.get(url)
+     return Image.open(BytesIO(response.content)).convert("RGB")
+
+
+ img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
+ mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
+
+ init_image = download_image(img_url).resize((512, 512))
+ mask_image = download_image(mask_url).resize((512, 512))
+
+ path = "runwayml/stable-diffusion-inpainting"
+
+ run_compile = True  # Set True / False
+
+ pipe = StableDiffusionInpaintPipeline.from_pretrained(path, torch_dtype=torch.float16)
+ pipe = pipe.to("cuda:0")
+ pipe.unet.to(memory_format=torch.channels_last)
+
+ if run_compile:
+     print("Run torch compile")
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+ prompt = "ghibli style, a fantasy landscape with castles"
+
+ for _ in range(3):
+     image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
benchmark/sd_inpaint_no_compile.py ADDED
@@ -0,0 +1,36 @@
+ #!/usr/bin/env python3
+ from diffusers import StableDiffusionInpaintPipeline
+ import requests
+ import torch
+ from PIL import Image
+ from io import BytesIO
+
+ url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+
+ def download_image(url):
+     response = requests.get(url)
+     return Image.open(BytesIO(response.content)).convert("RGB")
+
+
+ img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
+ mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
+
+ init_image = download_image(img_url).resize((512, 512))
+ mask_image = download_image(mask_url).resize((512, 512))
+
+ path = "runwayml/stable-diffusion-inpainting"
+
+ run_compile = False  # Set True / False
+
+ pipe = StableDiffusionInpaintPipeline.from_pretrained(path, torch_dtype=torch.float16)
+ pipe = pipe.to("cuda:0")
+ pipe.unet.to(memory_format=torch.channels_last)
+
+ if run_compile:
+     print("Run torch compile")
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+ prompt = "ghibli style, a fantasy landscape with castles"
+
+ for _ in range(3):
+     image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
benchmark/sd_txt2img.py ADDED
@@ -0,0 +1,20 @@
+ #!/usr/bin/env python3
+ from diffusers import DiffusionPipeline
+ import torch
+
+ path = "runwayml/stable-diffusion-v1-5"
+
+ run_compile = True  # Set True / False
+
+ pipe = DiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
+ pipe = pipe.to("cuda:0")
+ pipe.unet.to(memory_format=torch.channels_last)
+
+ if run_compile:
+     print("Run torch compile")
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+ prompt = "ghibli style, a fantasy landscape with castles"
+
+ for _ in range(3):
+     images = pipe(prompt=prompt).images
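With `mode="reduce-overhead"`, the first pipeline call pays the one-time compilation cost, which is presumably why every script loops three times. A sketch, not part of the commit, of timing the iterations separately so that cost is visible (the loop below assumes the `pipe` and `prompt` defined above):

```python
import time
import torch

for i in range(3):
    torch.cuda.synchronize()
    start = time.perf_counter()
    images = pipe(prompt=prompt).images
    torch.cuda.synchronize()
    # Iteration 0 includes torch.compile's compilation when run_compile=True.
    print(f"iteration {i}: {time.perf_counter() - start:.3f} s")
```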
benchmark/sd_txt2img_no_compile.py ADDED
@@ -0,0 +1,20 @@
+ #!/usr/bin/env python3
+ from diffusers import DiffusionPipeline
+ import torch
+
+ path = "runwayml/stable-diffusion-v1-5"
+
+ run_compile = False  # Set True / False
+
+ pipe = DiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
+ pipe = pipe.to("cuda:0")
+ pipe.unet.to(memory_format=torch.channels_last)
+
+ if run_compile:
+     print("Run torch compile")
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+ prompt = "ghibli style, a fantasy landscape with castles"
+
+ for _ in range(3):
+     images = pipe(prompt=prompt).images
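Each benchmark ships as a compile / no_compile pair that differs only in the `run_compile` flag. If desired, each pair could be collapsed into a single script with a command-line switch; a minimal sketch for the txt2img case (the `--compile` flag and the single-script layout are a hypothetical alternative, not what the commit does):

```python
#!/usr/bin/env python3
# Hypothetical merge of sd_txt2img.py and sd_txt2img_no_compile.py.
import argparse
import torch
from diffusers import DiffusionPipeline

parser = argparse.ArgumentParser()
parser.add_argument("--compile", action="store_true", help="wrap the UNet in torch.compile")
args = parser.parse_args()

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe = pipe.to("cuda:0")
pipe.unet.to(memory_format=torch.channels_last)

if args.compile:
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

prompt = "ghibli style, a fantasy landscape with castles"
for _ in range(3):
    images = pipe(prompt=prompt).images
```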