JCTN committed
Commit 6b5b09b
1 Parent(s): 76b1e3c

Upload 3 files

Files changed (3):
  1. app.py +227 -0
  2. gitattributes.txt +34 -0
  3. requirements.txt +12 -0
app.py ADDED
@@ -0,0 +1,227 @@
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
+ from segment_anything import sam_model_registry, SamAutomaticMaskGenerator
+ from PIL import Image
+ import gradio as gr
+ import numpy as np
+ import requests
+ import torch
+ import gc
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # Download and Create SAM Model
+
+ print("[Downloading SAM Weights]")
+ SAM_URL = "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth"
+
+ r = requests.get(SAM_URL, allow_redirects=True)
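+ # NOTE: this buffers the entire multi-gigabyte checkpoint in memory before
+ # writing it out; requests.get(SAM_URL, stream=True) with chunked writes
+ # would lower peak RAM at the cost of a slightly longer download loop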
+
+ print("[Writing SAM Weights]")
+
+ with open("./sam_vit_h_4b8939.pth", "wb") as sam_weights:
+     sam_weights.write(r.content)
+
+ del r
+ gc.collect()
+
+ sam = sam_model_registry["vit_h"](checkpoint="./sam_vit_h_4b8939.pth").to(device)
+
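+ # SamAutomaticMaskGenerator proposes masks over a regular grid of point
+ # prompts, so whole-image segmentation needs no user clicks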
+ mask_generator = SamAutomaticMaskGenerator(sam)
+ gc.collect()
+
+ # Create ControlNet Pipeline
+
+ print("Creating ControlNet Pipeline")
+
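+ # fp16 weights roughly halve the GPU memory needed for ControlNet + Stable Diffusion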
+ controlnet = ControlNetModel.from_pretrained(
+     "mfidabel/controlnet-segment-anything", torch_dtype=torch.float16
+ ).to(device)
+
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(
+     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, safety_checker=None
+ ).to(device)
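+ # safety_checker=None disables Stable Diffusion's NSFW filter for this demo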
+
+
+ # Description
+ title = "# 🧨 ControlNet on Segment Anything 🤗"
+ description = """This is a demo of 🧨 ControlNet conditioned on Meta's [Segment Anything Model](https://segment-anything.com/).
+
+ Upload an image, segment it with Segment Anything, write a prompt, and generate images 🤗
+
+ ⌛️ It takes about 20 seconds to generate 4 samples; for faster results, reduce Nº Samples to 1.
+
+ You can obtain the segmentation map of any image through this Colab: [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mfidabel/JAX_SPRINT_2023/blob/main/Segment_Anything_JAX_SPRINT.ipynb)
+
+ A huge thanks goes out to @GoogleCloud for providing the powerful TPUs that enabled us to train this model, and to the @HuggingFace team for organizing the sprint.
+
+ Check out our [Model Card 🧨](https://huggingface.co/mfidabel/controlnet-segment-anything)
+
+ """
+
+ about = """
+ # 👨‍💻 About the model
+
+ This [model](https://huggingface.co/mfidabel/controlnet-segment-anything) is based on [ControlNet](https://huggingface.co/blog/controlnet), which lets us generate images guided by a condition image. For this model, we chose the segmentation maps produced by Meta's new segmentation model, the [Segment Anything Model](https://github.com/facebookresearch/segment-anything), as the condition image, and trained the model to generate images that follow the structure of the segmentation map and the given text prompt.
+
+
+ # 💾 About the dataset
+
+ For training, we generated a segmented dataset based on the [COYO-700M](https://huggingface.co/datasets/kakaobrain/coyo-700m) dataset, which provided the images and text prompts; the segmentation maps were produced with the [Segment Anything Model](https://github.com/facebookresearch/segment-anything). We created 8k samples to train our model on, which isn't a lot, but other team responsibilities and time constraints made it hard to dedicate time to generating a larger dataset. Despite those constraints, we still achieved some nice results 🙌
+
+ You can check the generated datasets below ⬇️
+ - [sam-coyo-2k](https://huggingface.co/datasets/mfidabel/sam-coyo-2k)
+ - [sam-coyo-2.5k](https://huggingface.co/datasets/mfidabel/sam-coyo-2.5k)
+ - [sam-coyo-3k](https://huggingface.co/datasets/mfidabel/sam-coyo-3k)
+
+ """
+
+ gif_html = """ <img src="https://github.com/mfidabel/JAX_SPRINT_2023/blob/8632f0fde7388d7a4fc57225c96ef3b8411b3648/EX_1.gif?raw=true" alt="" height="50%" class="about"> """
+
+ examples = [["photo of a futuristic dining table, high quality, tricolor", "low quality, deformed, blurry, points", "examples/condition_image_1.jpeg"],
+             ["a monochrome photo of Henry Cavill wearing a shirt, high quality", "low quality, low res, deformed", "examples/condition_image_2.jpeg"],
+             ["photo of a japanese living room, high quality, coherent", "low quality, colors, saturation, extreme brightness, blurry, low res", "examples/condition_image_3.jpeg"],
+             ["living room, detailed, high quality", "low quality, low resolution, render, oversaturated, low contrast", "examples/condition_image_4.jpeg"],
+             ["painting of the bodiam castle, Vincent van Gogh style, Starry Night", "low quality, low resolution, render, oversaturated, low contrast", "examples/condition_image_5.jpeg"],
+             ["painting of food, olive oil can, purple wine, green cabbage, chili peppers, pablo picasso style, high quality", "low quality, low resolution, render, oversaturated, low contrast, realistic", "examples/condition_image_6.jpeg"],
+             ["Katsushika Hokusai painting of mountains, a sky and desert landscape, The Great Wave off Kanagawa style, colorful",
+              "low quality, low resolution, render, oversaturated, low contrast, realistic", "examples/condition_image_7.jpeg"]]
+
+ default_example = examples[4]
+
+ examples = examples[::-1]
+
+ css = "h1 { text-align: center } .about { text-align: justify; padding-left: 10%; padding-right: 10%; }"
+
+ # Inference Function
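+ # show_anns flattens SAM's mask list into a single RGB segmentation map:
+ # masks are drawn largest-first so smaller masks stay visible on top, each
+ # painted one random color, with the binary mask used as the paste alpha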
+ def show_anns(anns):
+     if len(anns) == 0:
+         return
+     sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
+     h, w = anns[0]['segmentation'].shape
+     final_img = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8), mode="RGB")
+     for ann in sorted_anns:
+         m = ann['segmentation']
+         img = np.empty((m.shape[0], m.shape[1], 3), dtype=np.uint8)
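+         # one random value per channel -> a single flat random color for this mask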
106
+ for i in range(3):
107
+ img[:,:,i] = np.random.randint(255, dtype=np.uint8)
108
+ final_img.paste(Image.fromarray(img, mode="RGB"), (0, 0), Image.fromarray(np.uint8(m*255)))
109
+
110
+ return final_img
+
+ def segment_image(image, seed=0):
+     # Generate masks; seed NumPy so the random mask colors are reproducible
+     np.random.seed(int(seed))
+     masks = mask_generator.generate(image)
+     torch.cuda.empty_cache()
+     # Create the map ('seg_map' avoids shadowing the built-in map)
+     seg_map = show_anns(masks)
+     del masks
+     gc.collect()
+     torch.cuda.empty_cache()
+     return seg_map
+
+ def infer(prompts, negative_prompts, image, num_inference_steps=50, seed=4, num_samples=4):
+     # Slider values can arrive as floats; cast once up front
+     num_inference_steps = int(num_inference_steps)
+     num_samples = int(num_samples)
+     try:
+         # Segment Image
+         print("Segmenting Everything")
+         segmented_map = segment_image(image, seed)
+         # First yield: show the segmentation map immediately, with black
+         # placeholders in the gallery while the samples are generated
+         yield segmented_map, [Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))] * num_samples
+         # Generate
+         rng = torch.Generator(device="cpu").manual_seed(int(seed))
+
+         print(f"Generating Prompt: {prompts} \nNegative Prompt: {negative_prompts} \nSamples: {num_samples}")
+         output = pipe([prompts] * num_samples,
+                       [segmented_map] * num_samples,
+                       negative_prompt=[negative_prompts] * num_samples,
+                       generator=rng,
+                       num_inference_steps=num_inference_steps)
+
+         final_image = output.images
+         del output
+
+     except Exception as e:
+         print("Error: " + str(e))
+         final_image = segmented_map = [np.zeros((512, 512, 3), dtype=np.uint8)] * num_samples
+     finally:
+         gc.collect()
+         torch.cuda.empty_cache()
+         # Second yield: the segmentation map again plus the final samples
+         yield segmented_map, final_image
+
+
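+ # Components are created up front and attached to the layout below via .render()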
154
+ cond_img = gr.Image(label="Input", shape=(512, 512), value=default_example[2])\
155
+ .style(height=400)
156
+
157
+ segm_img = gr.Image(label="Segmented Image", shape=(512, 512), interactive=False)\
158
+ .style(height=400)
159
+
160
+ output = gr.Gallery(label="Generated images")\
161
+ .style(height=200, rows=[2], columns=[2], object_fit="contain")
162
+
163
+ prompt = gr.Textbox(lines=1, label="Prompt", value=default_example[0])
164
+ negative_prompt = gr.Textbox(lines=1, label="Negative Prompt", value=default_example[1])
+
+
+ with gr.Blocks(css=css) as demo:
+     with gr.Row():
+         with gr.Column():
+             # Title
+             gr.Markdown(title)
+             # Description
+             gr.Markdown(description)
+
+         with gr.Column():
+             # Examples
+             gr.Markdown(gif_html)
+
+     # Images
+     with gr.Row(variant="panel"):
+         with gr.Column(scale=1):
+             cond_img.render()
+
+         with gr.Column(scale=1):
+             segm_img.render()
+
+         with gr.Column(scale=1):
+             output.render()
+
+     # Submit & Clear
+     with gr.Row():
+         with gr.Column():
+             prompt.render()
+             negative_prompt.render()
+
+         with gr.Column():
+             with gr.Accordion("Advanced options", open=False):
+                 num_steps = gr.Slider(10, 60, 50, step=1, label="Steps")
+                 seed = gr.Slider(0, 1024, 4, step=1, label="Seed")
+                 num_samples = gr.Slider(1, 4, 4, step=1, label="Nº Samples")
+
+             segment_btn = gr.Button("Segment")
+             submit = gr.Button("Segment & Generate Images")
+             # TODO: Download Button
+
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown("Try some of the examples below ⬇️")
+             gr.Examples(examples=examples,
+                         inputs=[prompt, negative_prompt, cond_img],
+                         outputs=output,
+                         fn=infer,
+                         examples_per_page=4)
+
+         with gr.Column():
+             gr.Markdown(about, elem_classes="about")
+
+     submit.click(infer,
+                  inputs=[prompt, negative_prompt, cond_img, num_steps, seed, num_samples],
+                  outputs=[segm_img, output])
+
+     segment_btn.click(segment_image,
+                       inputs=[cond_img, seed],
+                       outputs=segm_img)
+
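+ # queue() is required for the generator-based infer fn to stream its
+ # intermediate yield (the segmentation map) to the UI before the samples finish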
+ demo.queue()
+ demo.launch()
gitattributes.txt ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ transformers
+ flax
+ jax[cuda11_pip]
+ -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
+ jaxlib
+ git+https://github.com/huggingface/diffusers@main
+ opencv-python
+ --extra-index-url https://download.pytorch.org/whl/cu113
+ torch
+ torchvision
+ git+https://github.com/facebookresearch/segment-anything.git
+ accelerate
+ # note: gradio itself is pre-installed by the Hugging Face Spaces runtime