RamAnanth1 committed
Commit e05666e
1 Parent(s): ecb48ea
Update app.py

app.py CHANGED
@@ -15,17 +15,17 @@ high_threshold = 200
 
 
 # Models
-controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
-pipe_canny = StableDiffusionControlNetPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5", controlnet=controlnet_canny, safety_checker=None, torch_dtype=torch.float16
-)
-pipe_canny.scheduler = UniPCMultistepScheduler.from_config(pipe_canny.scheduler.config)
+# controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
+# pipe_canny = StableDiffusionControlNetPipeline.from_pretrained(
+#     "runwayml/stable-diffusion-v1-5", controlnet=controlnet_canny, safety_checker=None, torch_dtype=torch.float16
+# )
+# pipe_canny.scheduler = UniPCMultistepScheduler.from_config(pipe_canny.scheduler.config)
 
-# This command loads the individual model components on GPU on-demand. So, we don't
-# need to explicitly call pipe.to("cuda").
-pipe_canny.enable_model_cpu_offload()
+# # This command loads the individual model components on GPU on-demand. So, we don't
+# # need to explicitly call pipe.to("cuda").
+# pipe_canny.enable_model_cpu_offload()
 
-pipe_canny.enable_xformers_memory_efficient_attention()
+# pipe_canny.enable_xformers_memory_efficient_attention()
 
 # Generator seed,
 generator = torch.manual_seed(0)
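For reference, the Canny branch that this hunk disables can still be run on its own. The following is a minimal standalone sketch assembled from the commented-out lines above; it assumes a diffusers version with ControlNet support (0.14+), accelerate installed for CPU offload, xformers available, and a CUDA-capable GPU, with the same checkpoint names as in the diff.

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler

# Load the Canny ControlNet adapter and attach it to Stable Diffusion v1.5,
# as in the lines removed above.
controlnet_canny = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
)
pipe_canny = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet_canny,
    safety_checker=None,
    torch_dtype=torch.float16,
)
pipe_canny.scheduler = UniPCMultistepScheduler.from_config(pipe_canny.scheduler.config)

# Offload model components to CPU and move them to the GPU on demand,
# so an explicit pipe_canny.to("cuda") is not needed.
pipe_canny.enable_model_cpu_offload()
pipe_canny.enable_xformers_memory_efficient_attention()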
@@ -47,38 +47,38 @@ pipe_pose.enable_model_cpu_offload()
 pipe_pose.enable_xformers_memory_efficient_attention()
 
 
-def get_canny_filter(image):
+# def get_canny_filter(image):
 
-    if not isinstance(image, np.ndarray):
-        image = np.array(image)
+#     if not isinstance(image, np.ndarray):
+#         image = np.array(image)
 
-    image = cv2.Canny(image, low_threshold, high_threshold)
-    image = image[:
-    , :, None]
-    image = np.concatenate([image, image, image], axis=2)
-    canny_image = Image.fromarray(image)
-    return canny_image
+#     image = cv2.Canny(image, low_threshold, high_threshold)
+#     image = image[:
+#     , :, None]
+#     image = np.concatenate([image, image, image], axis=2)
+#     canny_image = Image.fromarray(image)
+#     return canny_image
 
 def get_pose(image):
     return pose_model(image)
 
 def process(input_image, prompt, input_control):
     # TODO: Add other control tasks
-    if input_control == "Pose":
-        return process_pose(input_image, prompt)
-    else:
-        return process_canny(input_image, prompt)
-
-def process_canny(input_image, prompt):
-    canny_image = get_canny_filter(input_image)
-    output = pipe_canny(
-        prompt,
-        canny_image,
-        generator=generator,
-        num_images_per_prompt=1,
-        num_inference_steps=20,
-    )
-    return [canny_image,output.images[0]]
+    #if input_control == "Pose":
+    return process_pose(input_image, prompt)
+    # else:
+    #     return process_canny(input_image, prompt)
+
+# def process_canny(input_image, prompt):
+#     canny_image = get_canny_filter(input_image)
+#     output = pipe_canny(
+#         prompt,
+#         canny_image,
+#         generator=generator,
+#         num_images_per_prompt=1,
+#         num_inference_steps=20,
+#     )
+#     return [canny_image,output.images[0]]
 
 
 def process_pose(input_image, prompt):
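The helper commented out in this hunk is ordinary OpenCV Canny edge detection expanded to three channels, which is the control-image format the Canny ControlNet expects. A self-contained sketch of the same preprocessing follows; high_threshold=200 comes from the hunk header above, while low_threshold=100 is an assumption, since its value sits outside this diff.

import cv2
import numpy as np
from PIL import Image

def get_canny_filter(image, low_threshold=100, high_threshold=200):
    # Accept PIL images or arrays, mirroring the commented-out helper.
    if not isinstance(image, np.ndarray):
        image = np.array(image)
    # Single-channel uint8 edge map from OpenCV.
    edges = cv2.Canny(image, low_threshold, high_threshold)
    # Replicate the edge map into three identical channels so it can be
    # passed to the pipeline as a regular RGB control image.
    edges = edges[:, :, None]
    edges = np.concatenate([edges, edges, edges], axis=2)
    return Image.fromarray(edges)

With this hunk applied, process() ignores its input_control argument and always routes to process_pose().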
@@ -109,22 +109,22 @@ with block:
     with gr.Row():
         with gr.Column():
             input_image = gr.Image(source='upload', type="numpy")
-            input_control = gr.Dropdown(control_task_list, value="Scribble", label="Control Task")
+            # input_control = gr.Dropdown(control_task_list, value="Scribble", label="Control Task")
             prompt = gr.Textbox(label="Prompt")
             run_button = gr.Button(label="Run")
 
 
         with gr.Column():
             result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
-    ips = [input_image, prompt, input_control]
+    ips = [input_image, prompt]
     run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
     examples_list = [
-        [
-        "bird.png",
-        "bird",
-        "Canny Edge Map"
+        # [
+        # "bird.png",
+        # "bird",
+        # "Canny Edge Map"
 
-        ],
+        # ],
 
         # [
         # "turtle.png",
@@ -159,7 +159,7 @@ with block:
 
         ]
     ]
-    examples = gr.Examples(examples=examples_list,inputs = [input_image, prompt, input_control], outputs = [result_gallery], cache_examples = True, fn = process)
+    examples = gr.Examples(examples=examples_list,inputs = [input_image, prompt], outputs = [result_gallery], cache_examples = True, fn = process)
     gr.Markdown("![visitor badge](https://visitor-badge.glitch.me/badge?page_id=RamAnanth1.ControlNet)")
 
 block.launch(debug = True)
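With the Dropdown commented out, the Blocks UI now exposes only an image and a prompt, and gr.Examples with cache_examples=True precomputes outputs by calling the supplied fn on each example at startup, which is why outputs and fn appear on that line. A stripped-down sketch of the resulting wiring, reusing the gradio 3.x calls already in the file; process_pose_only is a hypothetical stand-in for the app's process_pose, used here because app.py still defines process(input_image, prompt, input_control) while ips now carries only two components.

import gradio as gr

def process_pose_only(input_image, prompt):
    # Hypothetical stand-in for process_pose(...) in app.py, which would run the
    # pose-conditioned pipeline and return [pose_map, generated_image].
    # Here it just echoes the input so the sketch runs without the models.
    return [input_image]

block = gr.Blocks()
with block:
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button(label="Run")
        with gr.Column():
            result_gallery = gr.Gallery(label='Output', show_label=False,
                                        elem_id="gallery").style(grid=2, height='auto')
    ips = [input_image, prompt]
    run_button.click(fn=process_pose_only, inputs=ips, outputs=[result_gallery])

block.launch(debug=True)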