Spaces:
Running
on
L40S
Running
on
L40S
BestWishYsh
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -135,6 +135,7 @@ def generate(
|
|
135 |
num_inference_steps: int,
|
136 |
guidance_scale: float,
|
137 |
seed: int = 42,
|
|
|
138 |
scale_status: bool = False,
|
139 |
rife_status: bool = False,
|
140 |
):
|
@@ -162,11 +163,14 @@ def generate(
|
|
162 |
image = ImageOps.exif_transpose(Image.fromarray(tensor))
|
163 |
|
164 |
prompt = prompt.strip('"')
|
|
|
|
|
165 |
|
166 |
generator = torch.Generator(device).manual_seed(seed) if seed else None
|
167 |
|
168 |
video_pt = pipe(
|
169 |
prompt=prompt,
|
|
|
170 |
image=image,
|
171 |
num_videos_per_prompt=1,
|
172 |
num_inference_steps=num_inference_steps,
|
@@ -260,6 +264,7 @@ with gr.Blocks() as demo:
|
|
260 |
with gr.Accordion("IPT2V: Face Input", open=True):
|
261 |
image_input = gr.Image(label="Input Image (should contain clear face, preferably half-body or full-body image)")
|
262 |
prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here. ConsisID has high requirements for prompt quality. You can use GPT-4o to refine the input text prompt, example can be found on our github.", lines=5)
|
|
|
263 |
with gr.Accordion("Examples", open=False):
|
264 |
examples_component_images = gr.Examples(
|
265 |
examples_images,
|
@@ -356,6 +361,7 @@ with gr.Blocks() as demo:
|
|
356 |
|
357 |
def run(
|
358 |
prompt,
|
|
|
359 |
image_input,
|
360 |
seed_value,
|
361 |
scale_status,
|
@@ -365,6 +371,7 @@ with gr.Blocks() as demo:
|
|
365 |
batch_video_frames, seed = generate(
|
366 |
prompt,
|
367 |
image_input,
|
|
|
368 |
num_inference_steps=50,
|
369 |
guidance_scale=7.0,
|
370 |
seed=seed_value,
|
@@ -384,7 +391,7 @@ with gr.Blocks() as demo:
|
|
384 |
|
385 |
generate_button.click(
|
386 |
fn=run,
|
387 |
-
inputs=[prompt, image_input, seed_param, enable_scale, enable_rife],
|
388 |
outputs=[video_output, download_video_button, download_gif_button, seed_text],
|
389 |
)
|
390 |
|
|
|
135 |
num_inference_steps: int,
|
136 |
guidance_scale: float,
|
137 |
seed: int = 42,
|
138 |
+
negative_prompt: str = None,
|
139 |
scale_status: bool = False,
|
140 |
rife_status: bool = False,
|
141 |
):
|
|
|
163 |
image = ImageOps.exif_transpose(Image.fromarray(tensor))
|
164 |
|
165 |
prompt = prompt.strip('"')
|
166 |
+
if negative_prompt:
|
167 |
+
negative_prompt = negative_prompt.strip('"')
|
168 |
|
169 |
generator = torch.Generator(device).manual_seed(seed) if seed else None
|
170 |
|
171 |
video_pt = pipe(
|
172 |
prompt=prompt,
|
173 |
+
negative_prompt=negative_prompt,
|
174 |
image=image,
|
175 |
num_videos_per_prompt=1,
|
176 |
num_inference_steps=num_inference_steps,
|
|
|
264 |
with gr.Accordion("IPT2V: Face Input", open=True):
|
265 |
image_input = gr.Image(label="Input Image (should contain clear face, preferably half-body or full-body image)")
|
266 |
prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here. ConsisID has high requirements for prompt quality. You can use GPT-4o to refine the input text prompt, example can be found on our github.", lines=5)
|
267 |
+
negative_prompt = gr.Textbox(label="Negative Prompt (Default is None)", placeholder="Enter your negative prompt here. Default is None", lines=1)
|
268 |
with gr.Accordion("Examples", open=False):
|
269 |
examples_component_images = gr.Examples(
|
270 |
examples_images,
|
|
|
361 |
|
362 |
def run(
|
363 |
prompt,
|
364 |
+
negative_prompt,
|
365 |
image_input,
|
366 |
seed_value,
|
367 |
scale_status,
|
|
|
371 |
batch_video_frames, seed = generate(
|
372 |
prompt,
|
373 |
image_input,
|
374 |
+
negative_prompt=negative_prompt,
|
375 |
num_inference_steps=50,
|
376 |
guidance_scale=7.0,
|
377 |
seed=seed_value,
|
|
|
391 |
|
392 |
generate_button.click(
|
393 |
fn=run,
|
394 |
+
inputs=[prompt, negative_prompt, image_input, seed_param, enable_scale, enable_rife],
|
395 |
outputs=[video_output, download_video_button, download_gif_button, seed_text],
|
396 |
)
|
397 |
|