Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -10,7 +10,7 @@ import PIL.Image
|
|
10 |
import gradio as gr
|
11 |
import tensorflow_hub as hub
|
12 |
import matplotlib.pyplot as plt
|
13 |
-
from real_esrgan_app import *
|
14 |
|
15 |
import gradio as gr
|
16 |
import requests
|
@@ -33,6 +33,7 @@ import glob
|
|
33 |
import pathlib
|
34 |
|
35 |
API_TOKEN = os.environ.get("HF_READ_TOKEN")
|
|
|
36 |
|
37 |
'''
|
38 |
dataset = load_dataset("Gustavosta/Stable-Diffusion-Prompts")
|
@@ -57,7 +58,7 @@ def tensor_to_image(tensor):
|
|
57 |
return PIL.Image.fromarray(tensor)
|
58 |
|
59 |
|
60 |
-
def perform_neural_transfer(content_image_input, style_image_input,
|
61 |
content_image = content_image_input.astype(np.float32)[np.newaxis, ...] / 255.
|
62 |
content_image = tf.image.resize(content_image, (400, 600))
|
63 |
|
@@ -73,7 +74,8 @@ def perform_neural_transfer(content_image_input, style_image_input, super_resolu
|
|
73 |
stylized_image = tensor_to_image(stylized_image)
|
74 |
content_image_input = tensor_to_image(content_image_input)
|
75 |
stylized_image = stylized_image.resize(content_image_input.size)
|
76 |
-
|
|
|
77 |
print("super_resolution_type :")
|
78 |
print(super_resolution_type)
|
79 |
#print(super_resolution_type.value)
|
@@ -84,6 +86,7 @@ def perform_neural_transfer(content_image_input, style_image_input, super_resolu
|
|
84 |
print("call else :")
|
85 |
stylized_image = inference(stylized_image, super_resolution_type)
|
86 |
return stylized_image
|
|
|
87 |
|
88 |
list_models = [
|
89 |
#"SDXL-1.0",
|
@@ -422,20 +425,22 @@ def style_transfer_func(content_img, style_img, style_transfer_client = style_tr
|
|
422 |
os.remove(style_im_name)
|
423 |
return Image.open(out)
|
424 |
'''
|
425 |
-
def style_transfer_func(content_img, style_img
|
426 |
assert hasattr(content_img, "save")
|
427 |
assert hasattr(style_img, "save")
|
|
|
|
|
|
|
428 |
content_image_input = np.asarray(content_img)
|
429 |
style_image_input = np.asarray(style_img)
|
430 |
-
out = perform_neural_transfer(content_image_input, style_image_input
|
431 |
assert hasattr(out, "save")
|
432 |
return out
|
433 |
|
434 |
|
435 |
def gen_images_from_event_fact(current_model, event_fact = DEFAULT_PROMPT, role_name = DEFAULT_ROLE,
|
436 |
-
style_pic = None
|
437 |
):
|
438 |
-
assert super_resolution_type in ["SD(Standard Definition)", "HD(High Definition)"]
|
439 |
event_reasoning_dict = produce_4_event(event_fact)
|
440 |
caption_list ,event_reasoning_sd_list = transform_4_event_as_sd_prompts(event_fact ,
|
441 |
event_reasoning_dict,
|
@@ -452,14 +457,7 @@ def gen_images_from_event_fact(current_model, event_fact = DEFAULT_PROMPT, role_
|
|
452 |
print("perform styling.....")
|
453 |
img_list_ = []
|
454 |
for x in tqdm(img_list):
|
455 |
-
|
456 |
-
img_list_.append(
|
457 |
-
style_transfer_func(x, style_pic, super_resolution_type = "none")
|
458 |
-
)
|
459 |
-
else:
|
460 |
-
img_list_.append(
|
461 |
-
style_transfer_func(x, style_pic, super_resolution_type = "anime")
|
462 |
-
)
|
463 |
img_list = img_list_
|
464 |
img_list = list(map(lambda t2: add_caption_on_image(t2[0], t2[1]) ,zip(*[img_list, caption_list])))
|
465 |
img_mid = img_list[2]
|
@@ -492,7 +490,7 @@ def get_book_covers():
|
|
492 |
with gr.Blocks(css=".caption-label {display:none}") as demo:
|
493 |
favicon = '<img src="" width="48px" style="display: inline">'
|
494 |
gr.Markdown(
|
495 |
-
f"""<h1><center
|
496 |
"""
|
497 |
)
|
498 |
with gr.Row():
|
@@ -510,13 +508,21 @@ with gr.Blocks(css=".caption-label {display:none}") as demo:
|
|
510 |
value = DEFAULT_BOOK_COVER,
|
511 |
interactive = True,
|
512 |
)
|
|
|
513 |
super_resolution_type = gr.Radio(choices = ["SD(Standard Definition)" ,"HD(High Definition)"],
|
514 |
value="SD(Standard Definition)", label="Story Video Quality",
|
515 |
interactive = True)
|
|
|
516 |
|
517 |
with gr.Row():
|
518 |
-
text_prompt = gr.Textbox(label="Prompt", placeholder=
|
519 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
520 |
text_button = gr.Button("Generate", variant='primary', elem_id="gen-button")
|
521 |
|
522 |
with gr.Row():
|
@@ -529,7 +535,7 @@ with gr.Blocks(css=".caption-label {display:none}") as demo:
|
|
529 |
image_click, style_reference_input_gallery, style_reference_input_image
|
530 |
)
|
531 |
|
532 |
-
text_button.click(gen_images_from_event_fact, inputs=[current_model, text_prompt, role_name, style_reference_input_image
|
533 |
outputs=video_output)
|
534 |
|
535 |
#select_button.click(generate_txt2img, inputs=[current_model, select_prompt, negative_prompt, image_style], outputs=image_output)
|
|
|
10 |
import gradio as gr
|
11 |
import tensorflow_hub as hub
|
12 |
import matplotlib.pyplot as plt
|
13 |
+
#from real_esrgan_app import *
|
14 |
|
15 |
import gradio as gr
|
16 |
import requests
|
|
|
33 |
import pathlib
|
34 |
|
35 |
# Read the Hugging Face API token from the environment only.
# SECURITY: this revision hard-coded a literal "hf_..." access token here,
# overriding the env lookup and leaking the credential into the public repo
# history. That token must be revoked on huggingface.co immediately; never
# commit secrets — keep them in Space secrets / environment variables.
API_TOKEN = os.environ.get("HF_READ_TOKEN")
|
37 |
|
38 |
'''
|
39 |
dataset = load_dataset("Gustavosta/Stable-Diffusion-Prompts")
|
|
|
58 |
return PIL.Image.fromarray(tensor)
|
59 |
|
60 |
|
61 |
+
def perform_neural_transfer(content_image_input, style_image_input, hub_module = hub_module):
|
62 |
content_image = content_image_input.astype(np.float32)[np.newaxis, ...] / 255.
|
63 |
content_image = tf.image.resize(content_image, (400, 600))
|
64 |
|
|
|
74 |
stylized_image = tensor_to_image(stylized_image)
|
75 |
content_image_input = tensor_to_image(content_image_input)
|
76 |
stylized_image = stylized_image.resize(content_image_input.size)
|
77 |
+
return stylized_image
|
78 |
+
'''
|
79 |
print("super_resolution_type :")
|
80 |
print(super_resolution_type)
|
81 |
#print(super_resolution_type.value)
|
|
|
86 |
print("call else :")
|
87 |
stylized_image = inference(stylized_image, super_resolution_type)
|
88 |
return stylized_image
|
89 |
+
'''
|
90 |
|
91 |
list_models = [
|
92 |
#"SDXL-1.0",
|
|
|
425 |
os.remove(style_im_name)
|
426 |
return Image.open(out)
|
427 |
'''
|
428 |
+
def style_transfer_func(content_img, style_img):
    """Apply neural style transfer of ``style_img`` onto ``content_img``.

    Both arguments are expected to be PIL.Image-like objects (anything
    exposing a ``save`` method).  When the style reference is essentially a
    blank white picture (dominant color pure white covering > 95% of the
    extracted pixels), the content image is returned untouched, since
    styling with an empty reference only degrades the output.

    Returns a PIL.Image-like stylized image.
    Raises TypeError if either input (or the transfer result) is not
    image-like.
    """
    # Explicit raises instead of `assert`: asserts are stripped under
    # `python -O`, silently disabling this validation.
    if not hasattr(content_img, "save"):
        raise TypeError("content_img must be a PIL.Image-like object")
    if not hasattr(style_img, "save"):
        raise TypeError("style_img must be a PIL.Image-like object")

    # Dominant-color short-circuit: skip styling when the style picture is
    # (almost) entirely white, i.e. effectively "no style selected".
    colors, pixel_count = extcolors.extract_from_image(style_img)
    if colors and colors[0][0] == (255, 255, 255):
        # Guard the division: original code divided by sum(counts)
        # unconditionally, which crashes if every extracted count is 0.
        total = sum(count for _, count in colors)
        if total and colors[0][1] / total > 0.95:
            return content_img

    out = perform_neural_transfer(np.asarray(content_img), np.asarray(style_img))
    if not hasattr(out, "save"):
        raise TypeError("perform_neural_transfer returned a non-image result")
    return out
|
439 |
|
440 |
|
441 |
def gen_images_from_event_fact(current_model, event_fact = DEFAULT_PROMPT, role_name = DEFAULT_ROLE,
|
442 |
+
style_pic = None
|
443 |
):
|
|
|
444 |
event_reasoning_dict = produce_4_event(event_fact)
|
445 |
caption_list ,event_reasoning_sd_list = transform_4_event_as_sd_prompts(event_fact ,
|
446 |
event_reasoning_dict,
|
|
|
457 |
print("perform styling.....")
|
458 |
img_list_ = []
|
459 |
for x in tqdm(img_list):
|
460 |
+
img_list_.append(style_transfer_func(x, style_pic))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
461 |
img_list = img_list_
|
462 |
img_list = list(map(lambda t2: add_caption_on_image(t2[0], t2[1]) ,zip(*[img_list, caption_list])))
|
463 |
img_mid = img_list[2]
|
|
|
490 |
with gr.Blocks(css=".caption-label {display:none}") as demo:
|
491 |
favicon = '<img src="" width="48px" style="display: inline">'
|
492 |
gr.Markdown(
|
493 |
+
f"""<h1><center> 🎥💬 Comet Atomic Story Teller</center></h1>
|
494 |
"""
|
495 |
)
|
496 |
with gr.Row():
|
|
|
508 |
value = DEFAULT_BOOK_COVER,
|
509 |
interactive = True,
|
510 |
)
|
511 |
+
'''
|
512 |
super_resolution_type = gr.Radio(choices = ["SD(Standard Definition)" ,"HD(High Definition)"],
|
513 |
value="SD(Standard Definition)", label="Story Video Quality",
|
514 |
interactive = True)
|
515 |
+
'''
|
516 |
|
517 |
with gr.Row():
|
518 |
+
text_prompt = gr.Textbox(label="Event Prompt", placeholder=DEFAULT_PROMPT,
|
519 |
+
lines=1, elem_id="prompt-text-input", value = DEFAULT_PROMPT,
|
520 |
+
info = "You should set the prompt in format 'X do something', X is the role in the right."
|
521 |
+
)
|
522 |
+
role_name = gr.Textbox(label="Role (X)", placeholder=DEFAULT_ROLE, lines=1,
|
523 |
+
elem_id="prompt-text-input", value = DEFAULT_ROLE,
|
524 |
+
info = "You should set the Role (X) with some famous man (like: Confucius Superman)"
|
525 |
+
)
|
526 |
text_button = gr.Button("Generate", variant='primary', elem_id="gen-button")
|
527 |
|
528 |
with gr.Row():
|
|
|
535 |
image_click, style_reference_input_gallery, style_reference_input_image
|
536 |
)
|
537 |
|
538 |
+
text_button.click(gen_images_from_event_fact, inputs=[current_model, text_prompt, role_name, style_reference_input_image],
|
539 |
outputs=video_output)
|
540 |
|
541 |
#select_button.click(generate_txt2img, inputs=[current_model, select_prompt, negative_prompt, image_style], outputs=image_output)
|