svjack committed on
Commit
9a1c005
1 Parent(s): ca198b1

Create app.py

Files changed (1)
  1. app.py +465 -0
app.py ADDED
@@ -0,0 +1,465 @@
import os
import io
import random
import requests
import numpy as np
import PIL.Image
import gradio as gr
from PIL import Image, ImageDraw, ImageFont

import pandas as pd
from time import sleep
from tqdm import tqdm

import extcolors

import cv2
from skimage import io as skio
from pyxelate import Pyx
from uuid import uuid1
from huggingface_hub import InferenceClient

API_TOKEN = os.environ.get("HF_READ_TOKEN")

DEFAULT_PROMPT = "Superman go to Istanbul"
#DEFAULT_ROLE = "Superman"
#DEFAULT_BOOK_COVER = "book_cover_dir/Blank.png"

def tensor_to_image(tensor):
    tensor = tensor * 255
    tensor = np.array(tensor, dtype=np.uint8)
    if np.ndim(tensor) > 3:
        assert tensor.shape[0] == 1
        tensor = tensor[0]
    return PIL.Image.fromarray(tensor)


list_models = [
    "Pixel-Art-XL",
    "SD-1.5",
    "OpenJourney-V4",
    "Anything-V4",
    "Disney-Pixar-Cartoon",
    "Dalle-3-XL",
]


def generate_txt2img(current_model, prompt, is_negative=False, image_style="None style", steps=50, cfg_scale=7,
                     seed=None, API_TOKEN=API_TOKEN):
    # Map the friendly model name to its Hugging Face Inference API endpoint.
    if current_model == "SD-1.5":
        API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
    elif current_model == "OpenJourney-V4":
        API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
    elif current_model == "Anything-V4":
        API_URL = "https://api-inference.huggingface.co/models/xyn-ai/anything-v4.0"
    elif current_model == "Disney-Pixar-Cartoon":
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/disney-pixar-cartoon"
    elif current_model == "Pixel-Art-XL":
        API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
    elif current_model == "Dalle-3-XL":
        API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"

    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    if not isinstance(prompt, str):
        prompt = DEFAULT_PROMPT

    # is_negative arrives as a bool by default; normalize it to a string so the
    # style branches below can append negative-prompt terms without a TypeError.
    negative = is_negative if isinstance(is_negative, str) else ""

    if image_style == "None style":
        payload = {
            "inputs": prompt + ", 8k",
            "is_negative": negative,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }
    elif image_style == "Cinematic":
        payload = {
            "inputs": prompt + ", realistic, detailed, textured, skin, hair, eyes, by Alex Huguet, Mike Hill, Ian Spriggs, JaeCheol Park, Marek Denko",
            "is_negative": negative + ", abstract, cartoon, stylized",
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }
    elif image_style == "Digital Art":
        payload = {
            "inputs": prompt + ", faded , vintage , nostalgic , by Jose Villa , Elizabeth Messina , Ryan Brenizer , Jonas Peterson , Jasmine Star",
            "is_negative": negative + ", sharp , modern , bright",
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }
    elif image_style == "Portrait":
        payload = {
            "inputs": prompt + ", soft light, sharp, exposure blend, medium shot, bokeh, (hdr:1.4), high contrast, (cinematic, teal and orange:0.85), (muted colors, dim colors, soothing tones:1.3), low saturation, (hyperdetailed:1.2), (noir:0.4), (natural skin texture, hyperrealism, soft light, sharp:1.2)",
            "is_negative": negative,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }

    image_bytes = requests.post(API_URL, headers=headers, json=payload).content
    image = Image.open(io.BytesIO(image_bytes))
    return image

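# Illustrative call (commented out: it hits the remote Inference API and
# needs HF_READ_TOKEN to be set; model and prompt here are just examples):
# img = generate_txt2img("Pixel-Art-XL", "Superman go to Istanbul")
# img.save("sample.png")
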
# Few-shot history that teaches the chat model four event-reasoning questions:
# preconditions, effects, motivation, and feelings, phrased over an "X" template.
event_reasoning_df = pd.DataFrame(
    [['Use the following events as a background to answer questions related to the cause and effect of time.', 'Ok'],

     ['What are the necessary preconditions for the next event?:X had a big meal.', 'X placed an order'],
     ['What could happen after the next event?:X had a big meal.', 'X becomes fat'],
     ['What is the motivation for the next event?:X had a big meal.', 'X is hungry'],
     ['What are your feelings after the following event?:X had a big meal.', "X tastes good"],

     ['What are the necessary preconditions for the next event?:X met his favorite star.', 'X bought a ticket'],
     ['What could happen after the next event?:X met his favorite star.', 'X is motivated'],
     ['What is the motivation for the next event?:X met his favorite star.', 'X wants to have some entertainment'],
     ['What are your feelings after the following event?:X met his favorite star.', "X is in a happy mood"],

     ['What are the necessary preconditions for the next event?: X to cheat', 'X has evil intentions'],
     ['What could happen after the next event?:X to cheat', 'X is accused'],
     ['What is the motivation for the next event?:X to cheat', 'X wants to get something for nothing'],
     ['What are your feelings after the following event?:X to cheat', "X is starving and freezing in prison"],

     ['What could happen after the next event?:X go to Istanbul', ''],
    ],
    columns=["User", "Assistant"]
)

Mistral_7B_client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1"
)

NEED_PREFIX = 'What are the necessary preconditions for the next event?'
EFFECT_PREFIX = 'What could happen after the next event?'
INTENT_PREFIX = 'What is the motivation for the next event?'
REACT_PREFIX = 'What are your feelings after the following event?'

def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

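# Sketch of the Mistral-instruct prompt this builds (illustrative values):
# format_prompt("How are you?", [("Hi", "Hello!")])
# -> '<s>[INST] Hi [/INST] Hello!</s> [INST] How are you? [/INST]'
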
def generate(
    prompt, history, client=Mistral_7B_client,
    temperature=0.7, max_new_tokens=256, top_p=0.95, repetition_penalty=1.1,
):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)

    # Stream tokens from the Inference API, yielding the growing output so
    # callers can take the final string with list(generate(...))[-1].
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""

    for response in stream:
        output += response.token.text
        yield output
    return output

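# Usage sketch (commented out, remote call): the generator yields the text as
# it grows, so the final element is the full completion.
# full_text = list(generate("X read a book", history=[], max_new_tokens=64))[-1]
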
l = [['Confucius', 'X read a book'],
     ['Superman', 'X go to Istanbul'],
     ['Monk Xuanzang', 'X went to the West to obtain Buddhist scriptures'],
     ['Mickey Mouse', 'X attends a party'],
     ['Napoleon', 'X riding a horse'],
     ['The Pope', 'X is being crowned'],
     ['Harry Potter', 'X defeated Voldemort'],
     ['Minions', 'X join the interstellar war'],
     ['Augustus Octavian', 'X served as tribune'],
     ['The Eastern Roman Emperor', 'X defeats Mongol Invaders']]
# Few-shot history that teaches the model to split a sentence into
# "<role> : <event template with X>".
l = [
    ('Extract entity from following sentence.', 'Ok')
] + pd.DataFrame(l, columns=["Role", "Event"]).apply(
    lambda x: (x["Event"].replace("X", x["Role"]), "{} : {}".format(x["Role"], x["Event"])), axis=1
).values.tolist()

#list(generate("The forbidden city build by emp from ming.", history = l, max_new_tokens = 2048))[-1]
#' The Forbidden City : X build by Emp from Ming</s>'

# Drop the last, unanswered row (the Istanbul question); the rest becomes chat history.
hist = event_reasoning_df.iloc[:-1, :].apply(
    lambda x: (x["User"], x["Assistant"]), axis=1
)

def produce_4_event(event_fact, hist=hist):
    NEED_PREFIX_prompt = "{}:{}".format(NEED_PREFIX, event_fact)
    EFFECT_PREFIX_prompt = "{}:{}".format(EFFECT_PREFIX, event_fact)
    INTENT_PREFIX_prompt = "{}:{}".format(INTENT_PREFIX, event_fact)
    REACT_PREFIX_prompt = "{}:{}".format(REACT_PREFIX, event_fact)
    NEED_PREFIX_output = list(generate(NEED_PREFIX_prompt, history=hist, max_new_tokens=2048))[-1]
    EFFECT_PREFIX_output = list(generate(EFFECT_PREFIX_prompt, history=hist, max_new_tokens=2048))[-1]
    INTENT_PREFIX_output = list(generate(INTENT_PREFIX_prompt, history=hist, max_new_tokens=2048))[-1]
    REACT_PREFIX_output = list(generate(REACT_PREFIX_prompt, history=hist, max_new_tokens=2048))[-1]
    NEED_PREFIX_output, EFFECT_PREFIX_output, INTENT_PREFIX_output, REACT_PREFIX_output = map(
        lambda x: x.replace("</s>", ""),
        [NEED_PREFIX_output, EFFECT_PREFIX_output, INTENT_PREFIX_output, REACT_PREFIX_output])
    return {
        NEED_PREFIX: NEED_PREFIX_output,
        EFFECT_PREFIX: EFFECT_PREFIX_output,
        INTENT_PREFIX: INTENT_PREFIX_output,
        REACT_PREFIX: REACT_PREFIX_output,
    }

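# Illustrative output shape (actual wording depends on the model):
# produce_4_event("X go to Istanbul")
# -> {NEED_PREFIX: 'X bought a ticket', EFFECT_PREFIX: '...',
#     INTENT_PREFIX: '...', REACT_PREFIX: '...'}
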
def transform_4_event_as_sd_prompts(event_fact, event_reasoning_dict, role_name="superman"):
    # Guard against a missing role name before the unguarded .replace calls below.
    if not (isinstance(role_name, str) and role_name.strip()):
        role_name = "X"
    req = {}
    for k, v in event_reasoning_dict.items():
        v_ = v.replace("X", role_name)
        req[k] = list(generate("Transform this as a prompt in stable diffusion: {}".format(v_),
                               history=[], max_new_tokens=2048))[-1].replace("</s>", "")
    event_fact_ = event_fact.replace("X", role_name)
    req["EVENT_FACT"] = list(generate("Transform this as a prompt in stable diffusion: {}".format(event_fact_),
                                      history=[], max_new_tokens=2048))[-1].replace("</s>", "")
    req_list = [
        req[INTENT_PREFIX], req[NEED_PREFIX],
        req["EVENT_FACT"],
        req[REACT_PREFIX], req[EFFECT_PREFIX]
    ]
    caption_list = [
        event_reasoning_dict[INTENT_PREFIX], event_reasoning_dict[NEED_PREFIX],
        event_fact,
        event_reasoning_dict[REACT_PREFIX], event_reasoning_dict[EFFECT_PREFIX]
    ]
    caption_list = list(map(lambda x: x.replace("X", role_name), caption_list))
    return caption_list, req_list

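# Full reasoning-to-prompt pass, sketched (two model calls per entry, so it is
# slow and left commented out); both returned lists share the ordering
# [motivation, precondition, event, feeling, effect]:
# captions, sd_prompts = transform_4_event_as_sd_prompts(
#     "X go to Istanbul", produce_4_event("X go to Istanbul"), role_name="Superman")
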
def batch_as_list(input_, batch_size=3):
    req = []
    for ele in input_:
        if not req or len(req[-1]) >= batch_size:
            req.append([ele])
        else:
            req[-1].append(ele)
    return req

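# Example: batch_as_list(list(range(5)), 2) -> [[0, 1], [2, 3], [4]]
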
def add_margin(pil_img, top, right, bottom, left, color):
    width, height = pil_img.size
    new_width = width + right + left
    new_height = height + top + bottom
    result = Image.new(pil_img.mode, (new_width, new_height), color)
    result.paste(pil_img, (left, top))
    return result

def add_caption_on_image(input_image, caption, marg_ratio=0.15, row_token_num=6):
    assert hasattr(input_image, "save")
    max_image_size = max(input_image.size)
    marg_size = int(marg_ratio * max_image_size)
    # Use the image's dominant color for the margin so the caption band blends in.
    colors, pixel_count = extcolors.extract_from_image(input_image)
    input_image = add_margin(input_image, marg_size, 0, 0, marg_size, colors[0][0])
    font = ImageFont.truetype("DejaVuSerif-Italic.ttf", int(marg_size / 4))
    caption_token_list = list(map(lambda x: x.strip(), caption.split(" ")))
    caption_list = list(map(" ".join, batch_as_list(caption_token_list, row_token_num)))
    draw = ImageDraw.Draw(input_image)
    for line_num, line_caption in enumerate(caption_list):
        position = (
            int(marg_size / 4) * (line_num + 1) * 1.1,
            int(marg_size / 4) * (line_num + 1) * 1.1)
        draw.text(position, line_caption, fill="black", font=font)
    return input_image


def expand2square(pil_img, background_color):
    # Pad the shorter side so the image becomes square, centered on the canvas.
    width, height = pil_img.size
    if width == height:
        return pil_img
    elif width > height:
        result = Image.new(pil_img.mode, (width, width), background_color)
        result.paste(pil_img, (0, (width - height) // 2))
        return result
    else:
        result = Image.new(pil_img.mode, (height, height), background_color)
        result.paste(pil_img, ((height - width) // 2, 0))
        return result

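# Example: a 100x60 image becomes 100x100, centered, with 20px of
# background color above and below.
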
def generate_video(images, video_name='ppt.avi'):
    # Write each frame to disk first; cv2.imread yields the BGR arrays OpenCV expects.
    im_names = []
    for im in images:
        name = "{}.png".format(uuid1())
        im.save(name)
        im_names.append(name)
    frame = cv2.imread(im_names[0])

    # The video takes its frame size from the first image.
    height, width, layers = frame.shape

    # fourcc=0 writes raw frames; 1 fps gives one slide per second.
    video = cv2.VideoWriter(video_name, 0, 1, (width, height))

    # Append the images to the video one by one, cleaning up the temp files.
    for name in im_names:
        video.write(cv2.imread(name))
        os.remove(name)

    video.release()  # finalize the generated video

def make_video_from_image_list(image_list, video_name="ppt.avi"):
    if os.path.exists(video_name):
        os.remove(video_name)
    assert all(map(lambda x: hasattr(x, "save"), image_list))
    # Pad every frame to the same square size, using each image's dominant
    # color as background, so the writer gets uniform dimensions.
    max_size = list(map(max, zip(*map(lambda x: x.size, image_list))))
    max_size = max(max_size)
    image_list = list(map(lambda x: expand2square(x,
                                                  extcolors.extract_from_image(x)[0][0][0]
                                                  ).resize((max_size, max_size)), image_list))

    generate_video(image_list, video_name=video_name)
    return video_name

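# Sketch: make_video_from_image_list([img_a, img_b, img_c], "story.avi")
# pads the three PIL images to a common square size, writes a 1-fps
# slideshow, and returns the output path.
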
def style_transfer_func(content_img, downsample, palette, depth, upscale):
    assert hasattr(content_img, "save")
    # Round-trip through disk because pyxelate works on skimage arrays.
    path = "{}.png".format(uuid1())
    content_img.save(path)
    image = skio.imread(path)
    os.remove(path)
    downsample_by = int(downsample)  # the new image will be 1/downsample-th of the original size
    palette = int(palette)           # number of colors to quantize to
    # 1) Instantiate the Pyx transformer
    pyx = Pyx(factor=downsample_by, palette=palette, depth=int(depth), upscale=int(upscale))
    # 2) fit the image, letting Pyxelate learn the color palette
    pyx.fit(image)
    # 3) transform the image to pixel art using the learned palette
    new_image = pyx.transform(image)
    # save the new image with skimage.io.imsave()
    skio.imsave(path, new_image)
    out = Image.open(path)
    out.load()  # force the lazy read before the backing file is deleted
    os.remove(path)
    return out

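# Example (parameter values are illustrative): halve the resolution, quantize
# to 10 colors, then upscale 2x back toward the original size:
# pix = style_transfer_func(img, downsample=2, palette=10, depth=1, upscale=2)
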
def gen_images_from_event_fact(current_model, event_fact, role_name,
                               downsample=0, palette=0, depth=0, upscale=0,
                               ):
    event_reasoning_dict = produce_4_event(event_fact)
    caption_list, event_reasoning_sd_list = transform_4_event_as_sd_prompts(event_fact,
                                                                            event_reasoning_dict,
                                                                            role_name=role_name
                                                                            )
    img_list = []
    for prompt in tqdm(event_reasoning_sd_list):
        im = generate_txt2img(current_model, prompt, is_negative=False, image_style="None style")
        img_list.append(im)
        sleep(2)  # pause between Inference API calls
    # Drop failed generations (non-image responses); captions are zipped
    # positionally below, so this assumes all five calls succeeded.
    img_list = list(filter(lambda x: hasattr(x, "save"), img_list))
    if downsample is not None and downsample > 0:
        print("perform styling.....")
        img_list_ = []
        for x in tqdm(img_list):
            img_list_.append(style_transfer_func(x, downsample, palette, depth, upscale))
    else:
        img_list_ = img_list

    def trans_img_list_to_video(img_list, video_name):
        img_list = list(map(lambda t2: add_caption_on_image(t2[0], t2[1]), zip(img_list, caption_list)))
        # Reorder so the main event frame leads, followed by the reasoning frames.
        img_mid = img_list[2]
        img_list_reordered = [img_mid]
        for ele in img_list:
            if ele not in img_list_reordered:
                img_list_reordered.append(ele)
        video_path = make_video_from_image_list(img_list_reordered, video_name=video_name)
        return video_path

    ppt_avi_path = trans_img_list_to_video(img_list, "ppt.avi")
    pix_ppt_avi_path = trans_img_list_to_video(img_list_, "pix_ppt.avi")
    return ppt_avi_path, pix_ppt_avi_path


def gen_images_from_prompt(current_model, prompt=DEFAULT_PROMPT,
                           downsample=0, palette=0, depth=0, upscale=0,
                           ):
    # First ask the chat model to split the prompt into "<role> : <event>", e.g.
    # "The forbidden city build by emp from ming." ->
    # ' The Forbidden City : X build by Emp from Ming</s>'
    out = list(generate(prompt, history=l, max_new_tokens=2048))[-1]
    role_name, event_fact = map(lambda x: x.replace("</s>", "").strip(), out.split(":", 1))
    video_path, pix_video_path = gen_images_from_event_fact(current_model, event_fact, role_name,
                                                            downsample, palette, depth, upscale,
                                                            )
    return video_path, pix_video_path

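# End-to-end sketch (many remote calls, so commented out): returns paths to
# the plain and pixel-art story videos.
# video, pix_video = gen_images_from_prompt("Pixel-Art-XL", "Superman go to Istanbul",
#                                           2, 10, 1, 2)
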
with gr.Blocks(css=".caption-label {display:none}") as demo:
    gr.Markdown(
        f"""<h1><center> 🧱 Pixel Story Teller</center></h1>
        """
    )
    with gr.Row():
        with gr.Column(elem_id="prompt-container"):
            with gr.Row():
                gr.HTML('''<h2 id="input_header">Input 👇</h2>''')
            with gr.Row():
                text_prompt = gr.Textbox(label="Event Prompt", placeholder=DEFAULT_PROMPT,
                                         lines=1, elem_id="prompt-text-input", value=DEFAULT_PROMPT,
                                         info="You should set the prompt in the format 'Someone does something'",
                                         )
            with gr.Row():
                current_model = gr.Dropdown(label="Current Model", choices=list_models, value="Pixel-Art-XL")
                downsample = gr.Number(value=2, label="downsample by")
                palette = gr.Number(value=10, label="palette")
                depth = gr.Number(value=1, label="depth")
                upscale = gr.Number(value=2, label="upscale")

        with gr.Column():
            with gr.Row():
                gr.HTML('<h2 id="output_header"> 👈 Input </h2>')
            gr.Examples(
                [
                    ["OpenJourney-V4", "Augustus Octavian" + " served as tribune"],
                    ["Pixel-Art-XL", "Confucius" + " read a book"],
                    ["Pixel-Art-XL", "Superman" + " go to Istanbul"],
                    ["SD-1.5", "Monk Xuanzang" + " went to the West to obtain Buddhist scriptures"],
                    ["SD-1.5", "Mickey Mouse" + " attends a party"],
                    ["SD-1.5", "Napoleon" + " riding a horse"],
                    #["SD-1.5", "The Pope" + " is being crowned"],
                    ["SD-1.5", "The Eastern Roman Emperor" + " defeats Mongol Invaders"],
                ],
                inputs=[current_model, text_prompt],
                #label = "Example collection"
            )
    with gr.Row():
        text_button = gr.Button("Generate", variant='primary', elem_id="gen-button")
    with gr.Row():
        with gr.Row():
            video_output = gr.Video(label="Story Video", elem_id="gallery", height=768 - 128)
            pix_video_output = gr.Video(label="Pixel Story Video", elem_id="gallery", height=768 - 128)

    text_button.click(gen_images_from_prompt,
                      inputs=[current_model, text_prompt,
                              downsample, palette, depth, upscale],
                      outputs=[video_output, pix_video_output])


demo.launch(show_api=False)