svjack committed on
Commit
b910d20
·
1 Parent(s): 5dfbe1d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +389 -0
app.py ADDED
@@ -0,0 +1,389 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import random
3
+ import gradio as gr
4
+ from pathlib import Path
5
+ import os
6
+ import requests
7
+ from PIL import Image
8
+ import io
9
+ import pathlib
10
+
11
+
12
# Hugging Face read token used to authorize Inference API calls (may be None).
API_TOKEN = os.environ.get("HF_READ_TOKEN")

# All prompt-builder data lives in JSON files under <base_dir>/json/.
base_dir = "."
_json_dir = Path(base_dir, "json")
dropdown_options_file = _json_dir / "dropdown_options.json"
category_data_file = _json_dir / "category_data.json"
style_data_file = _json_dir / "style_data.json"
prefix_data_file = _json_dir / "prefix_data.json"
lightning_data_file = _json_dir / "lightning_data.json"
lens_data_file = _json_dir / "lens_data.json"
21
+
22
+
23
class Model:
    """Tiny value struct naming a model for the text generator."""

    def __init__(self, name) -> None:
        # Only the display name is stored; no behaviour attached.
        self.name = name
31
+
32
+
33
def populate_dropdown_options():
    """Load the four dropdown choice lists from ``dropdown_options_file``.

    Returns:
        Four tuples of strings, in order: (category, style, lightning, lens).

    Raises:
        FileNotFoundError / json.JSONDecodeError if the data file is missing
        or malformed; KeyError if a section is absent.
    """
    # Explicit encoding: JSON data files are UTF-8; close the handle before
    # building the result instead of processing inside the with-block.
    with open(dropdown_options_file, 'r', encoding='utf-8') as f:
        data = json.load(f)
    # Freeze each list so callers cannot mutate the shared option sets.
    return (
        tuple(data["category"]),
        tuple(data["style"]),
        tuple(data["lightning"]),
        tuple(data["lens"]),
    )
42
+
43
+
44
def add_to_prompt(*args):
    """Append the generated prompt to the existing txt2img prompt box.

    args: (prompt, use_default_negative_prompt, base_prompt, negative_base_prompt).
    Returns (combined_prompt, negative_prompt_text) for the two output boxes.
    """
    prompt, use_default_negative_prompt, base_prompt, negative_base_prompt = args
    default_negative_prompt = "(worst quality:1.2), (low quality:1.2), (lowres:1.1), (monochrome:1.1), (greyscale), multiple views, comic, sketch, (((bad anatomy))), (((deformed))), (((disfigured))), watermark, multiple_views, mutation hands, mutation fingers, extra fingers, missing fingers, watermark"
    combined = "{} {}".format(base_prompt, prompt)
    # The checkbox decides whether the stock negative prompt is emitted.
    negative = default_negative_prompt if use_default_negative_prompt else ""
    return combined, negative
51
+
52
+
53
def get_random_prompt(data):
    """Pick a random section key, then a random sub-array within it, and
    return three randomly sampled entries from that sub-array."""
    key = random.choice(list(data))
    bucket = random.choice(data[key])
    return random.sample(bucket, 3)
58
+
59
def get_correct_prompt(data, selected_dropdown):
    """Return ``[selected_dropdown, *three random entries]`` drawn from a
    random sub-array of the chosen section."""
    bucket = random.choice(data[selected_dropdown])
    picks = random.sample(bucket, 3)
    # The selection itself leads the fragment list.
    return [selected_dropdown] + picks
66
+
67
def _select_section_prompt(path, selection):
    """Load the JSON data at *path* and resolve *selection* to a prompt fragment.

    Returns "" for "none", a random pick for "random", otherwise the picks for
    the named entry (see get_random_prompt / get_correct_prompt).
    """
    with open(path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    if selection == "none":
        return ""
    if selection == "random":
        return get_random_prompt(data)
    return get_correct_prompt(data, selection)


def generate_prompt_output(*args):
    """Build the final prompt string from the four dropdown selections.

    args: (category, style, lightning, lens, use_default_negative_prompt);
    the last element is accepted for interface compatibility but unused here
    (the original destructured it as ``negative_prompt`` and never read it).

    Returns:
        A single comma-separated prompt string.
    """
    category, style, lightning, lens, _unused_negative = args

    # Six random prefix fragments, each emphasised with ((...)).
    with open(prefix_data_file, 'r', encoding='utf-8') as f:
        prefix_data = json.load(f)
    prefix_prompt = [f"(({item}))" for item in random.sample(prefix_data, 6)]

    # The original duplicated this open/branch logic once per data file;
    # the helper keeps selection semantics identical for all four sections.
    sections = (
        prefix_prompt,
        _select_section_prompt(category_data_file, category.lower()),
        _select_section_prompt(style_data_file, style.lower()),
        _select_section_prompt(lightning_data_file, lightning.lower()),
        _select_section_prompt(lens_data_file, lens.lower()),
    )

    # Join each section's items; an empty-string section joins to "" and is
    # dropped, exactly as in the original filtering.
    prompt_strings = []
    for section in sections:
        joined = ", ".join(str(item) for item in section)
        if joined:
            prompt_strings.append(joined)

    return ", ".join(prompt_strings)
150
+
151
# Display names of the hosted models selectable in the UI; generate_txt2img
# maps each name to its Hugging Face Inference API endpoint.
list_models = [
    "SDXL-1.0",
    "SD-1.5",
    "OpenJourney-V4",
    "Anything-V4",
    "Disney-Pixar-Cartoon",
    "Pixel-Art-XL",
    "Dalle-3-XL",
    "Midjourney-V4-XL",
]
161
+
162
def generate_txt2img(current_model, prompt, is_negative=False, image_style="None style", steps=50, cfg_scale=7,
                     seed=None, API_TOKEN=API_TOKEN):
    """Generate one image through the Hugging Face Inference API.

    Args:
        current_model: display name from ``list_models``.
        prompt: positive prompt text; non-strings fall back to "".
        is_negative: negative prompt text (the UI wires a textbox here, so it
            is normally a string despite the boolean-looking default).
        image_style: "None style", "Cinematic", "Digital Art" or "Portrait";
            unknown values behave like "None style".
        steps / cfg_scale / seed: diffusion parameters forwarded to the API;
            a missing seed is replaced by a random one.
        API_TOKEN: bearer token, defaulting to the module-level HF_READ_TOKEN.

    Returns:
        A PIL.Image.Image decoded from the API response bytes.

    Raises:
        ValueError: if current_model is not a known model name.
        requests.HTTPError: if the API call fails.
    """
    print("call {} {} one time".format(current_model, prompt))

    # Model name -> inference endpoint. A dict lookup replaces the original
    # if/elif chain, which left API_URL unbound (NameError) for unknown names.
    model_endpoints = {
        "SD-1.5": "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5",
        "SDXL-1.0": "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0",
        "OpenJourney-V4": "https://api-inference.huggingface.co/models/prompthero/openjourney",
        "Anything-V4": "https://api-inference.huggingface.co/models/xyn-ai/anything-v4.0",
        "Disney-Pixar-Cartoon": "https://api-inference.huggingface.co/models/stablediffusionapi/disney-pixar-cartoon",
        "Pixel-Art-XL": "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl",
        "Dalle-3-XL": "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl",
        "Midjourney-V4-XL": "https://api-inference.huggingface.co/models/openskyml/midjourney-v4-xl",
    }
    try:
        API_URL = model_endpoints[current_model]
    except KeyError:
        raise ValueError("Unknown model: {!r}".format(current_model)) from None

    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    # The original referenced an undefined DEFAULT_PROMPT here, which raised
    # NameError for non-string prompts; degrade to an empty prompt instead.
    if not isinstance(prompt, str):
        prompt = ""
    # is_negative defaults to a bool, but the style branches concatenated it
    # with strings (TypeError in the original); normalise to text first.
    negative = is_negative if isinstance(is_negative, str) else ""

    # (positive-prompt suffix, negative-prompt suffix) per style preset;
    # unknown styles fall back to "None style" instead of leaving the
    # payload unbound as the original if/elif chain did.
    style_suffixes = {
        "None style": (", 8k", ""),
        "Cinematic": (", realistic, detailed, textured, skin, hair, eyes, by Alex Huguet, Mike Hill, Ian Spriggs, JaeCheol Park, Marek Denko", ", abstract, cartoon, stylized"),
        "Digital Art": (", faded , vintage , nostalgic , by Jose Villa , Elizabeth Messina , Ryan Brenizer , Jonas Peterson , Jasmine Star", ", sharp , modern , bright"),
        "Portrait": (", soft light, sharp, exposure blend, medium shot, bokeh, (hdr:1.4), high contrast, (cinematic, teal and orange:0.85), (muted colors, dim colors, soothing tones:1.3), low saturation, (hyperdetailed:1.2), (noir:0.4), (natural skin texture, hyperrealism, soft light, sharp:1.2)", ""),
    }
    prompt_suffix, negative_suffix = style_suffixes.get(image_style, style_suffixes["None style"])

    payload = {
        "inputs": prompt + prompt_suffix,
        "is_negative": negative + negative_suffix,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed is not None else random.randint(-1, 2147483647),
    }

    response = requests.post(API_URL, headers=headers, json=payload)
    # Fail loudly on API errors instead of letting PIL choke on an error JSON body.
    response.raise_for_status()
    return Image.open(io.BytesIO(response.content))
240
+
241
def on_ui_tabs():
    """Build the Gradio Blocks UI and return it.

    The name is kept from the AUTOMATIC1111-extension entry point this app
    was adapted from; here it is simply called once at module bottom.
    """
    # NOTE(review): leftover reference notes from the WebUI-extension
    # version, kept verbatim as an inert string.
    '''
    # UI structure
    txt2img_prompt = modules.ui.txt2img_paste_fields[0][0]
    img2img_prompt = modules.ui.img2img_paste_fields[0][0]
    txt2img_negative_prompt = modules.ui.txt2img_paste_fields[1][0]
    img2img_negative_prompt = modules.ui.img2img_paste_fields[1][0]
    '''

    # Small CSS tweaks for the header logo and title placement.
    with gr.Blocks(css = '''
    .header img {
      float: middle;
      width: 33px;
      height: 33px;
    }
    .header h1 {
      top: 18px;
      left: 10px;
    }
    '''
    ) as prompt_generator:
        # Page header with logo image.
        gr.HTML(
            '''
            <center>
            <div class="header">
            <h1 class = "logo"> <img src="https://huggingface.co/spaces/svjack/Next-Diffusion-Prompt-Generator/resolve/main/images/nextdiffusion_logo.png" alt="logo" /> 🧑‍🎨 Next Diffusion Prompt On Stable Diffuison </h1>
            </center>
            ''')

        with gr.Tab("Prompt Generator"):
            with gr.Row():  # Use Row to arrange two columns side by side
                with gr.Column():  # Left column: prompt inputs and image output
                    # Dropdown choices loaded once from json/dropdown_options.json.
                    category_choices, style_choices, lightning_choices, lens_choices = populate_dropdown_options()

                    with gr.Row():
                        gr.HTML('''<h2 id="input_header">Input 👇</h2>''')
                    with gr.Row():
                        # Prompt / negative-prompt text inputs.
                        with gr.Row():
                            txt2img_prompt = gr.Textbox(label = "txt2img_prompt", interactive = True)
                            txt2img_negative_prompt = gr.Textbox(label = "txt2img_negative_prompt", interactive = True)
                        # img2img inputs disabled in this Space (inert string):
                        '''
                        with gr.Row():
                            img2img_prompt = gr.Textbox(label = "img2img_prompt", interactive = True)
                            img2img_negative_prompt = gr.Textbox(label = "img2img_negative_prompt", interactive = True)
                        '''
                        with gr.Row():
                            # Model picker defaults to the second entry ("SD-1.5").
                            current_model = gr.Dropdown(label="Current Model", choices=list_models, value=list_models[1])
                            text_button = gr.Button("Generate image by Stable Diffusion")
                        with gr.Row():
                            image_output = gr.Image(label="Output Image", type = "filepath", elem_id="gallery", height = 512,
                                show_share_button = True
                            )
                            #image_gallery = gr.Gallery(height = 512, label = "Output Gallery")
                            #image_file = gr.File(label="Output Image File")

                with gr.Column():  # Right column: prompt-generator controls
                    with gr.Row():
                        gr.HTML('''<h2 id="output_header">Prompt Extender by Rule 👋 (aid Input 👈)</h2>''')
                    # NOTE(review): Row.style() was removed in gradio 4.x —
                    # confirm the deployed gradio version still supports it.
                    with gr.Row().style(equal_height=True):  # Place dropdowns side by side
                        category_dropdown = gr.Dropdown(
                            choices=category_choices,
                            value=category_choices[1],
                            label="Category", show_label=True
                        )

                        style_dropdown = gr.Dropdown(
                            choices=style_choices,
                            value=style_choices[1],
                            label="Style", show_label=True
                        )
                    with gr.Row():
                        lightning_dropdown = gr.Dropdown(
                            choices=lightning_choices,
                            value=lightning_choices[1],
                            label="Lightning", show_label=True
                        )

                        lens_dropdown = gr.Dropdown(
                            choices=lens_choices,
                            value=lens_choices[1],
                            label="Lens", show_label=True
                        )
                    # Output box for the generated prompt text.
                    result_textbox = gr.Textbox(label="Generated Prompt", lines=3)
                    use_default_negative_prompt = gr.Checkbox(label="Include Negative Prompt", value=True, interactive=True, elem_id="negative_prompt_checkbox")
                    # WebUI-extension leftover flag; harmless in a plain Space.
                    setattr(use_default_negative_prompt,"do_not_save_to_config",True)
                    with gr.Row():
                        generate_button = gr.Button(value="Generate", elem_id="generate_button")
                        clear_button = gr.Button(value="Clear")
                    with gr.Row():
                        txt2img = gr.Button("Send to txt2img")
                        #img2img = gr.Button("Send to img2img")
                    with gr.Row():
                        gr.HTML('''
                        <hr class="rounded" id="divider">
                        ''')
                    with gr.Row():
                        gr.HTML('''<h2 id="input_header">Links</h2>''')
                    with gr.Row():
                        gr.HTML('''
                        <h3>Stable Diffusion Tutorials⚡</h3>
                        <container>
                        <a href="https://nextdiffusion.ai" target="_blank">
                        <button id="website_button" class="external-link">Website</button>
                        </a>
                        <a href="https://www.youtube.com/channel/UCd9UIUkLnjE-Fj-CGFdU74Q?sub_confirmation=1" target="_blank">
                        <button id="youtube_button" class="external-link">YouTube</button>
                        </a>
                        </container>
                        ''')

        # Disabled advanced-settings panel kept for reference (inert string):
        '''
        with gr.Accordion("Advanced settings", open=True):
            negative_prompt = gr.Textbox(label="Negative Prompt", value="text, blurry, fuzziness", lines=1, elem_id="negative-prompt-text-input")
            image_style = gr.Dropdown(label="Style", choices=["None style", "Cinematic", "Digital Art", "Portrait"], value="None style", allow_custom_value=False) with gr.Row():
        '''

        # Append the generated prompt into the txt2img prompt boxes.
        txt2img.click(add_to_prompt, inputs=[result_textbox, use_default_negative_prompt, txt2img_prompt, txt2img_negative_prompt], outputs=[txt2img_prompt, txt2img_negative_prompt ])
        #img2img.click(add_to_prompt, inputs=[result_textbox, use_default_negative_prompt, img2img_prompt, img2img_negative_prompt], outputs=[img2img_prompt, img2img_negative_prompt])

        # Reset the three textboxes and return every dropdown to "Random".
        # NOTE(review): inputs=None but the lambda takes one argument —
        # verify gradio actually invokes it without arguments on this version.
        clear_button.click(lambda x: [""] * 3 + ["Random", "Random", "Random", "Random"], None,
            [result_textbox, txt2img_prompt, txt2img_negative_prompt,
             category_dropdown, style_dropdown, lightning_dropdown, lens_dropdown
            ])

        # Image generation through the hosted inference API.
        text_button.click(generate_txt2img, inputs=[current_model, txt2img_prompt, txt2img_negative_prompt], outputs=image_output,

        )

        # Register the callback for the Generate button.
        generate_button.click(fn=generate_prompt_output, inputs=[category_dropdown, style_dropdown, lightning_dropdown, lens_dropdown, use_default_negative_prompt], outputs=[result_textbox])

        gr.Examples(
            [
                ["A lovely cat", "low quality, blur", "Anime", "Drawing", "Bloom light", "F/14"],
                ["A cute blue boy", "low quality, blur", "Anime", "3D style", "None", "Random"],
            ],
            inputs = [txt2img_prompt, txt2img_negative_prompt, category_dropdown, style_dropdown, lightning_dropdown, lens_dropdown]
        )

    return prompt_generator
384
+
385
+
386
# Build the UI once. The original wrapped this in
# `with on_ui_tabs() as demo: pass`, which re-entered the already-built
# Blocks context as a no-op; a plain assignment is equivalent and clearer.
demo = on_ui_tabs()

demo.launch(show_api=False)