baqu2213 committed on
Commit
36b7791
•
1 Parent(s): 380d271

add ADetailer request

Browse files
.gitattributes CHANGED
@@ -115,3 +115,4 @@ Danbooru[[:space:]]Prompt[[:space:]]Selector/TEST2024/NAIA_0107_testv3.exe filte
115
  Danbooru[[:space:]]Prompt[[:space:]]Selector/TEST2024/NAIA_0108_testv3.exe filter=lfs diff=lfs merge=lfs -text
116
  Danbooru[[:space:]]Prompt[[:space:]]Selector/TEST2024/NAIA_0110_testv3.exe filter=lfs diff=lfs merge=lfs -text
117
  Danbooru[[:space:]]Prompt[[:space:]]Selector/TEST2024/WEBUIA_0119.exe filter=lfs diff=lfs merge=lfs -text
 
 
115
  Danbooru[[:space:]]Prompt[[:space:]]Selector/TEST2024/NAIA_0108_testv3.exe filter=lfs diff=lfs merge=lfs -text
116
  Danbooru[[:space:]]Prompt[[:space:]]Selector/TEST2024/NAIA_0110_testv3.exe filter=lfs diff=lfs merge=lfs -text
117
  Danbooru[[:space:]]Prompt[[:space:]]Selector/TEST2024/WEBUIA_0119.exe filter=lfs diff=lfs merge=lfs -text
118
+ Danbooru[[:space:]]Prompt[[:space:]]Selector/TEST2024/WEBUIA_0124.exe filter=lfs diff=lfs merge=lfs -text
Danbooru Prompt Selector/TEST2024/NAIA_generation (WEBUIA_0124).py ADDED
@@ -0,0 +1,365 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import random
3
+ from PIL import Image, ImageOps, ImageTk
4
+ from datetime import datetime
5
+ import time
6
+ from pathlib import Path
7
+ import io
8
+ import zipfile
9
+
10
+ #for webui
11
+ import base64
12
+ import re
13
+ import json
14
+
15
+ BASE_URL="https://api.novelai.net"
16
+
17
def make_turbo_prompt(gen_request):
    """Derive four staged "turbo" prompt variants from one generation request.

    For a 1boy+1girl request whose prompt contains 'sex,', builds a 4-step
    sequence (0: imminent, 1: act, 2: ejaculation, 3: aftermath) by filtering
    and re-inserting keywords.  Each returned dict is a copy of `gen_request`
    with 'prompt' rewritten and 'type' set to 'turbo'.
    NOTE(review): for prompts that do not match that pattern, the four copies
    are returned unmodified — confirm callers expect that.
    """
    lines = gen_request['prompt']
    # Subject-count flags detected in the prompt.
    result = {
        "boys": False,
        "girls": False,
        "1girl": False,
        "1boy": False,
        "1other": False,
        "others": False
    }
    # Situation flags (currently only collected, not all consumed below).
    state = {
        "nude,": False,
        "pov,": False,
        "cum,": False,
        "after ": False,
        "pussy juice": False,
        "barefoot": False,
        "breasts": False,
        "ejaculation": False,
    }

    def insert_spaces(source_list, reference_list):
        # Pad `source_list` with blank placeholders so its items line up
        # positionally with `reference_list` (keeps the joined prompts aligned).
        modified_list = source_list.copy()
        for index, keyword in enumerate(reference_list):
            if keyword not in source_list:
                space_count = len(keyword)  # blank run as long as the keyword
                modified_list.insert(index, ' ' * space_count)
        return modified_list

    keywords = gen_request['prompt'].split(', ')
    filtered_keywords = []
    removed_indices = []
    positive0, positive1, positive2, positive3 = gen_request.copy(), gen_request.copy(), gen_request.copy(), gen_request.copy()

    for word in result.keys():
        if word in lines:
            result[word] = True
    for word in state.keys():
        if word in gen_request['prompt']:
            state[word] = True

    # Default insertion point: roughly the middle of the keyword list.
    key_index = int((len(keywords)/2)-1)

    if (result["1boy"]) or (result["boys"]):
        if (result["1girl"]):
            if ('sex,' in gen_request['prompt']):
                sex_pos_keywords = ['stomach bulge', 'insertion', 'fucked silly', 'x-ray', 'orgasm', 'cross-section', 'uterus', 'overflow', 'rape', 'vaginal', 'anal']
                facial_keywords = ['tongue', 'ahegao']
                temp_sex_pos = []
                temp_facial = []
                cum_events = []
                explicit_check = []
                if 'open mouth' in keywords: keywords.remove('open mouth')
                if 'closed mouth' in keywords: keywords.remove('closed mouth')
                if 'after rape' in keywords:
                    keywords.remove('after rape')
                    explicit_check.append('after rape')
                # Partition keywords: neutral ones stay; act/position/facial/cum
                # keywords are pulled out and re-inserted per stage below.
                for keyword in keywords:
                    if ('sex' not in keyword and 'cum' not in keyword and 'ejaculation' not in keyword and 'vaginal' not in keyword and 'penetration' not in keyword) and all(sex_pos not in keyword for sex_pos in sex_pos_keywords) and all(facial not in keyword for facial in facial_keywords):
                        filtered_keywords.append(keyword)
                    elif 'sex' in keyword:
                        removed_indices.append(keyword)
                    elif 'penetration' in keyword:
                        removed_indices.append(keyword)
                    elif 'cum' in keyword and keyword != 'cum':
                        cum_events.append(keyword)
                    elif any(sex_pos in keyword for sex_pos in sex_pos_keywords):
                        for sex_pos in sex_pos_keywords:
                            if sex_pos in keyword:
                                temp_sex_pos.append(sex_pos)
                    # NOTE(review): `any(facial not in ...)` is true for any
                    # keyword that misses at least one facial term — likely
                    # meant `any(facial in keyword ...)`; left as-is.
                    elif any(facial not in keyword for facial in facial_keywords):
                        for facial in facial_keywords:
                            if facial in keyword:
                                temp_facial.append(facial)
                # Stage 0: imminent penetration, no act keywords yet.
                filtered_keywords.insert(int((len(filtered_keywords)/2)-1), ' no penetration, imminent penetration')
                filtered_keywords_positive0 = filtered_keywords.copy()
                filtered_keywords.remove(' no penetration, imminent penetration')
                #0 imminent penetration, imminent sex
                for i, keyword in enumerate(filtered_keywords):
                    if 'pantyhose' in keyword:
                        filtered_keywords[i] = 'torn ' + filtered_keywords[i]
                #1 default
                key_index = int((len(filtered_keywords)/2)-1)
                if 'pussy' in filtered_keywords: key_index = filtered_keywords.index('pussy')
                if 'penis' in filtered_keywords: key_index = filtered_keywords.index('penis')
                filtered_keywords[key_index:key_index] = ['motion lines', 'surprised']
                for keyword in removed_indices:
                    if 'cum' not in keyword and 'ejaculation' not in keyword:
                        filtered_keywords.insert(key_index, keyword)
                if (temp_sex_pos): filtered_keywords[key_index:key_index] = temp_sex_pos
                if ('clothed sex' in filtered_keywords and not 'bottomless' in filtered_keywords): filtered_keywords.insert(filtered_keywords.index('clothed sex')+1, 'bottomless')
                pos1_copied_keywords = filtered_keywords.copy()
                # Randomize eye state for stage 1 (half-closed / removed).
                for i, keyword in enumerate(pos1_copied_keywords):
                    if 'closed eyes' in keyword:
                        rand_num = random.randint(0, 2)
                        if (rand_num == 0): pos1_copied_keywords[i] = 'half-' + pos1_copied_keywords[i]
                        elif (rand_num == 1 and 'closed eyes' in pos1_copied_keywords):
                            pos1_copied_keywords.remove('closed eyes')
                            filtered_keywords[i] = 'half-closed eyes'
                filtered_keywords_positive1 = pos1_copied_keywords.copy()
                #2 ejaculation,cum in pussy
                key_index = filtered_keywords.index('surprised')
                filtered_keywords.remove('surprised')
                filtered_keywords[key_index:key_index] = ["ejaculation", "cum"]
                for keyword in removed_indices:
                    if 'cum' in keyword:
                        filtered_keywords.insert(key_index, keyword)
                if (temp_facial): filtered_keywords[key_index:key_index] = temp_facial
                filtered_keywords_positive2 = filtered_keywords.copy()
                #3 after sex, after ejaculation
                for i, keyword in enumerate(filtered_keywords):
                    if 'closed eyes' in keyword:
                        rand_num = random.randint(0, 2)
                        if (rand_num == 0 and filtered_keywords[i] != 'half-closed eyes'): filtered_keywords[i] = 'half-' + filtered_keywords[i]
                        elif (rand_num == 1): filtered_keywords[i] = 'empty eyes'
                        else: filtered_keywords[i] = 'empty eyes, half-closed eyes'
                if 'sex' in filtered_keywords:
                    key_index = filtered_keywords.index('sex')
                elif 'group sex' in filtered_keywords:
                    key_index = filtered_keywords.index('group sex')
                filtered_keywords.remove('ejaculation')
                filtered_keywords[key_index:key_index] = ['cum drip', 'erection'] + cum_events
                if (explicit_check): filtered_keywords[key_index:key_index] = explicit_check
                # Rewrite the act keyword into its aftermath form.
                if 'sex' in filtered_keywords and 'group sex' not in filtered_keywords:
                    if ('pussy' in filtered_keywords and not 'anal' in filtered_keywords): filtered_keywords.insert(filtered_keywords.index('sex')+1, 'after vaginal, spread pussy')
                    elif ('anal' in filtered_keywords): filtered_keywords.insert(filtered_keywords.index('sex')+1, 'after anus, cum in ass')
                    filtered_keywords.insert(filtered_keywords.index('sex'), 'after sex')
                    filtered_keywords.remove('sex')
                elif 'group sex' in filtered_keywords:
                    if ('vaginal' in filtered_keywords and not 'anal' in filtered_keywords):
                        filtered_keywords.insert(filtered_keywords.index('group sex')+1, 'after vaginal, spread pussy')
                        if 'multiple penises' in filtered_keywords: filtered_keywords.insert(filtered_keywords.index('group sex')+3, 'cum on body, bukkake')
                    elif ('anal' in filtered_keywords):
                        filtered_keywords.insert(filtered_keywords.index('group sex')+1, 'after anus, cum in ass')
                        if 'multiple penises' in filtered_keywords: filtered_keywords.insert(filtered_keywords.index('group sex')+3, 'cum on body, bukkake')
                    else: filtered_keywords.insert(filtered_keywords.index('group sex')+1, 'cum on body, {bukkake}')
                # Drop position keywords that make no sense after the act.
                temp_post_keyword = []
                for keyword in sex_pos_keywords:
                    if not (keyword == 'orgasm' or keyword == 'overflow'):
                        if keyword in filtered_keywords:
                            temp_post_keyword.append(keyword)
                for keyword in temp_post_keyword:
                    filtered_keywords.remove(keyword)

                # Align stages 0-2 against the final stage so joined prompts
                # keep keywords at matching offsets.
                positive0['prompt'] = ', '.join(insert_spaces(filtered_keywords_positive0, filtered_keywords)).strip()
                positive1['prompt'] = ', '.join(insert_spaces(filtered_keywords_positive1, filtered_keywords)).strip()
                positive2['prompt'] = ', '.join(insert_spaces(filtered_keywords_positive2, filtered_keywords)).strip()
                positive3['prompt'] = ', '.join(filtered_keywords).strip()
                positive0["type"] = "turbo"
                positive1["type"] = "turbo"
                positive2["type"] = "turbo"
                positive3["type"] = "turbo"
    return positive0, positive1, positive2, positive3
170
+
171
def generate_image_NAI(access_token, prompt, model, action, parameters):
    """POST a generation request to the NovelAI API and return the raw body.

    The response is a zip archive on success; on failure the caller inspects
    the returned bytes, so no error handling is done here on purpose.
    """
    payload = {
        "input": prompt,
        "model": model,
        "action": action,
        "parameters": parameters,
    }
    auth_header = {"Authorization": f"Bearer {access_token}"}
    response = requests.post(
        f"{BASE_URL}/ai/generate-image",
        json=payload,
        headers=auth_header,
    )
    return response.content
182
+
183
+ # WebUI
184
def convert_prompt(prompt):
    """Normalize a comma-separated prompt for the WebUI backend.

    Trims whitespace around each keyword and rewrites NovelAI-style
    '{emphasis}' braces to WebUI-style '(parentheses)'.  A keyword is only
    rewritten when it contains '{'; a stray '}' on its own is left alone.
    """
    converted = []
    for raw in prompt.split(','):
        keyword = raw.strip()
        if '{' in keyword:
            keyword = keyword.replace('{', '(').replace('}', ')')
        converted.append(keyword)
    return ', '.join(converted)
192
+
193
def generate_image_webui(access_token, prompt, model, action, parameters):
    """Generate an image through an AUTOMATIC1111 WebUI /sdapi/v1/txt2img call.

    Here `access_token` is the WebUI base URL, not a NovelAI token (see the
    dispatcher `generate_image`).  Returns a zip archive (bytes) containing
    the generated image so the return shape matches the NovelAI backend.
    """
    # Map NovelAI sampler ids to the WebUI sampler display names.
    samplers = {
        "k_euler": "Euler",
        "k_euler_ancestral": "Euler a",
        "k_dpmpp_2s_ancestral": "DPM++ 2S a",
        "k_dpmpp_sde": "DPM++ SDE"
    }

    # matching data format (mirrors the NovelAI request layout)
    data = {
        "input": prompt,
        "model": model,
        "action": action,
        "parameters": parameters,
    }

    # Translate NovelAI parameter names to the WebUI txt2img schema.
    params = {
        "prompt": convert_prompt(data['input']),
        "negative_prompt": convert_prompt(data['parameters']['negative_prompt']),
        "steps": data['parameters']['steps'],
        "width": data['parameters']['width'],
        "height": data['parameters']['height'],
        "cfg_scale": data['parameters']['scale'],
        "sampler_index": samplers[data['parameters']['sampler']],
        "seed": data['parameters']['seed'],
        "seed_resize_from_h": -1,
        "seed_resize_from_w": -1,
        "denoising_strength": None,
        "n_iter": "1",
        "batch_size": data['parameters']['n_samples']
    }

    # Forward hires-fix settings only when enabled upstream.
    if data['parameters']['enable_hr'] == True:
        params['enable_hr'] = True
        params["hr_upscaler"] = data['parameters']["hr_upscaler"]
        params["hr_scale"] = data['parameters']["hr_scale"]
        params["hr_second_pass_steps"] = data['parameters']["hr_second_pass_steps"]
        params["denoising_strength"] = data['parameters']["denoising_strength"]

    # ADetailer (face-fix extension) arguments: reuse the main pass prompt,
    # negative prompt and sampler for the inpaint pass.
    if data['parameters']['enable_AD'] == True:
        params["alwayson_scripts"] = {"ADetailer":
            {
                "args": [
                    True,
                    False,
                    {
                        "ad_model": "face_yolov8n.pt",
                        "ad_prompt": params["prompt"],
                        "ad_negative_prompt": params["negative_prompt"],
                        "ad_inpaint_only_masked": True,
                        "ad_inpaint_only_masked_padding": 32,
                        "ad_sampler" : samplers[data['parameters']['sampler']]
                    }
                ]
            }
        }

    res = requests.post(f"{access_token}/sdapi/v1/txt2img", json=params)
    imageb64s = res.json()['images']
    content = None
    # NOTE(review): if the server returns several images only the last one
    # survives this loop; batch_size is 1 upstream so this is effectively a
    # single-image path — confirm before raising n_samples.
    for b64 in imageb64s:
        img = b64.encode()
        content = base64.b64decode(img)

    # Wrap the PNG bytes in an in-memory zip to match the NovelAI response.
    s = io.BytesIO()
    zf = zipfile.ZipFile(s, "w")
    zf.writestr("generated", content)
    zf.close()
    return s.getvalue()
262
+
263
+ # WebUI
264
def generate_image(access_token, prompt, model, action, parameters):
    """Route a generation request to the right backend.

    An `access_token` that starts with http:// or https:// is treated as a
    WebUI base URL; anything else is assumed to be a NovelAI API token.
    """
    if re.match(r'^http[s]?://', access_token):
        return generate_image_webui(access_token, prompt, model, action, parameters)
    return generate_image_NAI(access_token, prompt, model, action, parameters)
268
+
269
def generate(gen_request):
    """Build backend parameters from `gen_request`, generate, save and return.

    Returns (resized_image, prompt, seed, png_info, filename) on success, or
    (None, error_message, seed, None, None) on failure.
    """

    params = {
        "legacy": False,
        "quality_toggle": True if gen_request["quality_toggle"] == 1 else False,
        "width": gen_request["width"],
        "height": gen_request["height"],
        "n_samples": 1,
        "seed": gen_request["seed"],
        "extra_noise_seed": random.randint(0,9999999999),
        "sampler": gen_request["sampler"],
        # Custom step count is honored only for "upper" requests that carry
        # a "steps" key; everything else is pinned to 28.
        "steps": 28 if (gen_request["type"]!="upper" or "steps" not in gen_request) else gen_request["steps"],
        "scale": gen_request["scale"],
        "uncond_scale": 1.0,
        "negative_prompt": gen_request["negative"],
        "sm" : gen_request["sema"],
        "sm_dyn" : gen_request["sema_dyn"],
        "decrisper": False,
        "controlnet_strength": 1.0,
        "add_original_image": False,
        "cfg_rescale": gen_request["cfg_rescale"],
        "noise_schedule": "native",
        "enable_hr" : gen_request["enable_hr"],
        "enable_AD" : gen_request["enable_AD"]
    }

    if params["enable_hr"] == True:
        params["hr_upscaler"] = gen_request["hr_upscaler"]
        params["hr_scale"] = gen_request["hr_scale"]
        params["hr_second_pass_steps"] = gen_request["hr_second_pass_steps"]
        params["denoising_strength"] = gen_request["denoising_strength"]

    # TODO (original note, translated): wildcard feature still to be built.
    positive = gen_request["prompt"]

    filename_rule = gen_request["png_rule"]
    save_folder = gen_request["save_folder"]

    access_token = gen_request["access_token"]
    additional_folder = ""

    def resize_and_fill(image, max_size=None):
        # Fit `image` inside a max_size square for on-screen preview,
        # letterboxing with black; small images pass through untouched.
        if max_size is None:
            max_size = gen_request["user_screen_size"]
        original_width, original_height = image.size
        if original_width > max_size or original_height > max_size:
            # Downscale while preserving aspect ratio.
            image.thumbnail((max_size, max_size))

            # Compute the resized dimensions and center on a black square.
            width, height = image.size
            new_image = Image.new("RGB", (max_size, max_size), "black")
            new_image.paste(image, ((max_size - width) // 2, (max_size - height) // 2))
            return new_image
        else:
            return image

    def log_error(e, output_file_path="output_file_path"):
        # Timestamp for the log entry.
        current_time = datetime.now().strftime("%m/%d %H:%M:%S")

        # Error log message block.
        error_message = f"#### Error occured at {current_time} ####\nError: {e}\n############################################\n"

        # Append to error_log.txt in the working directory.
        # NOTE(review): `output_file_path` is accepted but never used.
        with open(f"error_log.txt", "a") as file:
            file.write(error_message)

    try:
        zipped_bytes = generate_image(access_token, positive, "nai-diffusion-3", "generate", params)
        if gen_request["png_rule"] == "count":
            additional_folder = "/" + gen_request["start_time"]
        if gen_request["type"] == "turbo":
            additional_folder += "/turbo"
        d = Path(save_folder + additional_folder)
        d.mkdir(parents=True, exist_ok=True)
        # Backends return a zip archive; the first entry is the PNG.
        zipped = zipfile.ZipFile(io.BytesIO(zipped_bytes))
        image_bytes = zipped.read(zipped.infolist()[0])
        if gen_request["png_rule"] == "count":
            _count = gen_request["count"]
            filename = (d / f"{_count:05}.png" )
        else: filename = (d / f"{datetime.now().strftime('%Y%m%d_%H%M%S')}.png" )
        filename.write_bytes(image_bytes)
        i = Image.open(io.BytesIO(image_bytes))
        i = ImageOps.exif_transpose(i).convert("RGB")
        i_resized = resize_and_fill(i)
        #tk_image = ImageTk.PhotoImage(i_resized)
        return i_resized, positive, params['seed'], i.info, str(filename)
    except Exception as e:
        try:
            # NOTE(review): if generate_image itself raised, `zipped_bytes`
            # is unbound here and this raises NameError, which the inner
            # except turns into the error message — works, but by accident.
            if zipped_bytes is None:
                raise ValueError("Connection broken (Protocol Error)")
            # Non-zip error bodies are UTF-8 text; strip the wrapping chars.
            error_message = zipped_bytes.decode('utf-8')[2:-2]
        except Exception as inner_exception:
            error_message = str(inner_exception)
        log_error(error_message, "path_to_output_folder")
        return None, error_message, params['seed'], None, None
Danbooru Prompt Selector/TEST2024/WEBUIA_0124.exe ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e809f61fb674f04d32fdbf2f27323c81cbede6c33b2bfc47fe9a36f2b34548d
3
+ size 837840692
Danbooru Prompt Selector/TEST2024/WEBUIA_0124.py ADDED
The diff for this file is too large to render. See raw diff