New v2
app.py CHANGED
@@ -81,7 +81,6 @@ def parse_json_parameters(json_str):
         if key not in params:
             raise ValueError(f"Missing required key: {key}")
 
-    # Parse resolution
     width, height = map(int, params['resolution'].split(' x '))
 
     return {
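
For orientation, a rough sketch of the input shape the hunk above implies: parse_json_parameters validates a set of required keys and expects 'resolution' to be a "W x H" string it can split on ' x '. The payload and key names below are illustrative, not taken from app.py.

    import json

    # Hypothetical payload; the real list of required keys is defined elsewhere in app.py.
    payload = '{"prompt": "1girl, sundress", "resolution": "1152 x 896", "seed": 42}'
    params = json.loads(payload)

    # Same split as the retained line above: "1152 x 896" -> (1152, 896).
    width, height = map(int, params["resolution"].split(" x "))
    print(width, height)  # 1152 896
    # A payload missing one of the required keys would hit the ValueError raised above.
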
@@ -220,19 +219,15 @@ def generate(
         pipe.scheduler = backup_scheduler
         utils.free_memory()
 
-# Initialize an empty list to store the generation history
 generation_history = []
 
-# Function to update the history list
 def update_history_list():
     return [item["image"] for item in generation_history]
 
-# Function to handle image click in history
 def handle_image_click(evt: gr.SelectData):
     selected = generation_history[evt.index]
     return selected["image"], json.dumps(selected["metadata"], indent=2)
 
-# Modify the generate function to add results to the history
 def generate_and_update_history(*args, **kwargs):
     images, metadata = generate(*args, **kwargs)
     timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
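
A minimal, self-contained sketch of the history plumbing kept here, assuming each entry is a dict with "image" and "metadata" keys as in the append shown in the next hunk; the sample values are invented and the Gradio event wiring is not part of this diff.

    import json

    # Invented entries shaped like the ones generate_and_update_history appends.
    generation_history = [
        {"image": "gen_001.png", "metadata": {"prompt": "1girl, sundress", "seed": 42}},
        {"image": "gen_002.png", "metadata": {"prompt": "city at night", "seed": 7}},
    ]

    def update_history_list():
        # What a history gallery would display: just the images.
        return [item["image"] for item in generation_history]

    # handle_image_click receives a gr.SelectData and reads evt.index;
    # simulated here with a plain integer index.
    selected = generation_history[1]
    print(update_history_list())                       # ['gen_001.png', 'gen_002.png']
    print(json.dumps(selected["metadata"], indent=2))  # metadata echoed back to the UI
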
@@ -242,27 +237,24 @@ def generate_and_update_history(*args, **kwargs):
         "image": images[0],
         "metadata": metadata
     })
-    if len(generation_history) > 10:
+    if len(generation_history) > 10:
         generation_history.pop()
     return images[0], json.dumps(metadata, indent=2), update_history_list()
 
-# Load the character list
 with open('characterfull.txt', 'r') as f:
     characters = [line.strip() for line in f.readlines()]
 
-# Function to get a random character
 def get_random_character():
     return random.choice(characters)
 
-# Function to add quality tags
 def add_quality_tags(prompt, negative_prompt):
-    positive_tags = "score_9, score_8_up, score_7_up, score_6_up, dramatic lighting
-    negative_tags = "worst quality, low quality, text, censored, deformed, bad hand, blurry, (watermark), mutated hands, monochrome
+    positive_tags = "score_9, score_8_up, score_7_up, score_6_up, dramatic lighting"
+    negative_tags = "worst quality, low quality, text, censored, deformed, bad hand, blurry, (watermark), mutated hands, monochrome"
 
-
-
+    new_prompt = f"{positive_tags}, {prompt}" if prompt else positive_tags
+    new_negative_prompt = f"{negative_tags}, {negative_prompt}" if negative_prompt else negative_tags
 
-    return
+    return new_prompt, new_negative_prompt
 
 if torch.cuda.is_available():
     pipe = load_pipeline(MODEL)
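
To sanity-check the repaired add_quality_tags (the removed lines had unterminated string literals and a bare return, which is a syntax error at import time), here is a standalone copy of the new version plus an example call; the prompts are made up.

    def add_quality_tags(prompt, negative_prompt):
        positive_tags = "score_9, score_8_up, score_7_up, score_6_up, dramatic lighting"
        negative_tags = "worst quality, low quality, text, censored, deformed, bad hand, blurry, (watermark), mutated hands, monochrome"

        new_prompt = f"{positive_tags}, {prompt}" if prompt else positive_tags
        new_negative_prompt = f"{negative_tags}, {negative_prompt}" if negative_prompt else negative_tags

        return new_prompt, new_negative_prompt

    p, n = add_quality_tags("1girl, sundress", "")
    print(p)  # score tags followed by ", 1girl, sundress"
    print(n)  # an empty negative prompt falls back to the default negative tags only
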
@@ -306,8 +298,7 @@ with gr.Blocks(css="style.css") as demo:
             placeholder="Enter a negative prompt",
             value=""
         )
-
-aspect_ratio_selector = gr.Radio(
+        aspect_ratio_selector = gr.Radio(
             label="Aspect Ratio",
             choices=config.aspect_ratios,
             value="1024 x 1024",
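
config.aspect_ratios is not part of this diff; the sketch below only records the assumptions the UI relies on, namely that the default "1024 x 1024" is among the choices and that every choice uses the " x " separator the resolution parser in the first hunk splits on. The concrete list here is a guess.

    # Guessed stand-in for config.aspect_ratios (the real list lives in config.py).
    aspect_ratios = ["1024 x 1024", "1152 x 896", "896 x 1152", "1216 x 832", "832 x 1216"]

    assert "1024 x 1024" in aspect_ratios
    assert all(len(choice.split(" x ")) == 2 for choice in aspect_ratios)
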
@@ -374,6 +365,7 @@ aspect_ratio_selector = gr.Radio(
             step=1,
             value=28,
         )
+
         with gr.Accordion(label="Generation Parameters", open=False):
             gr_metadata = gr.JSON(label="Metadata", show_label=False)
             json_input = gr.TextArea(label="Edit/Paste JSON Parameters", placeholder="Paste or edit JSON parameters here")
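
The accordion block above can be previewed in isolation; the snippet below reproduces just that layout with no callbacks attached, assuming a recent Gradio release where gr.Accordion, gr.JSON and gr.TextArea behave as used here.

    import gradio as gr

    with gr.Blocks() as demo:
        with gr.Accordion(label="Generation Parameters", open=False):
            gr_metadata = gr.JSON(label="Metadata", show_label=False)
            json_input = gr.TextArea(
                label="Edit/Paste JSON Parameters",
                placeholder="Paste or edit JSON parameters here",
            )

    # demo.launch()  # uncomment to preview the layout locally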