Update app.py
app.py CHANGED
@@ -196,14 +196,14 @@ def randomize_loras(selected_indices, loras_state):
         lora_scale_1 = 1.15
         lora_scale_2 = 1.15
         lora_scale_3 = 1.15
-        lora_image_1 = lora1
-        lora_image_2 = lora2
-        lora_image_3 = lora3
+        lora_image_1 = lora1.get('image', 'path/to/default/image.png')
+        lora_image_2 = lora2.get('image', 'path/to/default/image.png')
+        lora_image_3 = lora3.get('image', 'path/to/default/image.png')
         random_prompt = random.choice(prompt_values)
         return selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_image_1, lora_image_2, lora_image_3, random_prompt
     except Exception as e:
         print(f"Error in randomize_loras: {str(e)}")
-        return "Error", "Error", "Error", [], 1.15, 1.15, 1.15,
+        return "Error", "Error", "Error", [], 1.15, 1.15, 1.15, 'path/to/default/image.png', 'path/to/default/image.png', 'path/to/default/image.png', ""
 
 def add_custom_lora(custom_lora, selected_indices, current_loras):
     if custom_lora:
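The hunk above fixes two issues in randomize_loras: the old code assigned the whole LoRA dict (lora1/lora2/lora3) to the image outputs instead of an image path, and the error branch returned only 7 of the 11 values the success path returns. Below is a minimal sketch of the fallback pattern, assuming each LoRA entry is a dict with an optional 'image' key; the default path is a placeholder, not the Space's real asset.

```python
# Sketch of the .get() fallback used above; the 'image' key and default path are assumptions.
DEFAULT_IMAGE = "path/to/default/image.png"  # placeholder

def pick_preview_image(lora: dict) -> str:
    """Return the LoRA entry's preview image path, or a default if the key is missing."""
    return lora.get("image", DEFAULT_IMAGE)

print(pick_preview_image({"repo": "user/some-lora"}))                     # -> path/to/default/image.png
print(pick_preview_image({"repo": "user/other", "image": "preview.png"}))  # -> preview.png
```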
@@ -366,7 +366,7 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
 
 def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
     try:
-        # Detect Korean (Hangul) in the prompt and translate it
+        # Detect Korean (Hangul) in the prompt and translate it (kept as-is)
         if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
             translated = translator(prompt, max_length=512)[0]['translation_text']
             print(f"Original prompt: {prompt}")
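run_lora first checks every character of the prompt against the Hangul compatibility-jamo (U+3131–U+318E) and syllable (U+AC00–U+D7A3) ranges and, if any match, routes the prompt through the translator defined elsewhere in app.py. Below is a minimal sketch of that setup, assuming a transformers translation pipeline; the model name is an illustrative choice, not necessarily the one the Space uses.

```python
# Sketch only: `translator` is defined elsewhere in app.py; the task/model below are assumptions.
from transformers import pipeline

translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")  # hypothetical model choice

def maybe_translate(prompt: str) -> str:
    # U+3131-U+318E: Hangul compatibility jamo, U+AC00-U+D7A3: complete Hangul syllables
    if any('\u3131' <= ch <= '\u318E' or '\uAC00' <= ch <= '\uD7A3' for ch in prompt):
        return translator(prompt, max_length=512)[0]['translation_text']
    return prompt

print(maybe_translate("노을 지는 바닷가"))  # translated to English before prompting
print(maybe_translate("a cat on a sofa"))   # returned unchanged
```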
@@ -378,7 +378,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
 
         selected_loras = [loras_state[idx] for idx in selected_indices]
 
-        # Build the prompt with trigger words
+        # Build the prompt with trigger words (kept as-is)
         prepends = []
         appends = []
         for lora in selected_loras:
@@ -396,41 +396,52 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
         pipe.unload_lora_weights()
         pipe_i2i.unload_lora_weights()
 
-        print(pipe.get_active_adapters())
+        print(f"Active adapters before loading: {pipe.get_active_adapters()}")
+
         # Load LoRA weights with respective scales
         lora_names = []
         lora_weights = []
         with calculateDuration("Loading LoRA weights"):
             for idx, lora in enumerate(selected_loras):
-
-
-
-
-
-
-
-
-
+                try:
+                    lora_name = f"lora_{idx}"
+                    lora_path = lora['repo']
+                    weight_name = lora.get("weights")
+                    print(f"Loading LoRA {lora_name} from {lora_path}")
+                    if image_input is not None:
+                        if weight_name:
+                            pipe_i2i.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=lora_name)
+                        else:
+                            pipe_i2i.load_lora_weights(lora_path, adapter_name=lora_name)
                     else:
-
-
-
-
-
-
+                        if weight_name:
+                            pipe.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=lora_name)
+                        else:
+                            pipe.load_lora_weights(lora_path, adapter_name=lora_name)
+                    lora_names.append(lora_name)
+                    lora_weights.append(lora_scale_1 if idx == 0 else lora_scale_2 if idx == 1 else lora_scale_3)
+                except Exception as e:
+                    print(f"Failed to load LoRA {lora_name}: {str(e)}")
+
         print("Loaded LoRAs:", lora_names)
         print("Adapter weights:", lora_weights)
-
-
+
+        if lora_names:
+            if image_input is not None:
+                pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
+            else:
+                pipe.set_adapters(lora_names, adapter_weights=lora_weights)
         else:
-
-
-
+            print("No LoRAs were successfully loaded.")
+            return None, seed, gr.update(visible=False)
+
+        print(f"Active adapters after loading: {pipe.get_active_adapters()}")
+
+        # Image generation logic from here on (kept as-is)
         with calculateDuration("Randomizing seed"):
             if randomize_seed:
                 seed = random.randint(0, MAX_SEED)
 
-        # Generate image
         if image_input is not None:
             final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed)
         else:
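The hunk above is the core of the commit: each selected LoRA is loaded under its own adapter_name inside a try/except, so one bad repo no longer aborts the whole run, and only the adapters that actually loaded are activated together via set_adapters with per-slot weights. Below is a standalone sketch of that diffusers pattern; the base model, LoRA repos, and scales are placeholders (the Space's actual checkpoints are not named in this diff), and multi-adapter loading requires the PEFT backend.

```python
# Standalone sketch of the multi-LoRA pattern from the diff; repo IDs and scales are placeholders.
import torch
from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

selected_loras = [
    {"repo": "user/style-lora", "weights": "style.safetensors"},  # hypothetical entry with explicit weight file
    {"repo": "user/detail-lora"},                                 # hypothetical entry without one
]
scales = [1.15, 1.15]

pipe.unload_lora_weights()  # start from a clean adapter state, as the diff does
lora_names, lora_weights = [], []
for idx, lora in enumerate(selected_loras):
    name = f"lora_{idx}"
    try:
        if lora.get("weights"):
            pipe.load_lora_weights(lora["repo"], weight_name=lora["weights"], adapter_name=name)
        else:
            pipe.load_lora_weights(lora["repo"], adapter_name=name)
        lora_names.append(name)
        lora_weights.append(scales[idx])
    except Exception as e:
        print(f"Failed to load LoRA {name}: {e}")  # skip the bad adapter instead of failing the run

if lora_names:
    # Activate every adapter that loaded, each with its own strength.
    pipe.set_adapters(lora_names, adapter_weights=lora_weights)
    print("Active adapters:", pipe.get_active_adapters())
```

Naming each adapter lets several LoRAs coexist on the same pipeline and be re-weighted later with another set_adapters call, without reloading any weights.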
@@ -442,15 +453,12 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
             final_image = image
             progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
             yield image, seed, gr.update(value=progress_bar, visible=True)
-
-
 
         if final_image is None:
             raise Exception("Failed to generate image")
 
         return final_image, seed, gr.update(visible=False)
 
-
     except Exception as e:
         print(f"Error in run_lora: {str(e)}")
         return None, seed, gr.update(visible=False)
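The final hunk only removes stray blank lines around the streaming loop, which yields each intermediate image together with a progress-bar HTML snippet and then hides the bar once generation finishes. Below is a minimal sketch of that Gradio generator pattern, with illustrative component names; unlike the original, it ends on a final yield rather than a return.

```python
# Minimal sketch of a streaming Gradio handler like run_lora; component names and the
# text stand-in for the image output are illustrative only.
import time
import gradio as gr

def fake_generate(prompt, steps=4):
    image = None
    for step in range(1, steps + 1):
        time.sleep(0.2)  # stand-in for one denoising step
        image = f"step {step} preview of: {prompt}"
        bar = (f'<div class="progress-container"><div class="progress-bar" '
               f'style="--current: {step}; --total: {steps};"></div></div>')
        yield image, gr.update(value=bar, visible=True)   # stream intermediate result + progress bar
    yield image, gr.update(visible=False)                 # final result, progress bar hidden

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Result")
    progress = gr.HTML(visible=False)
    gr.Button("Run").click(fake_generate, inputs=prompt, outputs=[out, progress])

if __name__ == "__main__":
    demo.launch()
```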