Update app.py
app.py CHANGED
@@ -22,14 +22,14 @@ os.chdir("Stable_Diffusion_Finetuned_Minecraft_Skin_Generator")
 
 
 @spaces.GPU()
-def run_inference(prompt, stable_diffusion_model, num_inference_steps, guidance_scale, model_precision_type, seed,
+def run_inference(prompt, stable_diffusion_model, num_inference_steps, guidance_scale, model_precision_type, seed, filename, verbose):
 
     if stable_diffusion_model == '2':
         sd_model = "minecraft-skins"
     else:
         sd_model = "minecraft-skins-sdxl"
 
-    inference_command = f"python Scripts/{sd_model}.py '{prompt}' {num_inference_steps} {guidance_scale} {model_precision_type} {seed} {
+    inference_command = f"python Scripts/{sd_model}.py '{prompt}' {num_inference_steps} {guidance_scale} {model_precision_type} {seed} {filename} {'--verbose' if verbose else ''}"
 
     os.system(inference_command)
 
@@ -49,7 +49,7 @@ num_inference_steps = gr.Number(label="Number of Inference Steps", precision=0,
 guidance_scale = gr.Number(minimum=0.1, value=7.5, label="Guidance Scale", info="The number of denoising steps of the image. More denoising steps usually lead to a higher quality image at the cost of slower inference")
 model_precision_type = gr.Dropdown(["fp16", "fp32"], value="fp16", label="Model Precision Type", info="The precision type to load the model, like fp16 which is faster, or fp32 which gives better results")
 seed = gr.Number(value=42, label="Seed", info="A starting point to initiate generation, put 0 for a random one")
-
+filename = gr.Textbox(label="Output Image Name", info="The name of the file of the output image skin, keep the .png", value="output-skin.png")
 verbose = gr.Checkbox(label="Verbose Output", info="Produce more detailed output while running", value=False)
 
 
@@ -63,7 +63,7 @@ gr.Interface(
         guidance_scale,
         model_precision_type,
         seed,
-
+        filename,
         verbose
     ],
     outputs=[