Norod78 committed on
Commit
f65864c
1 Parent(s): c17aff6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -9
app.py CHANGED
@@ -4,7 +4,7 @@ import torch
4
  from PIL import Image
5
 
6
  model_id = 'Norod78/sd2-simpsons-blip'
7
- prefix = ''
8
 
9
  scheduler = DPMSolverMultistepScheduler(
10
  beta_start=0.00085,
@@ -37,10 +37,9 @@ def error_str(error, title="Error"):
37
  return f"""#### {title}
38
  {error}""" if error else ""
39
 
40
- def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False):
41
 
42
  generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
43
- prompt = f"{prefix} {prompt}" if auto_prefix else prompt
44
 
45
  try:
46
  if img is not None:
@@ -110,7 +109,7 @@ with gr.Blocks(css=css) as demo:
110
  with gr.Column(scale=55):
111
  with gr.Group():
112
  with gr.Row():
113
- prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder=f"{prefix} [your prompt]").style(container=False)
114
  generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
115
 
116
  image_out = gr.Image(height=512)
@@ -119,8 +118,7 @@ with gr.Blocks(css=css) as demo:
119
  with gr.Column(scale=45):
120
  with gr.Tab("Options"):
121
  with gr.Group():
122
- neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
123
- auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=prefix, visible=prefix)
124
 
125
  with gr.Row():
126
  guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
@@ -137,9 +135,7 @@ with gr.Blocks(css=css) as demo:
137
  image = gr.Image(label="Image", height=256, tool="editor", type="pil")
138
  strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
139
 
140
- auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False)
141
-
142
- inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix]
143
  outputs = [image_out, error_output]
144
  prompt.submit(inference, inputs=inputs, outputs=outputs)
145
  generate.click(inference, inputs=inputs, outputs=outputs)
 
4
  from PIL import Image
5
 
6
  model_id = 'Norod78/sd2-simpsons-blip'
7
+ prefix = None
8
 
9
  scheduler = DPMSolverMultistepScheduler(
10
  beta_start=0.00085,
 
37
  return f"""#### {title}
38
  {error}""" if error else ""
39
 
40
+ def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
41
 
42
  generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
 
43
 
44
  try:
45
  if img is not None:
 
109
  with gr.Column(scale=55):
110
  with gr.Group():
111
  with gr.Row():
112
+ prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="[your prompt]").style(container=False)
113
  generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
114
 
115
  image_out = gr.Image(height=512)
 
118
  with gr.Column(scale=45):
119
  with gr.Tab("Options"):
120
  with gr.Group():
121
+ neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
 
122
 
123
  with gr.Row():
124
  guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
 
135
  image = gr.Image(label="Image", height=256, tool="editor", type="pil")
136
  strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
137
 
138
+ inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt]
 
 
139
  outputs = [image_out, error_output]
140
  prompt.submit(inference, inputs=inputs, outputs=outputs)
141
  generate.click(inference, inputs=inputs, outputs=outputs)