fffiloni committed on
Commit
374aa25
1 Parent(s): 5fb1dae

Update app.py

Files changed (1)
  app.py +4 -2
app.py CHANGED
@@ -37,8 +37,10 @@ with gr.Blocks(css=css) as demo:
     Direct Preference Optimization (DPO) for text-to-image diffusion models is a method to align diffusion models to text human preferences by directly optimizing on human comparison data.
     </p>
     """)
-    prompt_in = gr.Textbox(label="Prompt", value="An old man with a bird on his head")
-    submit_btn = gr.Button("Submit")
+    with gr.Group():
+        with gr.Column():
+            prompt_in = gr.Textbox(label="Prompt", value="An old man with a bird on his head")
+            submit_btn = gr.Button("Submit")
     result = gr.Image(label="DPO SDXL Result")
 
     gr.Examples(
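
For context, a minimal runnable sketch of the layout this commit introduces: the prompt textbox and submit button are now wrapped in a gr.Group / gr.Column so they render as a single stacked card above the result image. Only the widgets visible in the hunk are kept; the Space's CSS, the DPO SDXL pipeline, the gr.Examples block, and the real click wiring are stubbed or omitted, and the infer stub below is hypothetical rather than the Space's actual inference function.

import gradio as gr

def infer(prompt):
    # Hypothetical stand-in for the Space's DPO SDXL generation call.
    return None

with gr.Blocks() as demo:
    with gr.Group():          # groups the inputs into one bordered card
        with gr.Column():     # stacks the textbox above the button
            prompt_in = gr.Textbox(label="Prompt", value="An old man with a bird on his head")
            submit_btn = gr.Button("Submit")
    result = gr.Image(label="DPO SDXL Result")
    submit_btn.click(fn=infer, inputs=prompt_in, outputs=result)

if __name__ == "__main__":
    demo.launch()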