Update app.py
app.py (CHANGED)
@@ -37,8 +37,10 @@ with gr.Blocks(css=css) as demo:
         Direct Preference Optimization (DPO) for text-to-image diffusion models is a method to align diffusion models to text human preferences by directly optimizing on human comparison data.
         </p>
     """)
-
-
+    with gr.Group():
+        with gr.Column():
+            prompt_in = gr.Textbox(label="Prompt", value="An old man with a bird on his head")
+            submit_btn = gr.Button("Submit")
     result = gr.Image(label="DPO SDXL Result")
 
     gr.Examples(
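For context, here is a minimal, self-contained sketch of how the added controls would typically be wired inside the Blocks layout. Everything outside the hunk is an assumption for illustration: the `infer` stub, the empty `css` placeholder, the `gr.HTML` wrapper around the description, and the `submit_btn.click(...)` handler are not shown in this commit, and the real app.py presumably runs a DPO-aligned SDXL pipeline to produce the image.

import gradio as gr
from PIL import Image

def infer(prompt: str):
    # Placeholder generation function (assumed): the real app would call a
    # DPO-aligned SDXL pipeline here, e.g. pipe(prompt=prompt).images[0].
    # A blank image is returned so the sketch stays runnable without a GPU.
    return Image.new("RGB", (1024, 1024), "white")

css = ""  # placeholder; the real app.py defines its own CSS

with gr.Blocks(css=css) as demo:
    # Description shown in the hunk above; the gr.HTML wrapper and the
    # opening <p> tag are assumed, since they fall outside the diff context.
    gr.HTML("""
        <p>
        Direct Preference Optimization (DPO) for text-to-image diffusion models is a method to align diffusion models to text human preferences by directly optimizing on human comparison data.
        </p>
    """)
    with gr.Group():
        with gr.Column():
            prompt_in = gr.Textbox(label="Prompt", value="An old man with a bird on his head")
            submit_btn = gr.Button("Submit")
    result = gr.Image(label="DPO SDXL Result")

    # The gr.Examples(...) block that follows in the real file is omitted here
    # because its arguments are cut off by the hunk.

    # Assumed wiring: clicking Submit feeds the prompt to the generation
    # function and displays the returned image in the result component.
    submit_btn.click(fn=infer, inputs=[prompt_in], outputs=[result])

if __name__ == "__main__":
    demo.launch()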