czl committed on
Commit bd14b9f
Parent: 846f790

added export format

Files changed (1)
  1. app.py +30 -5
app.py CHANGED
@@ -184,16 +184,21 @@ def update_sampling_steps(total_steps, sample_steps):
     return gr.update(value=total_steps)
 
 
+def update_format(image_format):
+    return gr.update(format=image_format)
+
+
 if torch.cuda.is_available():
     power_device = "GPU"
 else:
     power_device = "CPU"
 
-with gr.Blocks(title="Generative Date Augmentation") as demo:
+with gr.Blocks(title="Generative Date Augmentation Demo") as demo:
 
     gr.Markdown(
         """
 # Data Augmentation with Image-to-Image Diffusion Models via Prompt Interpolation.
+Main GitHub Repo: [Generative Data Augmentation](https://github.com/zhulinchng/generative-data-augmentation) | Image Classification Demo: [Generative Augmented Classifiers](https://huggingface.co/spaces/czl/generative-augmented-classifiers).
         """
     )
     with gr.Row():
@@ -203,7 +208,7 @@ with gr.Blocks(title="Generative Date Augmentation") as demo:
 
         with gr.Row():
             prompt1 = gr.Text(
-                label="Prompt 1",
+                label="Prompt for the image to synthesize. (Actual class)",
                 show_label=True,
                 max_lines=1,
                 placeholder="Enter your first prompt",
@@ -211,7 +216,7 @@ with gr.Blocks(title="Generative Date Augmentation") as demo:
             )
         with gr.Row():
             prompt2 = gr.Text(
-                label="Prompt 2",
+                label="Prompt to augment against. (Confusing class)",
                 show_label=True,
                 max_lines=1,
                 placeholder="Enter your second prompt",
@@ -321,9 +326,23 @@ with gr.Blocks(title="Generative Date Augmentation") as demo:
                 step=2,
                 value=0,
             )
+            with gr.Row():
+                image_type = gr.Radio(
+                    choices=[
+                        "webp",
+                        "png",
+                        "jpeg",
+                    ],
+                    label="Download Image Format",
+                    value="jpeg",
+                )
         with gr.Column():
-            result = gr.Image(label="Result", show_label=False)
-
+            result = gr.Image(label="Result", show_label=False, format="jpeg")
+            image_type.change(
+                fn=update_format,
+                inputs=[image_type],
+                outputs=[result],
+            )
     gr.Markdown(
         """
 Metadata:
@@ -348,7 +367,13 @@ Currently running on {power_device}.
 Note: Running on CPU will take longer (approx. 6 minutes with default settings).
         """
     )
+    gr.Markdown(
+        """
+This demo is created as part of the 'Investigating the Effectiveness of Generative Diffusion Models in Synthesizing Images for Data Augmentation in Image Classification' dissertation.
 
+The user can augment an image by interpolating between two prompts, and specify the number of interpolation steps and the specific step to generate the image.
+        """
+    )
     run_button.click(
         fn=infer,
         inputs=[
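
For reference, the export-format wiring introduced by this commit can be exercised on its own. The following is a minimal, self-contained sketch (not the author's full app.py; the diffusion pipeline, prompts, and sliders are omitted), assuming a Gradio 4.x environment where gr.Image accepts a format argument and gr.update(format=...) can change it from an event handler:

    import gradio as gr

    def update_format(image_format):
        # Return an update that switches the format used when the result image is downloaded.
        return gr.update(format=image_format)

    with gr.Blocks(title="Export format sketch") as demo:
        image_type = gr.Radio(
            choices=["webp", "png", "jpeg"],
            label="Download Image Format",
            value="jpeg",
        )
        # Placeholder output; in the real app this is populated by the inference function.
        result = gr.Image(label="Result", show_label=False, format="jpeg")
        # Re-render the output component with the newly selected format.
        image_type.change(fn=update_format, inputs=[image_type], outputs=[result])

    if __name__ == "__main__":
        demo.launch()

This keeps format selection on the output component itself, so the inference function does not need to know anything about the chosen download format.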