mbrack committed on
Commit
6352ff4
1 Parent(s): f65ce19

Update app.py

Files changed (1): app.py +6 -6
app.py CHANGED
```diff
@@ -17,14 +17,14 @@ with zipfile.ZipFile("images/stable_diffusion.zip","r") as zip_ref:
 def open_stable_ims(profession):
     if len(profession) != 0:
         dirname = 'images/stable_diffusion/'+ profession+'/'
-        images = [Image.open(os.path.join(dirname+im)).convert("RGB") for im in os.listdir(dirname)]
-        return images[:18]
+        images = random.shuffle([Image.open(os.path.join(dirname+im)).convert("RGB") for im in os.listdir(dirname)])
+        return images[:16]
 
 def open_fair_ims(profession):
     if len(profession) != 0:
         dirname = 'images/fair_diffusion/' + profession+'/'
-        images = [Image.open(os.path.join(dirname+im)).convert("RGB") for im in os.listdir(dirname)]
-        return images[:18]
+        images = random.shuffle([Image.open(os.path.join(dirname+im)).convert("RGB") for im in os.listdir(dirname)])
+        return images[:16]
 
 
 
@@ -37,11 +37,11 @@ with gr.Blocks() as demo:
     with gr.Column():
         gr.Markdown('## Stable Diffusion Generations')
         choice1 = gr.Dropdown(professions, label = "Choose a profession", multiselect= False, interactive=True)
-        images1 = gr.Gallery(label="Images").style(grid=[3], height="auto")
+        images1 = gr.Gallery(label="Images").style(grid=[4], height="auto")
     with gr.Column():
         gr.Markdown('## Fair Diffusion Generations')
         choice2 = gr.Dropdown(professions, label = "Choose a profession", multiselect = False, interactive=True)
-        images2 = gr.Gallery(label="Images").style(grid=[3], height="auto")
+        images2 = gr.Gallery(label="Images").style(grid=[4], height="auto")
 
     gr.Markdown("We present a novel strategy, called **Fair Diffusion**, to attenuate biases after the deployment of generative text-to-image models. Specifically, we demonstrate shifting a bias, based on human instructions, in any direction yielding arbitrarily new proportions for, e.g., identity groups. As our empirical evaluation demonstrates, this introduced control enables instructing generative image models on fairness, with no data filtering and additional training required. For the full paper by Friedrich et al., see [here](https://arxiv.org/pdf/2302.10893.pdf).")
 
```
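A caveat on the new helper bodies: `random.shuffle` shuffles its list argument in place and returns `None`, so `images` is bound to `None` and the subsequent `images[:16]` raises a `TypeError`. A minimal sketch of the presumably intended behavior, shuffling first and slicing afterwards; the shared `open_ims` helper and its `prefix` parameter are illustrative here, not part of this commit:

```python
import os
import random

from PIL import Image


def open_ims(profession, prefix):
    """Load all images for a profession and return 16 of them at random.

    `prefix` would be e.g. 'images/stable_diffusion' or 'images/fair_diffusion'.
    """
    if len(profession) != 0:
        dirname = os.path.join(prefix, profession)
        images = [
            Image.open(os.path.join(dirname, im)).convert("RGB")
            for im in os.listdir(dirname)
        ]
        # random.shuffle mutates the list in place and returns None,
        # so shuffle first, then slice.
        random.shuffle(images)
        return images[:16]
```

Returning 16 images also lines up with the new `grid=[4]` gallery setting: four columns by four rows, instead of the previous 18 images in three columns.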
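A note for anyone reproducing this on a newer Gradio: `gr.Gallery(...).style(grid=[4], height="auto")` is the Gradio 3.x styling API; `.style()` was deprecated and later removed, with layout options moving onto the component constructor. A rough Gradio 4.x equivalent (an assumption about the runtime version, not part of this commit) would be:

```python
import gradio as gr

# Gradio 4.x: the grid layout is set directly on the component instead of .style()
images1 = gr.Gallery(label="Images", columns=4, height="auto")
```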