vict0rsch committed on
Commit
ac09955
1 Parent(s): 95ba8da
Files changed (2) hide show
  1. app.py +84 -7
  2. climategan_wrapper.py +3 -2
app.py CHANGED
@@ -8,6 +8,7 @@ from skimage import io
8
  from urllib import parse
9
  import numpy as np
10
  from climategan_wrapper import ClimateGAN
 
11
 
12
 
13
  def predict(cg: ClimateGAN, api_key):
@@ -90,20 +91,96 @@ if __name__ == "__main__":
90
  )
91
  cg._setup_stable_diffusion()
92
 
93
- with gr.Blocks() as blocks:
 
 
 
 
 
 
 
 
 
 
 
 
94
  with gr.Row():
95
  with gr.Column():
96
  gr.Markdown("# ClimateGAN: Visualize Climate Change")
97
  gr.HTML(
98
- 'Climate change does not impact everyone equally. This Space shows the effects of the climate emergency, "one address at a time". Visit the original experience at <a href="https://thisclimatedoesnotexist.com/">ThisClimateDoesNotExist.com</a>.<br>Enter an address or upload a Street View image, and ClimateGAN will generate images showing how the location could be impacted by flooding, wildfires, or smog if it happened there.' # noqa: E501
99
- + "<br><br>This is <strong>not</strong> an exercise in climate prediction, rather an exercise of empathy, to put yourself in other's shoes, as if Climate Change came crushing on your doorstep." # noqa: E501
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
  )
101
  with gr.Column():
102
  gr.HTML(
103
- "<p style='text-align: center'>Visit <a href='https://thisclimatedoesnotexist.com/'>ThisClimateDoesNotExist</a> for more information. | Original <a href='https://github.com/cc-ai/climategan'>ClimateGAN GitHub Repo</a></p>" # noqa: E501
104
- + "<p>After you have selected an image and started the inference you will see all the outputs of ClimateGAN, including intermediate outputs such as the flood mask, the segmentation map and the depth maps used to produce the 3 events</p>"
105
- + "<p>This Space makes use of recent Stable Diffusion in-painting pipelines to replace ClimateGAN's original Painter. If you select 'Both' painters, you will see a comparison</p>"
106
- + "<p>Read the original <a href='https://openreview.net/forum?id=EZNOb_uNpJk', target='_blank'>ICLR 2021 ClimateGAN paper</a></p>"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
  )
108
  with gr.Row():
109
  gr.Markdown("## Inputs")
 
8
  from urllib import parse
9
  import numpy as np
10
  from climategan_wrapper import ClimateGAN
11
+ from textwrap import dedent
12
 
13
 
14
  def predict(cg: ClimateGAN, api_key):
 
91
  )
92
  cg._setup_stable_diffusion()
93
 
94
+ with gr.Blocks(
95
+ css=dedent(
96
+ """
97
+ a {
98
+ color: #0088ff;
99
+ text-decoration: underline;
100
+ }
101
+ strong {
102
+ color: #c34318;
103
+ }
104
+ """
105
+ )
106
+ ) as blocks:
107
  with gr.Row():
108
  with gr.Column():
109
  gr.Markdown("# ClimateGAN: Visualize Climate Change")
110
  gr.HTML(
111
+ dedent(
112
+ """
113
+ <p>
114
+ Climate change does not impact everyone equally.
115
+ This Space shows the effects of the climate emergency,
116
+ "one address at a time".
117
+ </p>
118
+
119
+ <p>
120
+ Visit the original experience at
121
+ <a href="https://thisclimatedoesnotexist.com/">
122
+ ThisClimateDoesNotExist.com
123
+ </a>
124
+ </p>
125
+
126
+ <br>
127
+
128
+ <p>
129
+ Enter an address or upload a Street View image, and ClimateGAN
130
+ will generate images showing how the location could be impacted
131
+ by flooding, wildfires, or smog if it happened there.
132
+ </p>
133
+
134
+ <br>
135
+
136
+ <p>
137
+ This is <strong>not</strong> an exercise in climate prediction,
138
+ rather an exercise of empathy, to put yourself in other's shoes,
139
+ as if Climate Change came crushing on your doorstep.
140
+ </p>
141
+ """
142
+ )
143
  )
144
  with gr.Column():
145
  gr.HTML(
146
+ dedent(
147
+ """
148
+ <p style='text-align: center'>
149
+ Visit
150
+ <a href='https://thisclimatedoesnotexist.com/'>
151
+ ThisClimateDoesNotExist.com
152
+ </a>
153
+ for more information.
154
+ |
155
+ Original
156
+ <a href='https://github.com/cc-ai/climategan'>
157
+ ClimateGAN GitHub Repo
158
+ </a>
159
+ </p>
160
+
161
+ <p>
162
+ After you have selected an image and started the inference you
163
+ will see all the outputs of ClimateGAN, including intermediate
164
+ outputs such as the flood mask, the segmentation map and the
165
+ depth maps used to produce the 3 events.
166
+ </p>
167
+
168
+ <p>
169
+ This Space makes use of recent Stable Diffusion in-painting
170
+ pipelines to replace ClimateGAN's original Painter. If you
171
+ select 'Both' painters, you will see a comparison
172
+ </p>
173
+
174
+ <p>
175
+ Read the original
176
+ <a
177
+ href='https://openreview.net/forum?id=EZNOb_uNpJk'
178
+ target='_blank'>
179
+ ICLR 2021 ClimateGAN paper
180
+ </a>
181
+ </p>
182
+ """
183
+ )
184
  )
185
  with gr.Row():
186
  gr.Markdown("## Inputs")
climategan_wrapper.py CHANGED
@@ -334,6 +334,7 @@ class ClimateGAN:
334
  if pil_image is not None:
335
  print("Warning: `pil_image` has been provided, it will override `images`")
336
  images = self._preprocess_image(np.array(pil_image))[None, ...]
 
337
 
338
  # Retrieve numpy events as a dict {event: array[BxHxWxC]}
339
  outputs = self.trainer.infer_all(
@@ -372,8 +373,8 @@ class ClimateGAN:
372
  if pil_image is None
373
  else Image.fromarray(mask[0])
374
  )
375
- print('input_mask size: ', input_mask.size)
376
- print('input_images size: ', input_images.size)
377
  floods = self.sdip_pipeline(
378
  prompt=[prompt] * images.shape[0],
379
  image=input_images,
 
334
  if pil_image is not None:
335
  print("Warning: `pil_image` has been provided, it will override `images`")
336
  images = self._preprocess_image(np.array(pil_image))[None, ...]
337
+ pil_image = Image.fromarray(((images[0] + 1) / 2 * 255).astype(np.uint8))
338
 
339
  # Retrieve numpy events as a dict {event: array[BxHxWxC]}
340
  outputs = self.trainer.infer_all(
 
373
  if pil_image is None
374
  else Image.fromarray(mask[0])
375
  )
376
+ print("input_mask size: ", input_mask.size)
377
+ print("input_images size: ", input_images.size)
378
  floods = self.sdip_pipeline(
379
  prompt=[prompt] * images.shape[0],
380
  image=input_images,