SusiePHaltmann committed
Commit dbd58af
1 Parent(s): 07c72b0

Update app.py

Files changed (1):
  1. app.py +184 -5
app.py CHANGED
@@ -1,5 +1,184 @@
- ## make a streamlit app gui that says hello world
- pip3 install "streamlit"
- import streamlit as st
- st.streamlit_version()
- st.title("FLAMESTOPIA.AI SYSTEMS (C) 2022-20XX - Version 0.0.1")
+ from PIL import Image
+ from IPython.display import display
+ import torch as th
+ import gradio as gr
+
+ from glide_text2im.download import load_checkpoint
+ from glide_text2im.model_creation import (
+     create_model_and_diffusion,
+     model_and_diffusion_defaults,
+     model_and_diffusion_defaults_upsampler
+ )
+
+ # This app supports both CPU and GPU.
+ # On CPU, generating one sample may take on the order of 20 minutes.
+ # On a GPU, it should be under a minute.
+
+ has_cuda = th.cuda.is_available()
+ device = th.device('cpu' if not has_cuda else 'cuda')
+ print('Using device:', device)
+
+ # Create base model.
+ options = model_and_diffusion_defaults()
+ options['use_fp16'] = has_cuda
+ options['timestep_respacing'] = '100' # use 100 diffusion steps for fast sampling
+ model, diffusion = create_model_and_diffusion(**options)
+ model.eval()
+ if has_cuda:
+     model.convert_to_fp16()
+ model.to(device)
+ model.load_state_dict(load_checkpoint('base', device))
+ print('total base parameters', sum(x.numel() for x in model.parameters()))
+
+ # Create upsampler model.
+ options_up = model_and_diffusion_defaults_upsampler()
+ options_up['use_fp16'] = has_cuda
+ options_up['timestep_respacing'] = 'fast27' # use 27 diffusion steps for very fast sampling
+ model_up, diffusion_up = create_model_and_diffusion(**options_up)
+ model_up.eval()
+ if has_cuda:
+     model_up.convert_to_fp16()
+ model_up.to(device)
+ model_up.load_state_dict(load_checkpoint('upsample', device))
+ print('total upsampler parameters', sum(x.numel() for x in model_up.parameters()))
+
+ def show_images(batch: th.Tensor):
+     """Display a batch of images inline."""
+     # Map [-1, 1] floats to [0, 255] uint8, then tile the batch into one wide strip.
+     scaled = ((batch + 1)*127.5).round().clamp(0,255).to(th.uint8).cpu()
+     reshaped = scaled.permute(2, 0, 3, 1).reshape([batch.shape[2], -1, 3])
+     #display(Image.fromarray(reshaped.numpy()))
+     #Image.fromarray(reshaped.numpy()).save('image.png')
+
+
+ def get_images(batch: th.Tensor):
+     """Convert a batch of images into a single PIL image."""
+     scaled = ((batch + 1)*127.5).round().clamp(0,255).to(th.uint8).cpu()
+     reshaped = scaled.permute(2, 0, 3, 1).reshape([batch.shape[2], -1, 3])
+     img = Image.fromarray(reshaped.numpy())
+     #img.save('img.png')
+     return img
+
+ # Sampling parameters
+ batch_size = 1
+ guidance_scale = 3.0
+
+ # Tune this parameter to control the sharpness of 256x256 images.
+ # A value of 1.0 is sharper, but sometimes results in grainy artifacts.
+ upsample_temp = 0.997
+
+
+ # Create a classifier-free guidance sampling function
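+ # The batch is the prompt-conditioned half followed by an unconditioned half;
+ # both halves are denoised in a single forward pass, and the noise prediction
+ # is recombined as eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps).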
+ def model_fn(x_t, ts, **kwargs):
+     half = x_t[: len(x_t) // 2]
+     combined = th.cat([half, half], dim=0)
+     model_out = model(combined, ts, **kwargs)
+     eps, rest = model_out[:, :3], model_out[:, 3:]
+     cond_eps, uncond_eps = th.split(eps, len(eps) // 2, dim=0)
+     half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
+     eps = th.cat([half_eps, half_eps], dim=0)
+     return th.cat([eps, rest], dim=1)
+
+ def run(prompt):
+
+     ##############################
+     # Sample from the base model #
+     ##############################
+
+     # Create the text tokens to feed to the model.
+     tokens = model.tokenizer.encode(prompt)
+     tokens, mask = model.tokenizer.padded_tokens_and_mask(
+         tokens, options['text_ctx']
+     )
+
+     # Create the classifier-free guidance tokens (empty)
+     full_batch_size = batch_size * 2
+     uncond_tokens, uncond_mask = model.tokenizer.padded_tokens_and_mask(
+         [], options['text_ctx']
+     )
+
+     # Pack the tokens together into model kwargs.
+     model_kwargs = dict(
+         tokens=th.tensor(
+             [tokens] * batch_size + [uncond_tokens] * batch_size, device=device
+         ),
+         mask=th.tensor(
+             [mask] * batch_size + [uncond_mask] * batch_size,
+             dtype=th.bool,
+             device=device,
+         ),
+     )
+
+     print('run():')
+
+     # Sample from the base model.
+     print(' # Sample from the base model.')
+     model.del_cache()
+     samples = diffusion.p_sample_loop(
+         model_fn,
+         (full_batch_size, 3, options["image_size"], options["image_size"]),
+         device=device,
+         clip_denoised=True,
+         progress=True,
+         model_kwargs=model_kwargs,
+         cond_fn=None,
+     )[:batch_size]
+     model.del_cache()
+
+     # Show the output
+     print(' # Show the output')
+     #show_images(samples)
+
+     ##############################
+     # Upsample the 64x64 samples #
+     ##############################
+
+     tokens = model_up.tokenizer.encode(prompt)
+     tokens, mask = model_up.tokenizer.padded_tokens_and_mask(
+         tokens, options_up['text_ctx']
+     )
+
+     # Create the model conditioning dict.
+     print(' # Create the model conditioning dict.')
+     model_kwargs = dict(
+         # Low-res image to upsample.
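+         # The round() trip through 8-bit values quantizes the base samples,
+         # which (presumably) matches the quantization of real training images.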
+         low_res=((samples+1)*127.5).round()/127.5 - 1,
+
+         # Text tokens
+         tokens=th.tensor(
+             [tokens] * batch_size, device=device
+         ),
+         mask=th.tensor(
+             [mask] * batch_size,
+             dtype=th.bool,
+             device=device,
+         ),
+     )
+
+     # Sample from the upsampler model.
+     print(' # Sample from the upsampler model.')
+     model_up.del_cache()
+     up_shape = (batch_size, 3, options_up["image_size"], options_up["image_size"])
+     up_samples = diffusion_up.ddim_sample_loop(
+         model_up,
+         up_shape,
+         noise=th.randn(up_shape, device=device) * upsample_temp,
+         device=device,
+         clip_denoised=True,
+         progress=True,
+         model_kwargs=model_kwargs,
+         cond_fn=None,
+     )[:batch_size]
+     model_up.del_cache()
+
+     # Show the output
+     print(' # Show the output')
+     out_images = get_images(up_samples)
+
+     return out_images
+
+
+ iface = gr.Interface(
+     fn=run,
+     inputs=["text"],
+     outputs=["image"])
+ iface.launch()
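
For anyone running this Space outside the hosted UI, here is a minimal local sketch, assuming the glide-text2im package is installed (e.g. pip install git+https://github.com/openai/glide-text2im) and that this app.py is on the import path; the prompt and filename are illustrative only:

    # Hypothetical local driver for the app above.
    # Note: importing app runs the model setup (and downloads checkpoints) at
    # import time, and iface.launch() will also start the Gradio server, so for
    # a pure-script workflow you may want to guard or remove that call first.
    from app import run

    img = run("an oil painting of a corgi")  # returns a 256x256 PIL Image
    img.save("corgi.png")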