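"""Zahir: a Gradio demo that learns a user's visual preferences without prompts.

Images are generated with an SDXL-Lightning UNet and conditioned on IP-Adapter image
embeddings; a linear SVM fit on the user's like/dislike ratings supplies the
preference direction used to steer each new generation.
"""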
DEVICE = 'cuda'

import gradio as gr
import numpy as np
from sklearn.svm import LinearSVC
from sklearn import preprocessing
import pandas as pd

import torch
from diffusers import AutoencoderTiny, EulerDiscreteScheduler, UNet2DConditionModel, AutoPipelineForText2Image

import random
import time

from transformers import CLIPVisionModelWithProjection
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
import spaces

# Unique string prompts from the CSV, used as random prompts while roaming.
prompt_list = [p for p in set(pd.read_csv('./twitter_prompts.csv').iloc[:, 1].tolist())
               if isinstance(p, str)]

start_time = time.time()

####################### Setup Model
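# SDXL base with the SDXL-Lightning 2-step distilled UNet, so images can be
# sampled in two steps with guidance_scale=0.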
model_id = "stabilityai/stable-diffusion-xl-base-1.0"
sdxl_lightening = "ByteDance/SDXL-Lightning"
ckpt = "sdxl_lightning_2step_unet.safetensors"
unet = UNet2DConditionModel.from_config(model_id, subfolder="unet").to(DEVICE, torch.float16)
unet.load_state_dict(load_file(hf_hub_download(sdxl_lightening, ckpt), device=DEVICE))

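# Attach the IP-Adapter (ViT-H image encoder + adapter weights) so generation can be
# conditioned on CLIP image embeddings rather than text alone.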
image_encoder = CLIPVisionModelWithProjection.from_pretrained("h94/IP-Adapter",  subfolder="models/image_encoder", torch_dtype=torch.float16,).to(DEVICE)
pipe = AutoPipelineForText2Image.from_pretrained(model_id, unet=unet, torch_dtype=torch.float16, variant="fp16", image_encoder=image_encoder).to(DEVICE)
pipe.unet._load_ip_adapter_weights(torch.load(hf_hub_download('h94/IP-Adapter', 'sdxl_models/ip-adapter_sdxl_vit-h.bin')))
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl_vit-h.bin")
pipe.register_modules(image_encoder=image_encoder)

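# Tiny VAE for fast decoding; trailing timestep spacing as expected by the Lightning checkpoint.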
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesdxl", torch_dtype=torch.float16)
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
pipe.to(device=DEVICE)


output_hidden_state = False  # pipe.encode_image should return the projected CLIP image embedding, not hidden states
#######################

@spaces.GPU
def predict(
        prompt,
        im_emb=None,
        progress=gr.Progress(track_tqdm=True)
    ):
    """Run a single prediction on the model"""
    with torch.no_grad():
        # No reference embedding supplied: fall back to a zero IP-Adapter image embedding.
        if im_emb is None:
            im_emb = torch.zeros(1, 1024, dtype=torch.float16, device=DEVICE)
            
        im_emb = [im_emb.to(DEVICE).unsqueeze(0)]
        # Empty prompt: pass zeroed text embeddings so the IP-Adapter image embedding
        # alone drives the generation.
        if prompt == '':
            image = pipe(
                prompt_embeds=torch.zeros(1, 1, 2048, dtype=torch.float16, device=DEVICE),
                pooled_prompt_embeds=torch.zeros(1, 1280, dtype=torch.float16, device=DEVICE),
                ip_adapter_image_embeds=im_emb,
                height=1024,
                width=1024,
                num_inference_steps=2,
                guidance_scale=0,
            ).images[0]
        else:
            image = pipe(
                prompt=prompt,
                ip_adapter_image_embeds=im_emb,
                height=1024,
                width=1024,
                num_inference_steps=2,
                guidance_scale=0,
            ).images[0]
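        # Re-encode the generated image with the CLIP image encoder so its embedding
        # can be rated by the user and fed back into the recommender.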
        im_emb, _ = pipe.encode_image(
                image, DEVICE, 1, output_hidden_state
            )
        return image, im_emb.to('cpu')

# TODO: keep this in per-session gr.State instead of sharing it across all users
glob_idx = 0

def next_image(embs, ys, calibrate_prompts):
    global glob_idx
    glob_idx = glob_idx + 1

    # If calibration is finished but every rating is the same class, add two tiny
    # random embeddings with opposite labels so the SVM has both classes to fit.
    if len(calibrate_prompts) == 0 and len(list(set(ys))) <= 1:
        embs.append(.01*torch.randn(1, 1024))
        embs.append(.01*torch.randn(1, 1024))
        ys.append(0)
        ys.append(1)
        
    with torch.no_grad():
        if len(calibrate_prompts) > 0:
            print('######### Calibrating with sample prompts #########')
            prompt = calibrate_prompts.pop(0)
            print(prompt)
            image, img_emb = predict(prompt)
            embs.append(img_emb)
            return image, embs, ys, calibrate_prompts
        else:
            print('######### Roaming #########')
            # Sample 80% of the rated embeddings (at least two) for some stochasticity.
            n_to_choose = max(int(len(embs)*.8), 2)
            indices = random.sample(range(len(embs)), n_to_choose)
            
            # also add the latest 0 and the latest 1
            has_0 = False
            has_1 = False
            for i in reversed(range(len(ys))):
                if ys[i] == 0 and has_0 == False:
                    indices.append(i)
                    has_0 = True
                elif ys[i] == 1 and has_1 == False:
                    indices.append(i)
                    has_1 = True
                if has_0 and has_1:
                    break
            
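            # Standardize the selected CLIP embeddings and fit a linear SVM on the like/dislike
            # labels; its unit-normalized weight vector points from "dislike" toward "like" and is
            # reused below as the next IP-Adapter image embedding.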
            feature_embs = np.array(torch.cat([embs[i].to('cpu') for i in indices]).to('cpu'))
            scaler = preprocessing.StandardScaler().fit(feature_embs)
            feature_embs = scaler.transform(feature_embs)

            lin_class = LinearSVC(max_iter=50000, dual='auto', class_weight='balanced').fit(feature_embs, np.array([ys[i] for i in indices]))
            lin_class.coef_ = torch.tensor(lin_class.coef_, dtype=torch.double)
            lin_class.coef_ = (lin_class.coef_.flatten() / (lin_class.coef_.flatten().norm())).unsqueeze(0)

            rng_prompt = random.choice(prompt_list)
            w = 1  # scale on the preference direction (could be alternated, e.g. 1 if len(embs) % 2 == 0 else 0)
            im_emb = w * lin_class.coef_.to(dtype=torch.float16)
            # Alternate between a generic prompt and a random prompt from the CSV.
            prompt = 'an image' if glob_idx % 2 == 0 else rng_prompt
            print(prompt, len(ys))
            image, im_emb = predict(prompt, im_emb)
            embs.append(im_emb)
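            # Keep only the most recent 100 embeddings and ratings.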
            if len(embs) > 100:
                embs.pop(0)
                ys.pop(0)
            return image, embs, ys, calibrate_prompts


def start(_, embs, ys, calibrate_prompts):
    image, embs, ys, calibrate_prompts = next_image(embs, ys, calibrate_prompts)
    return [
            gr.Button(value='Like (L)', interactive=True), 
            gr.Button(value='Neither (Space)', interactive=True), 
            gr.Button(value='Dislike (A)', interactive=True),
            gr.Button(value='Start', interactive=False),
            image,
            embs,
            ys,
            calibrate_prompts
            ]


def choose(choice, embs, ys, calibrate_prompts):
    if choice == 'Like (L)':
        choice = 1
    elif choice == 'Neither (Space)':
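        # 'Neither' provides no label: drop the unrated embedding and fetch another image.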
        _ = embs.pop(-1)
        img, embs, ys, calibrate_prompts = next_image(embs, ys, calibrate_prompts)
        return img, embs, ys, calibrate_prompts
    else:
        choice = 0
    ys.append(choice)
    img, embs, ys, calibrate_prompts = next_image(embs, ys, calibrate_prompts)
    return img, embs, ys, calibrate_prompts

css = '''.gradio-container{max-width: 700px !important}
#description{text-align: center}
#description h1, #description h3{display: block}
#description p{margin-top: 0}
.fade-in-out {animation: fadeInOut 3s forwards}
@keyframes fadeInOut {
    0% {
      background: var(--bg-color);
    }
    100% {
      background: var(--button-secondary-background-fill);
    }
}
'''
js_head = '''
<script>
document.addEventListener('keydown', function(event) {
    if (event.key === 'a' || event.key === 'A') {
        // Trigger click on 'dislike' if 'A' is pressed
        document.getElementById('dislike').click();
    } else if (event.key === ' ' || event.keyCode === 32) {
        // Trigger click on 'neither' if Spacebar is pressed
        document.getElementById('neither').click();
    } else if (event.key === 'l' || event.key === 'L') {
        // Trigger click on 'like' if 'L' is pressed
        document.getElementById('like').click();
    }
});
function fadeInOut(button, color) {
  button.style.setProperty('--bg-color', color);
  button.classList.remove('fade-in-out');
  void button.offsetWidth; // This line forces a repaint by accessing a DOM property
  
  button.classList.add('fade-in-out');
  button.addEventListener('animationend', () => {
    button.classList.remove('fade-in-out'); // Reset the animation state
  }, {once: true});
}
document.body.addEventListener('click', function(event) {
    const target = event.target;
    if (target.id === 'dislike') {
      fadeInOut(target, '#ff1717');
    } else if (target.id === 'like') {
      fadeInOut(target, '#006500');
    } else if (target.id === 'neither') {
      fadeInOut(target, '#cccccc');
    }
});
</script>
'''

with gr.Blocks(css=css, head=js_head) as demo:
    gr.Markdown('''### Zahir: Generative Recommenders for Unprompted, Scalable Exploration
    Explore the latent space without text prompts, based on your preferences. Learn more on [the write-up](https://rynmurdock.github.io/posts/2024/3/generative_recomenders/).
    ''', elem_id="description")
    embs = gr.State([])
    ys = gr.State([])
    calibrate_prompts = gr.State([
    "4k photo",
    'surrealist art',
    # 'a psychedelic, fractal view',
    'a beautiful collage',
    'abstract art',
    'an eldritch image',
    'a sketch',
    # 'a city full of darkness and graffiti',
    '',  # empty prompt: one unconditional calibration image
    ])

    with gr.Row():
        img = gr.Image(interactive=False, elem_id='output-image', width=700)
    with gr.Row(equal_height=True):
        b3 = gr.Button(value='Dislike (A)', interactive=False, elem_id="dislike")
        b2 = gr.Button(value='Neither (Space)', interactive=False, elem_id="neither")
        b1 = gr.Button(value='Like (L)', interactive=False, elem_id="like")
        b1.click(
        choose, 
        [b1, embs, ys, calibrate_prompts],
        [img, embs, ys, calibrate_prompts]
        )
        b2.click(
        choose, 
        [b2, embs, ys, calibrate_prompts],
        [img, embs, ys, calibrate_prompts]
        )
        b3.click(
        choose, 
        [b3, embs, ys, calibrate_prompts],
        [img, embs, ys, calibrate_prompts]
        )
    with gr.Row():
        b4 = gr.Button(value='Start')
        b4.click(start,
                 [b4, embs, ys, calibrate_prompts],
                 [b1, b2, b3, b4, img, embs, ys, calibrate_prompts])
    with gr.Row():
        html = gr.HTML('''<div style='text-align:center; font-size:20px'>You will calibrate for several prompts and then roam.</div>''')

demo.launch()