import json

import streamlit as st
import torch
from accelerate import Accelerator
from huggingface_hub import file_download
from huggingface_hub.hf_api import HfApi
from PIL import Image
from streamlit_option_menu import option_menu
from torchvision import transforms as T
from torchvision.transforms import Compose, Normalize, ToTensor
from torchvision.utils import make_grid

from huggan.pytorch.cyclegan.modeling_cyclegan import GeneratorResNet
from huggingnft.lightweight_gan.lightweight_gan import Trainer
from huggingnft.lightweight_gan.train import timestamped_filename

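# Collect all model repos published under the "huggingnft" organization on the Hub.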
hfapi = HfApi()
model_names = [model.modelId[model.modelId.index("/") + 1:] for model in hfapi.list_models(author="huggingnft")]


ABOUT_TEXT = "🤗 Hugging NFT - Generate NFTs from an OpenSea collection name."
CONTACT_TEXT = "Get in touch through the project links in the sidebar."
GENERATE_IMAGE_TEXT = "Generate NFT images with a lightweight GAN trained on the selected OpenSea collection."
INTERPOLATION_TEXT = "Interpolate between random latent points of the selected collection's GAN."
COLLECTION2COLLECTION_TEXT = "Translate NFTs from one collection into the style of another with a CycleGAN translator."

STOPWORDS = ["-old"]
COLLECTION2COLLECTION_KEYS = ["__2__"]


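# Rebuild a lightweight GAN Trainer from the config.json stored in the model repo,
# load its checkpoint on CPU, and attach an Accelerator for the generation helpers.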
def load_lightweight_model(model_name):
    file_path = file_download.hf_hub_download(
        repo_id=model_name,
        filename="config.json"
    )
    with open(file_path) as config_file:
        config = json.load(config_file)
    organization_name, name = model_name.split("/")
    model = Trainer(**config, organization_name=organization_name, name=name)
    model.load(use_cpu=True)
    model.accelerator = Accelerator()
    return model


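# Filter out model names that contain any of the given stopwords
# (used both to hide "-old" repos and to separate collection2collection models).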
def clean_models(model_names, stopwords):
    cleaned_model_names = []
    for model_name in model_names:
        clear = True
        for stopword in stopwords:
            if stopword in model_name:
                clear = False
                break
        if clear:
            cleaned_model_names.append(model_name)
    return cleaned_model_names

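# Concatenate two PIL images horizontally: im1 on the left, im2 on the right.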
def get_concat_h(im1, im2):
    dst = Image.new('RGB', (im1.width + im2.width, im1.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (im1.width, 0))
    return dst

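# Hide deprecated repos (e.g. "-old") from every selector.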
model_names = clean_models(model_names, STOPWORDS)

with st.sidebar:
    choose = option_menu("Hugging NFT",
                         ["About", "Generate image", "Interpolation", "Collection2Collection", "Contact"],
                         icons=['house', 'camera fill', 'bi bi-youtube', 'book', 'person lines fill'],
                         menu_icon="app-indicator", default_index=0,
                         styles={
                             "container": {"border-radius": ".0rem"},
                         }
                         )
st.sidebar.markdown(
    """
<style>
.aligncenter {
    text-align: center;
}
</style>
<p style='text-align: center'>
<a href="https://github.com/AlekseyKorshuk/huggingnft" target="_blank">Project Repository</a>
</p>
<p class="aligncenter">
    <a href="https://github.com/AlekseyKorshuk/huggingnft" target="_blank"> 
        <img src="https://img.shields.io/github/stars/AlekseyKorshuk/huggingnft?style=social"/>
    </a>
</p>
<p class="aligncenter">
    <a href="https://twitter.com/alekseykorshuk" target="_blank"> 
        <img src="https://img.shields.io/twitter/follow/alekseykorshuk?style=social"/>
    </a>
</p>
    """,
    unsafe_allow_html=True,
)

if choose == "About":
    st.title(choose)
    st.markdown(ABOUT_TEXT)

if choose == "Contact":
    st.title(choose)
    st.markdown(CONTACT_TEXT)

if choose == "Generate image":
    st.title(choose)
    st.markdown(GENERATE_IMAGE_TEXT)

    model_name = st.selectbox(
        'Choose model:',
        clean_models(model_names, COLLECTION2COLLECTION_KEYS)
    )
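    # "ema" samples with the exponential-moving-average copy of the generator weights;
    # "default" uses the raw generator weights.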
    generation_type = st.selectbox(
        'Select generation type:',
        ["default", "ema"]
    )

    nrows = st.number_input("Number of rows:",
                            min_value=1,
                            max_value=10,
                            step=1,
                            value=8,
                            )
    generate_image_button = st.button("Generate")

    if generate_image_button:
        with st.spinner(text=f"Downloading selected model..."):
            model = load_lightweight_model(f"huggingnft/{model_name}")
        with st.spinner(text=f"Generating..."):
            st.image(
                model.generate_app(
                    num=timestamped_filename(),
                    nrow=nrows,
                    checkpoint=-1,
                    types=generation_type
                )[0]
            )

if choose == "Interpolation":
    st.title(choose)
    st.markdown(INTERPOLATION_TEXT)

    model_name = st.selectbox(
        'Choose model:',
        clean_models(model_names, COLLECTION2COLLECTION_KEYS)
    )
    nrows = st.number_input("Number of rows:",
                            min_value=1,
                            max_value=10,
                            step=1,
                            value=1,
                            )

    num_steps = st.number_input("Number of steps:",
                                min_value=1,
                                max_value=1000,
                                step=1,
                                value=100,
                                )
    generate_image_button = st.button("Generate")

    if generate_image_button:
        with st.spinner(text=f"Downloading selected model..."):
            model = load_lightweight_model(f"huggingnft/{model_name}")
        my_bar = st.progress(0)
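        # Interpolate between random latent points, rendering num_steps frames and
        # reporting progress through the Streamlit progress bar.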
        result = model.generate_interpolation(
            num=timestamped_filename(),
            num_image_tiles=nrows,
            num_steps=num_steps,
            save_frames=False,
            progress_bar=my_bar
        )
        my_bar.empty()
        with st.spinner(text=f"Uploading result..."):
            st.image(result)

if choose == "Collection2Collection":
    st.title(choose)
    st.markdown(COLLECTION2COLLECTION_TEXT)

    model_name = st.selectbox(
        'Choose model:',
        set(model_names) - set(clean_models(model_names, COLLECTION2COLLECTION_KEYS))
    )
    nrows = st.number_input("Number of images to generate:",
                            min_value=1,
                            max_value=10,
                            step=1,
                            value=1,
                            )
    generate_image_button = st.button("Generate")

    if generate_image_button:
        n_channels = 3
        image_size = 256
        input_shape = (image_size, image_size)

        transform = Compose([
            T.ToPILImage(),
            T.Resize(input_shape),
            ToTensor(),
            Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

        # Download the CycleGAN translator for the selected source__2__target pair.
        with st.spinner(text="Downloading translation model..."):
            translator = GeneratorResNet.from_pretrained(
                f"huggingnft/{model_name}",
                input_shape=(n_channels, image_size, image_size),
                num_residual_blocks=9,
            )

        # Download the lightweight GAN of the source collection (the part before "__2__").
        with st.spinner(text="Downloading source collection model..."):
            model = load_lightweight_model(f"huggingnft/{model_name.split('__2__')[0]}")

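        # Sample a grid of source-collection images to feed the CycleGAN translator.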
        with st.spinner(text=f"Generating input images..."):
            punks = model.generate_app(
                num=timestamped_filename(),
                nrow=4,
                checkpoint=-1,
                types="default"
            )[1]

        # Resize the generated source images to the translator's expected resolution.
        pipe_transform = T.Resize((image_size, image_size))

        input = pipe_transform(punks)

        with st.spinner(text="Generating output images..."):
            with torch.no_grad():
                output = translator(input)

        out_transform = Compose([
            T.ToPILImage()
        ])

        results = []

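        # Pair each input image with its translated output and display them side by side.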
        for out_punk, out_ape in zip(input, output):
            results.append(
                get_concat_h(out_transform(make_grid(out_punk, nrow=1, normalize=True)), out_transform(make_grid(out_ape, nrow=1, normalize=True)))
            )
        for result in results:
            st.image(result)