|
import os |
|
from PIL import Image |
|
import torch |
|
import gradio as gr |
|
import torch |
|
torch.backends.cudnn.benchmark = True |
|
from torchvision import transforms, utils |
|
from util import * |
|
from PIL import Image |
|
import math |
|
import random |
|
import numpy as np |
|
from torch import nn, autograd, optim |
|
from torch.nn import functional as F |
|
from tqdm import tqdm |
|
import lpips |
|
from model import * |
|
|
|
|
|
|
|
|
|
from copy import deepcopy |
|
import imageio |
|
|
|
import os |
|
import sys |
|
import numpy as np |
|
from PIL import Image |
|
import torch |
|
import torchvision.transforms as transforms |
|
from argparse import Namespace |
|
from e4e.models.psp import pSp |
|
from util import * |
|
from huggingface_hub import hf_hub_download |
|
|
|
# ---------------------------------------------------------------------------
# e4e encoder setup (runs at import time).
# Downloads the pretrained e4e FFHQ encoder checkpoint and builds the pSp
# network that `projection` uses to invert face images into W+ latents.
# ---------------------------------------------------------------------------
device= 'cpu'  # inference runs on CPU (e.g. a free hosted demo — no GPU)

model_path_e = hf_hub_download(repo_id="Abhinowww/Capstone", filename="e4e_ffhq_encode.pt")

# NOTE(review): torch.load unpickles arbitrary objects from the checkpoint —
# acceptable only because the Hub repo is trusted.
ckpt = torch.load(model_path_e, map_location='cpu')

opts = ckpt['opts']

# Point the encoder options at the locally downloaded checkpoint path.
opts['checkpoint_path'] = model_path_e

# pSp expects attribute-style access to its options.
opts= Namespace(**opts)

# Build the pSp (e4e) encoder in eval mode on the chosen device.
net = pSp(opts, device).eval().to(device)
|
|
|
@ torch.no_grad()
def projection(img, name, device='cuda'):
    """Invert a face image into a W+ latent via the global e4e encoder.

    The latent is also persisted to disk at ``name`` as a torch checkpoint
    containing a single ``'latent'`` key, so it can be reloaded later.

    Parameters
    ----------
    img : PIL.Image.Image
        The (aligned) face image to invert.
    name : str
        File path where the latent checkpoint is saved.
    device : str
        Device the preprocessed tensor is moved to before encoding.

    Returns
    -------
    torch.Tensor
        The W+ latent for the single input image.
    """
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(256),
        transforms.ToTensor(),
        # Map pixel values from [0, 1] to [-1, 1], as the encoder expects.
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ])

    batch = preprocess(img).unsqueeze(0).to(device)

    # The reconstructed images are not needed here — only the latents.
    _, w_plus = net(batch, randomize_noise=False, return_latents=True)
    latent = w_plus[0]

    torch.save({'latent': latent}, name)
    return latent
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Base StyleGAN2 generator setup (runs at import time).
# ---------------------------------------------------------------------------
device = 'cpu'  # re-declares the CPU device for the generator section


latent_dim = 512  # StyleGAN2 FFHQ style-vector dimensionality


# Download the pretrained FFHQ StyleGAN2 weights and load the EMA generator
# ("g_ema") into a 1024x1024 Generator.
model_path_s = hf_hub_download(repo_id="Abhinowww/Capstone", filename="stylegan2-ffhq-config-f.pt")

original_generator = Generator(1024, latent_dim, 8, 2).to(device)

# map_location lambda keeps all tensors on CPU regardless of where they
# were saved.
ckpt = torch.load(model_path_s, map_location=lambda storage, loc: storage)

# strict=False: tolerate checkpoint keys the Generator class does not define.
original_generator.load_state_dict(ckpt["g_ema"], strict=False)

# Mean W latent over 10k random z samples.  NOTE(review): not referenced
# elsewhere in this file — presumably kept for truncation; confirm before
# removing.
mean_latent = original_generator.mean_latent(10000)
|
|
|
|
|
# One independent copy of the base generator per style; each copy receives
# its fine-tuned weights below.  Naming: the true/false suffix mirrors the
# "...True"/"...False" checkpoint filenames (presumably a preserve-identity
# flag used during fine-tuning — TODO confirm).
generatorjokerfalse = deepcopy(original_generator)

generatorjokertrue = deepcopy(original_generator)

generatorvoldemortfalse = deepcopy(original_generator)

generatorvoldemorttrue = deepcopy(original_generator)

generatorpushpa = deepcopy(original_generator)

generatorgiga = deepcopy(original_generator)

generatorsketchtrue = deepcopy(original_generator)

generatorsketchfalse = deepcopy(original_generator)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Full-resolution (1024x1024) preprocessing pipeline mapping pixels to
# [-1, 1].  NOTE(review): this module-level `transform` is not referenced
# anywhere else in this file (`projection` builds its own 256x256 pipeline)
# — it appears unused; confirm before deleting.
transform = transforms.Compose(
    [
        transforms.Resize((1024, 1024)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
)
|
|
|
|
|
# ---------------------------------------------------------------------------
# Fine-tuned style checkpoints (runs at import time).
# Each block downloads one fine-tuned checkpoint from the Hub and loads it
# into the matching generator copy.  strict=False tolerates checkpoint keys
# the Generator class does not define.
# ---------------------------------------------------------------------------

# Joker, without identity preservation.
modeljokerfalse = hf_hub_download(repo_id="Abhinowww/Capstone", filename="JokerEightHundredFalse.pt")
ckptjokerfalse = torch.load(modeljokerfalse, map_location=lambda storage, loc: storage)
generatorjokerfalse.load_state_dict(ckptjokerfalse, strict=False)

# Joker, with identity preservation.
modeljokertrue = hf_hub_download(repo_id="Abhinowww/Capstone", filename="JokerTwoHundredFiftyTrue.pt")
ckptjokertrue = torch.load(modeljokertrue, map_location=lambda storage, loc: storage)
generatorjokertrue.load_state_dict(ckptjokertrue, strict=False)

# Voldemort, without identity preservation.
modelvoldemortfalse = hf_hub_download(repo_id="Abhinowww/Capstone", filename="VoldemortFourHundredFalse.pt")
ckptvoldemortfalse = torch.load(modelvoldemortfalse, map_location=lambda storage, loc: storage)
generatorvoldemortfalse.load_state_dict(ckptvoldemortfalse, strict=False)

# Voldemort, with identity preservation.
modelvoldemorttrue = hf_hub_download(repo_id="Abhinowww/Capstone", filename="VoldemortThreeHundredTrue.pt")
ckptvoldemorttrue = torch.load(modelvoldemorttrue, map_location=lambda storage, loc: storage)
generatorvoldemorttrue.load_state_dict(ckptvoldemorttrue, strict=False)

# Pushpa style.
modelpushpa = hf_hub_download(repo_id="Abhinowww/Capstone", filename="PushpaFourHundredFalse.pt")
ckptpushpa = torch.load(modelpushpa, map_location=lambda storage, loc: storage)
generatorpushpa.load_state_dict(ckptpushpa, strict=False)

# Gigachad style.
modelgiga = hf_hub_download(repo_id="Abhinowww/Capstone", filename="GigachadFourHundredFalse.pt")
ckptgiga = torch.load(modelgiga, map_location=lambda storage, loc: storage)
generatorgiga.load_state_dict(ckptgiga, strict=False)

# Sketch, with identity preservation.
modelsketchtrue = hf_hub_download(repo_id="Abhinowww/Capstone", filename="OGSketchFourHundredTrue.pt")
ckptsketchtrue = torch.load(modelsketchtrue, map_location=lambda storage, loc: storage)
generatorsketchtrue.load_state_dict(ckptsketchtrue, strict=False)

# Sketch, without identity preservation.
modelsketchfalse = hf_hub_download(repo_id="Abhinowww/Capstone", filename="OGSketchFourHundredFalse.pt")
ckptsketchfalse = torch.load(modelsketchfalse, map_location=lambda storage, loc: storage)
generatorsketchfalse.load_state_dict(ckptsketchfalse, strict=False)
|
|
|
|
|
|
|
def inference(img, model):
    """Stylize an uploaded face photo with the selected fine-tuned generator.

    Parameters
    ----------
    img : PIL.Image.Image
        The user-uploaded photo (from the Gradio image input).
    model : str
        Dropdown label selecting which fine-tuned generator to apply.

    Returns
    -------
    str
        Path of the stylized JPEG written to disk (Gradio loads it back).

    Raises
    ------
    ValueError
        If ``model`` is not one of the known dropdown choices.
    """
    # Round-trip through disk because align_face expects a file path.
    img.save('out.jpg')
    aligned_face = align_face('out.jpg')

    # Invert the aligned face into a W+ latent with the e4e encoder.
    my_w = projection(aligned_face, "test.pt", device).unsqueeze(0)

    # Dispatch table replaces the original if/elif chain.  This also fixes a
    # latent bug: an unrecognized choice previously left `my_sample` unbound
    # and crashed later with a NameError.
    generators = {
        'Joker': generatorjokerfalse,
        'Joker Preserve': generatorjokertrue,
        'Voldemort': generatorvoldemortfalse,
        'Voldemort Preserve': generatorvoldemorttrue,
        'Pushpa': generatorpushpa,
        'Gigachad': generatorgiga,
        'Sketch': generatorsketchfalse,
        'Sketch Preserve': generatorsketchtrue,
    }
    try:
        generator = generators[model]
    except KeyError:
        raise ValueError(f"Unknown model choice: {model!r}")

    with torch.no_grad():
        my_sample = generator(my_w, input_is_latent=True)

    # Map the generator output to uint8 [0, 255] before writing; the original
    # wrote the raw float tensor, which imageio clips / lossily converts.
    # (assumes generator output range is approximately [-1, 1] — TODO confirm)
    sample = my_sample[0].clamp(-1, 1).permute(1, 2, 0).detach().cpu().numpy()
    npimage = ((sample + 1) / 2 * 255).astype(np.uint8)
    imageio.imwrite('filename.jpeg', npimage)
    return 'filename.jpeg'
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI wiring — builds the interface and launches the app at import time.
# ---------------------------------------------------------------------------
title = "Image Generation Using Style Adaptation: A Capstone Project by Abhinav Bandaru"

description = "Upload your input image in the left, choose a style model, click on submit, and wait for it."

# NOTE(review): gr.inputs / gr.outputs, allow_flagging as a bool, and
# allow_screenshot are the legacy Gradio 2.x API — this file presumably pins
# an old gradio version; confirm before upgrading.
gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Dropdown(choices=['Joker', 'Joker Preserve', 'Voldemort', 'Voldemort Preserve', 'Pushpa', 'Gigachad', 'Sketch', 'Sketch Preserve'], type="value", default='Joker', label="Model")], gr.outputs.Image(type="pil"),title=title,description=description,allow_flagging=False,allow_screenshot=False).launch()