import os

import tensorflow as tf

os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'

import IPython.display as display

import matplotlib.pyplot as plt
import matplotlib as mpl

mpl.rcParams['figure.figsize'] = (12, 12)
mpl.rcParams['axes.grid'] = False

import numpy as np
import PIL.Image

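# Convert a float tensor in [0, 1] (optionally with a leading batch
# dimension of 1) into a PIL image for display and download.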
def tensor_to_image(tensor):
    tensor = tensor * 255
    tensor = np.array(tensor, dtype=np.uint8)
    if np.ndim(tensor) > 3:
        assert tensor.shape[0] == 1
        tensor = tensor[0]
    return PIL.Image.fromarray(tensor)

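# Load an image from disk, scale it so its longest side is at most
# max_dim pixels (preserving aspect ratio), and add a batch dimension.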
def load_img(path_to_img):
    max_dim = 1024
    img = tf.io.read_file(path_to_img)
    img = tf.image.decode_image(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)

    shape = tf.cast(tf.shape(img)[:-1], tf.float32)
    long_dim = max(shape)
    scale = max_dim / long_dim

    new_shape = tf.cast(shape * scale, tf.int32)

    img = tf.image.resize(img, new_shape)
    img = img[tf.newaxis, :]
    return img

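# Display a (possibly batched) image tensor with matplotlib.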
def imshow(image, title=None):
    if len(image.shape) > 3:
        image = tf.squeeze(image, axis=0)

    plt.imshow(image)
    if title:
        plt.title(title)

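# VGG19 layers used as feature extractors: content comes from a single
# deep layer, style from the first convolution of each block.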
content_layers = ['block5_conv2']

style_layers = ['block1_conv1',
                'block2_conv1',
                'block3_conv1',
                'block4_conv1',
                'block5_conv1']

num_content_layers = len(content_layers)
num_style_layers = len(style_layers)

def vgg_layers(layer_names):
    """Creates a VGG model that returns a list of intermediate output values."""
    vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
    vgg.trainable = False

    outputs = [vgg.get_layer(name).output for name in layer_names]

    model = tf.keras.Model([vgg.input], outputs)
    return model

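# Style is represented by the Gram matrix of a layer's feature maps,
# averaged over spatial locations: G_cd = sum_ij(F_ijc * F_ijd) / (I * J).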
def gram_matrix(input_tensor):
    result = tf.linalg.einsum('bijc,bijd->bcd', input_tensor, input_tensor)
    input_shape = tf.shape(input_tensor)
    num_locations = tf.cast(input_shape[1] * input_shape[2], tf.float32)
    return result / num_locations

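# Wraps the truncated VGG so a single forward pass returns a dict of
# Gram-matrix style features and raw content features.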
class StyleContentModel(tf.keras.models.Model):
    def __init__(self, style_layers, content_layers):
        super(StyleContentModel, self).__init__()
        self.vgg = vgg_layers(style_layers + content_layers)
        self.style_layers = style_layers
        self.content_layers = content_layers
        self.num_style_layers = len(style_layers)
        self.vgg.trainable = False

    def call(self, inputs):
        "Expects float input in [0,1]"
        inputs = inputs * 255.0
        preprocessed_input = tf.keras.applications.vgg19.preprocess_input(inputs)
        outputs = self.vgg(preprocessed_input)
        style_outputs, content_outputs = (outputs[:self.num_style_layers],
                                          outputs[self.num_style_layers:])

        style_outputs = [gram_matrix(style_output)
                         for style_output in style_outputs]

        content_dict = {content_name: value
                        for content_name, value
                        in zip(self.content_layers, content_outputs)}

        style_dict = {style_name: value
                      for style_name, value
                      in zip(self.style_layers, style_outputs)}

        return {'content': content_dict, 'style': style_dict}


extractor = StyleContentModel(style_layers, content_layers)

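# Keep pixel values in the valid [0, 1] range after each gradient step.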
def clip_0_1(image):
    return tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)


# Horizontal and vertical differences between neighboring pixels.
def high_pass_x_y(image):
    x_var = image[:, :, 1:, :] - image[:, :, :-1, :]
    y_var = image[:, 1:, :, :] - image[:, :-1, :, :]

    return x_var, y_var


# Manual total variation loss. train_step below uses the equivalent
# built-in tf.image.total_variation; this version is kept for reference.
def total_variation_loss(image):
    x_deltas, y_deltas = high_pass_x_y(image)
    return tf.reduce_sum(tf.abs(x_deltas)) + tf.reduce_sum(tf.abs(y_deltas))

# Default loss weights and schedule. The Adam optimizer is created inside
# transfer_style_custom so its step counter and slot variables start fresh
# on every run instead of leaking state between requests.
style_weight = 1e-2
content_weight = 1e4
total_variation_weight = 30

epochs = 10
steps_per_epoch = 50

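# Gradio callback: loads both images and dispatches to the fast (TF Hub)
# or custom (optimization-based) transfer path.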
def transfer_style(content_path, style_path, transfer_mode, steps_per_epoch=100,
                   style_weight=1e-2, content_weight=1e4, total_variation_weight=30):
    try:
        content_image = load_img(content_path)
        style_image = load_img(style_path)
        if transfer_mode == "Fast_transfer":
            res = transfer_style_fast(content_image, style_image)
        else:
            res = transfer_style_custom(content_image, style_image, int(steps_per_epoch),
                                        style_weight, content_weight, total_variation_weight)
        res = tensor_to_image(res)
    except Exception:
        # Re-raise unchanged so Gradio surfaces the original error and traceback.
        raise
    return res

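# Fast path: Magenta's pre-trained arbitrary-image-stylization model from
# TensorFlow Hub produces the stylized image in a single forward pass.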
def transfer_style_fast(content_image, style_image):
    import tensorflow_hub as hub
    hub_model = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')
    return hub_model(tf.constant(content_image), tf.constant(style_image))[0]

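# Custom path: iteratively optimizes the pixels of a copy of the content
# image to match the style image's Gram matrices (Gatys et al.), plus a
# total variation penalty to suppress high-frequency artifacts.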
def transfer_style_custom(content_image, style_image, steps_per_epoch=100,
                          style_weight=1e-2, content_weight=1e4, total_variation_weight=30):

    def style_content_loss(outputs):
        style_outputs = outputs['style']
        content_outputs = outputs['content']
        style_loss = tf.add_n([tf.reduce_mean((style_outputs[name] - style_targets[name])**2)
                               for name in style_outputs.keys()])
        style_loss *= style_weight / num_style_layers

        content_loss = tf.add_n([tf.reduce_mean((content_outputs[name] - content_targets[name])**2)
                                 for name in content_outputs.keys()])
        content_loss *= content_weight / num_content_layers
        loss = style_loss + content_loss
        return loss

    # Fresh optimizer per run so Adam's state doesn't carry over between images.
    opt = tf.optimizers.Adam(learning_rate=0.02, beta_1=0.99, epsilon=1e-1)

    @tf.function()
    def train_step(image):
        with tf.GradientTape() as tape:
            outputs = extractor(image)
            loss = style_content_loss(outputs)
            loss += total_variation_weight * tf.image.total_variation(image)

        grad = tape.gradient(loss, image)
        opt.apply_gradients([(grad, image)])
        image.assign(clip_0_1(image))

    try:
        style_targets = extractor(style_image)['style']
        content_targets = extractor(content_image)['content']
        image = tf.Variable(content_image)

        step = 0
        for n in range(epochs):
            for m in range(steps_per_epoch):
                step += 1
                train_step(image)
    except Exception:
        # Re-raise unchanged so the caller sees the original error.
        raise

    return image

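# Gradio UI (legacy pre-3.0 gr.inputs API): two image uploads, a mode
# selector, and slider/number boxes for the optimization hyperparameters,
# matching transfer_style's parameter order.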
import gradio as gr

inputs = [
    gr.inputs.Image(type="filepath", label="Content image"),
    gr.inputs.Image(type="filepath", label="Style image"),
    gr.inputs.Radio(["Fast_transfer", "Custom_transfer"], label="Transfer mode"),
    gr.inputs.Slider(1, 100, default=30, step=1, label="Steps per epoch"),
    gr.inputs.Number(1e-2, label="Style weight"),
    gr.inputs.Number(1e4, label="Content weight"),
    gr.inputs.Number(30, label="Total variation weight")
]

iface = gr.Interface(
    fn=transfer_style,
    inputs=inputs,
    examples=[["NST/etsii.jpg", "NST/data/style_2.jpg", "Fast_transfer", 30, 1e-2, 1e4, 30],
              ["NST/data/content_9.jpg", "NST/ola.png", "Fast_transfer", 30, 1e-2, 1e4, 30],
              ["NST/sailboat_cropped.jpg", "NST/sketch_cropped.png", "Fast_transfer", 30, 1e-2, 1e4, 30],
              ["NST/armadillo.jpg", "NST/data/style_3.jpg", "Fast_transfer", 30, 1e-2, 1e4, 30],
              ["NST/gato.jpg", "NST/data/style_4.jpg", "Fast_transfer", 30, 1e-2, 1e4, 30],
              ],
    outputs="image").launch(debug=True)