Spaces: Running

Alican Akca committed · Commit e853021 · 1 Parent(s): 662dec3

GIF and Video Processing

Browse files:
- app.py +28 -51
- methods/img2pixl.py +53 -63
- methods/media.py +32 -0
- requirements.txt +3 -12
app.py CHANGED
@@ -1,76 +1,53 @@
 import cv2
 import torch
+import warnings
 import numpy as np
 import gradio as gr
 import paddlehub as hub
 from PIL import Image
 from methods.img2pixl import pixL
 from examples.pixelArt.combine import combine
+from methods.media import Media

-
+warnings.filterwarnings("ignore")
+
+U2Net = hub.Module(name='U2Net')
 device = "cuda" if torch.cuda.is_available() else "cpu"
 face2paint = torch.hub.load("bryandlee/animegan2-pytorch:main", "face2paint", device=device, size=512)
 model = torch.hub.load("bryandlee/animegan2-pytorch", "generator", device=device).eval()

-def GIF(fname,pixel_size):
-  gif = Image.open(fname)
-  frames = []
-  for i in range(gif.n_frames): #First Step: Splitting the GIF into frames
-    gif.seek(i)
-    frame = Image.new('RGB', gif.size)
-    frame.paste(gif)
-    frame = np.array(frame)
-    frames.append(frame)
-  result = pixL().toThePixL(frames, pixel_size)
-  for frame in result: #Second Step: Adding Cartoon Effect to each frame
-    frame = Image.fromarray(frame)
-    frame = cv2.cvtColor(np.asarray(face2paint(model, frame)), cv2.COLOR_BGR2RGB)
-  frames = []
-<<<<<<< HEAD
-  for frame in result: #Third Step: Combining the frames into a GIF
-    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-    frame = Image.fromarray(frame)
-    frames.append(frame)
-  frames[0].save('cache.gif', append_images=frames, save_all=True, loop=1)
-  cache = Image.open('cache.gif')
-  return cache
-=======
-  for frame in result:
-
-    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-    frame = Image.fromarray(frame)
-    frames.append(frame)
-  print(type(frames), len(frames), type(frames[0]), frames[0].size)
-  frames[0].save('cache.gif', append_images=frames, save_all=True, loop=1)
-  return Image.open('cache.gif')
->>>>>>> a2e31df8118400de0782a9feb55255620b93cd83

-def initilize(
-
-
+def initilize(media,pixel_size,checkbox1):
+  #Author: Alican Akca
+  if media.name.endswith('.gif'):
+    return Media().split(media.name,pixel_size)
+  elif media.name.endswith('.mp4'):
+    return Media().split(media.name,pixel_size)
   else:
-
-
+    media = Image.open(media.name).convert("RGB")
+    media = cv2.cvtColor(np.asarray(face2paint(model, media)), cv2.COLOR_BGR2RGB)
     if checkbox1:
-      result =
-
-
-
-
-
-                                  visualization=True)
+      result = U2Net.Segmentation(images=[media],
+                                  paths=None,
+                                  batch_size=1,
+                                  input_size=320,
+                                  output_dir='output',
+                                  visualization=True)
       result = combine.combiner(images = pixL().toThePixL([result[0]['front'][:,:,::-1], result[0]['mask']],
                                 pixel_size),
-                                background_image =
+                                background_image = media)
     else:
-      result = pixL().toThePixL([
-
+      result = pixL().toThePixL([media], pixel_size)
+    result.save('cache.png')
+    return [Image.fromarray(result), 'cache.png']

-inputs = [
+inputs = [gr.File(label="Media"),
           gr.Slider(4, 100, value=12, step = 2, label="Pixel Size"),
           gr.Checkbox(label="Object-Oriented Inference", value=False)]
-outputs = [gr.
-
+outputs = [gr.Video(label="Pixed Media"),
+           gr.File(label="Download")]
+title = "Pixera: Create your own Pixel Art"
+description = """Mobile applications will have released soon. ^^ """
 gr.Interface(fn = initilize,
              inputs = inputs,
              outputs = outputs).launch()
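For context, a minimal sketch (not part of the commit) of the dispatch the new initilize performs: Gradio's File component hands the function an object whose .name attribute holds the uploaded file's path, and the extension decides between the GIF/MP4 branch (handled by methods.media.Media) and the still-image branch (face2paint followed by pixL().toThePixL). The names route and upload below are illustrative, not from the repository.

# Illustrative sketch of the routing in the new initilize(); not the committed code.
from types import SimpleNamespace

def route(media):
    """Return which processing branch an upload would take, based on its extension."""
    if media.name.endswith(('.gif', '.mp4')):
        return "animated branch: Media().split(media.name, pixel_size)"
    return "still-image branch: face2paint(model, img) then pixL().toThePixL([img], pixel_size)"

# Gradio passes a temp-file wrapper; SimpleNamespace stands in for it here.
print(route(SimpleNamespace(name="upload.gif")))    # animated branch
print(route(SimpleNamespace(name="portrait.png")))  # still-image branch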
methods/img2pixl.py CHANGED
@@ -1,71 +1,61 @@
+import os
 import cv2
+import torch
+import warnings
 import numpy as np
+import gradio as gr
+import paddlehub as hub
 from PIL import Image
+from methods.img2pixl import pixL
+from examples.pixelArt.combine import combine
+from methods.media import Media

-
-  #Author: Alican Akca
-  def __init__(self,numOfSquaresW = None, numOfSquaresH= None, size = [False, (512,512)],square = 6,ImgH = None,ImgW = None,images = [],background_image = None):
-    self.images = images
-    self.size = size
-    self.ImgH = ImgH
-    self.ImgW = ImgW
-    self.square = square
-    self.numOfSquaresW = numOfSquaresW
-    self.numOfSquaresH = numOfSquaresH
+warnings.filterwarnings("ignore")

-
-
+U2Net = hub.Module(name='U2Net')
+device = "cuda" if torch.cuda.is_available() else "cpu"
+face2paint = torch.hub.load("bryandlee/animegan2-pytorch:main", "face2paint", device=device, size=512)
+model = torch.hub.load("bryandlee/animegan2-pytorch", "generator", device=device).eval()

-    size = (image.shape[0] - (image.shape[0] % 4), image.shape[1] - (image.shape[1] % 4))
-    image = cv2.resize(image, size)
-    image = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2RGB)
-
-    if len(self.images) == 1:
-      return self.images[0]
-    else:
-      return self.images
-
-  def toThePixL(self,images, pixel_size):
-    self.images = []
-    self.square = pixel_size
-    for image in images:
-      image = Image.fromarray(image)
-      image = image.convert("RGB")
-      self.ImgW, self.ImgH = image.size
-      self.images.append(pixL.epicAlgorithm(self, image))
-
-    return pixL.preprocess(self)
-
-  def numOfSquaresFunc(self):
-    self.numOfSquaresW = round((self.ImgW / self.square) + 1)
-    self.numOfSquaresH = round((self.ImgH / self.square) + 1)
-
-  def epicAlgorithm(self, image):
-    pixValues = []
-    pixL.numOfSquaresFunc(self)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+def initilize(media,pixel_size,checkbox1):
+  #Author: Alican Akca
+  if media.name.endswith('.gif'):
+    return Media().split(media.name,pixel_size)
+  elif media.name.endswith('.mp4'):
+    return Media().split(media.name,pixel_size)
+  else:
+    media = Image.open(media.name).convert("RGB")
+    media = cv2.cvtColor(np.asarray(face2paint(model, media)), cv2.COLOR_BGR2RGB)
+    if checkbox1:
+      result = U2Net.Segmentation(images=[media],
+                                  paths=None,
+                                  batch_size=1,
+                                  input_size=320,
+                                  output_dir='output',
+                                  visualization=True)
+      result = combine.combiner(images = pixL().toThePixL([result[0]['front'][:,:,::-1], result[0]['mask']],
+                                pixel_size),
+                                background_image = media)
+    else:
+      result = pixL().toThePixL([media], pixel_size)
+    result = Image.fromarray(result)
+    result.save('cache.png')
+    return [None, result, 'cache.png']
+
+inputs = [gr.File(label="Media"),
+          gr.Slider(4, 100, value=12, step = 2, label="Pixel Size"),
+          gr.Checkbox(label="Object-Oriented Inference", value=False)]
+outputs = [gr.Video(label="Pixed Media"),
+           gr.Image(label="Pixed Media"),
+           gr.File(label="Download")]
+
+title = "Pixera: Create your own Pixel Art"
+description = """Mobile applications will have released soon ^^ """
+
+gr.Interface(fn = initilize,
+             inputs = inputs,
+             outputs = outputs,
+             title=title,
+             description=description).launch()

-    return background
methods/media.py ADDED
@@ -0,0 +1,32 @@
+import cv2
+import torch
+import imageio
+from methods.img2pixl import pixL
+
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+face2paint = torch.hub.load("bryandlee/animegan2-pytorch:main", "face2paint", device=device, size=512)
+model = torch.hub.load("bryandlee/animegan2-pytorch", "generator", device=device).eval()
+
+class Media:
+  #Author: Alican Akca
+  def __init__(self,fname = None,pixel_size = None):
+    self.fname = fname
+    self.pixel_size = pixel_size
+
+  def split(self,fname,pixel_size):
+    media = cv2.VideoCapture(fname)
+    frames = []
+    while True:
+      ret, cv2Image = media.read()
+      if not ret:
+        break
+      frames.append(cv2Image)
+    frames = pixL().toThePixL(frames, pixel_size)
+    imageio.mimsave('cache.gif', frames)
+    output_file = "cache.mp4"
+    out = cv2.VideoWriter(output_file,cv2.VideoWriter_fourcc(*'h264'), 15, (frames[0].shape[1],frames[0].shape[0]))
+    for i in range(len(frames)):
+      out.write(frames[i])
+    out.release()
+    return [output_file, None, 'cache.gif']
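A brief usage sketch (assumed, not part of the commit) of the new Media.split helper: app.py calls it as Media().split(media.name, pixel_size) for .gif and .mp4 uploads, and the returned three-element list lines up with the video, image, and file outputs declared in the interface. The clip name "input.mp4" below is a placeholder.

# Assumed usage of methods/media.py as wired up in app.py; "input.mp4" is a placeholder clip.
from methods.media import Media

video_path, image, gif_path = Media().split("input.mp4", pixel_size=12)
# video_path -> "cache.mp4": pixelated frames re-encoded with cv2.VideoWriter (h264, 15 fps)
# image      -> None:        the image slot is unused for GIF/video input
# gif_path   -> "cache.gif": the same frames written with imageio.mimsave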
requirements.txt CHANGED
@@ -1,16 +1,7 @@
 pip
 torch
-
-Flask
-gunicorn
+gradio
 Pillow
-
-google-cloud-storage
-algorithmia
-scikit-video
-tf_slim
-PyYaml
-flask-ngrok
-paddlepaddle
+imageio
 paddlehub
-
+opencv_python