Spaces:
Runtime error
Runtime error
File size: 2,531 Bytes
19d5322 89a1eb7 19d5322 e86ba53 be78d6b e86ba53 68c1ba8 19d5322 07ee0fa 19d5322 63ec850 07ee0fa 63ec850 19d5322 63ec850 8d8f758 07ee0fa 8d8f758 2801ab0 19d5322 63ec850 19d5322 a97a9cc 9873299 a97a9cc a810030 9873299 8d8f758 92fbc04 a29c4b9 8d8f758 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 |
import os
import subprocess
import sys

# Install the pinned Gradio version at startup (common Hugging Face Spaces
# pattern). subprocess.run with an argument list avoids going through a
# shell, and sys.executable guarantees the install targets the same
# interpreter running this script. check=False preserves the original
# os.system best-effort behavior: a failed install does not raise here
# (the `import gradio` below will fail instead, which is the clearer error).
subprocess.run(
    [sys.executable, "-m", "pip", "install", "gradio==3.32.0"],
    check=False,
)

from PIL import Image
import gradio as gr
import torch

# Run on GPU when one is visible to torch, otherwise CPU; forwarded to the
# torch.hub.load calls below.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# AnimeGAN2 generator loaded from the repo's hubconf on GitHub (network I/O
# at import time). pretrained=True selects the repo's default checkpoint,
# which the UI labels "version 2".
model2 = torch.hub.load(
"AK391/animegan2-pytorch:main",
"generator",
pretrained = True,
device = DEVICE,  # 'cuda' when available, else 'cpu'
progress = False
)
# Same generator architecture with the explicit "face_paint_512_v1"
# checkpoint — the UI's "version 1".
model1 = torch.hub.load(
"AK391/animegan2-pytorch:main",
"generator",
pretrained = "face_paint_512_v1",
device = DEVICE
)
# Helper callable from the same repo: takes (generator, PIL image) and
# returns the stylized PIL image at 512x512 (see `inference` below).
# NOTE(review): side_by_side=False presumably returns only the stylized
# output rather than an input/output pair — confirm against the repo's
# hubconf. trust_repo was left commented out by the original author.
face2paint = torch.hub.load(
'AK391/animegan2-pytorch:main',
'face2paint',
size = 512,
device = DEVICE,
#trust_repo = True,
side_by_side = False
)
def inference(img, ver):
    """Stylize *img* with the AnimeGAN2 generator selected by *ver*.

    Only the exact "version 2" label selects ``model2``; any other value
    (including the "version 1" label) falls back to ``model1``.
    Returns the stylized PIL image produced by ``face2paint``.
    """
    wants_v2 = ver == 'version 2 (🔺 robustness,🔻 stylization)'
    generator = model2 if wants_v2 else model1
    return face2paint(generator, img)
# UI copy shown in the Gradio page header/footer.
# NOTE(review): this text describes a GAN-generated-image *detector*
# (polimi-ispl/GAN-image-detection) while the code above performs AnimeGAN2
# stylization — it looks like a copy-paste leftover from another Space;
# confirm with the author before shipping.
title = "Deepfake Detection"
description = "This gradio contains a GAN-generated image detector developed to distinguish real images from synthetic ones."
article = "<p style='text-align: center'><a href='https://github.com/polimi-ispl/GAN-image-detection' target='_blank'>polimi-ispl/GAN-image-detection</a></p>"
# Example rows: [image path, second column]. NOTE(review): the live
# Interface below declares only ONE input component, so these two-column
# rows do not match its signature — a likely contributor to the Space's
# runtime error. The image files under images/ are assumed to exist in the
# repo — verify.
examples=[
['images/fake0.jpg','Fake Female'],
['images/fake1.png','Fake Male'],
['images/real0.jpg','Real Female'],
['images/real1.jpg','Real Male'],
]
# Build and launch the Gradio UI.
#
# Fixes relative to the original launch block (which crashed at runtime):
#  * `inference(img, ver)` takes two arguments but only an image input was
#    declared, so every prediction raised a TypeError — a Radio component
#    now supplies `ver`, which also matches the two-column example rows.
#  * `allow_screenshot` is not accepted by gradio 3.x Interface and
#    `allow_flagging=False` is not a valid value ('never' is).
#  * `cache_examples` belongs to Interface, not .launch(); it is disabled
#    so startup does not eagerly run the model over the example files.
# (The earlier commented-out duplicate of this call was removed.)
gr.Interface(
    inference,
    inputs=[
        gr.inputs.Image(type="pil"),
        gr.inputs.Radio(
            [
                'version 1 (🔺 stylization,🔻 robustness)',
                'version 2 (🔺 robustness,🔻 stylization)',
            ],
            type="value",
            default='version 2 (🔺 robustness,🔻 stylization)',
            label='version',
        ),
    ],
    outputs=gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    allow_flagging='never',
    cache_examples=False,
).launch(enable_queue=True)
# (removed) A leftover draft of a multi-input Interface (image + text ->
# label/text/image) was kept here as a no-op triple-quoted string; deleted
# as dead code. See version control history if it is ever needed again.