Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -10,7 +10,6 @@ from PIL import Image
|
|
10 |
labels = ["drawings", "hentai", "neutral", "porn", "sexy"]
|
11 |
description = f"""This is a demo of classifing nsfw pictures. Label division is based on the following:
|
12 |
[*https://github.com/alex000kim/nsfw_data_scraper*](https://github.com/alex000kim/nsfw_data_scraper).
|
13 |
-
(If you want to test, please drop the example pictures instead of clicking)
|
14 |
|
15 |
You can continue to train this model with the same preprocess-to-images.
|
16 |
Finally, welcome to star my [*github repository*](https://github.com/csuer411/nsfw_classify)"""
|
@@ -49,17 +48,7 @@ model.load_state_dict(torch.load("classify_nsfw_v3.0.pth", map_location="cpu"))
|
|
49 |
model.eval()
|
50 |
|
51 |
|
52 |
-
def img_convert(inp):
    """Serialize a PIL image to JPEG and return the base64-encoded bytes.

    `inp` must expose a PIL-style ``save(fp, format=...)`` method; the
    return value is raw base64 ``bytes`` (not a decoded string).
    """
    buffer = io.BytesIO()
    try:
        inp.save(buffer, format="JPEG")
        return base64.b64encode(buffer.getvalue())
    finally:
        buffer.close()
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
def predict(inp):
|
62 |
-
temp_inp = inp
|
63 |
inp = preprocess(inp).unsqueeze(0)
|
64 |
with torch.no_grad():
|
65 |
prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
|
@@ -67,8 +56,8 @@ def predict(inp):
|
|
67 |
return result
|
68 |
|
69 |
|
70 |
-
inputs = gr.
|
71 |
-
outputs = gr.
|
72 |
gr.Interface(
|
73 |
fn=predict, inputs=inputs, outputs=outputs, examples=["./example/anime.jpg", "./example/real.jpg"], description=description,
|
74 |
).launch()
|
|
|
10 |
# Class labels for the NSFW classifier; order must match the model's
# output-logit indices (source: alex000kim/nsfw_data_scraper label scheme).
labels = ["drawings", "hentai", "neutral", "porn", "sexy"]

# Markdown description rendered in the Gradio UI.
# NOTE(review): f-string has no placeholders — presumably a leftover prefix; harmless.
description = f"""This is a demo of classifing nsfw pictures. Label division is based on the following:
[*https://github.com/alex000kim/nsfw_data_scraper*](https://github.com/alex000kim/nsfw_data_scraper).

You can continue to train this model with the same preprocess-to-images.
Finally, welcome to star my [*github repository*](https://github.com/csuer411/nsfw_classify)"""
|
|
|
48 |
# Put the network in inference mode (disables dropout / freezes batch-norm stats).
model.eval()
|
49 |
|
50 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
51 |
def predict(inp):
|
|
|
52 |
inp = preprocess(inp).unsqueeze(0)
|
53 |
with torch.no_grad():
|
54 |
prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
|
|
|
56 |
return result
|
57 |
|
58 |
|
59 |
+
# Gradio wiring: one PIL image in, top-2 class probabilities out.
inputs = gr.Image(type='pil')
outputs = gr.Label(num_top_classes=2)
# Launch the demo; `examples` paths are relative to the Space's working directory.
gr.Interface(
    fn=predict, inputs=inputs, outputs=outputs, examples=["./example/anime.jpg", "./example/real.jpg"], description=description,
).launch()
|