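"""Scan a local image directory with the Falconsai/nsfw_image_detection classifier
and delete any image scored as NSFW, so the remaining set can pass content review."""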
import os

from dotenv import load_dotenv
from PIL import Image
from transformers import pipeline


def get_nsfw_score(image_path: str, model) -> float:
    """Return the classifier's 'nsfw' probability for the image at image_path."""
    img = Image.open(image_path)
    result = model(images=img)
    # The pipeline returns a list of {'label': ..., 'score': ...} dicts; fall back
    # to 0.0 if no 'nsfw' label is present so the caller can compare safely.
    return next((item['score'] for item in result if item['label'] == 'nsfw'), 0.0)


if __name__ == '__main__':
    load_dotenv()  # pick up any environment variables defined in a local .env file
    model = pipeline("image-classification", model="Falconsai/nsfw_image_detection")

    # Collect every .jpg/.png file under the local 'manga' directory.
    img_path = 'manga'
    subdir_path = os.path.join(os.getcwd(), img_path)

    image_files = []
    for root, dirs, files in os.walk(subdir_path):
        for file in files:
            if file.endswith((".jpg", ".png")):
                image_files.append(os.path.relpath(os.path.join(root, file)))

    # Score each image and delete any the classifier flags as NSFW.
    for image_path in image_files:
        result = get_nsfw_score(image_path, model)
        if result > 0.5:
            print("Found a problematic image; deleting it to pass review:", image_path)
            os.remove(image_path)
        else:
            print(image_path, "looks fine")