---
license: apache-2.0
pipeline_tag: image-classification
---
# About
This is a fork of MichalMlodawski/nsfw-image-detection-large, which has become unavailable.
# Usage example
```python
from PIL import Image
import torch
from transformers import AutoProcessor, FocalNetForImageClassification
# Use the GPU when available, otherwise fall back to CPU
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_path = "lovetillion/nsfw-image-detection-large"
# Load the image processor and the classification model
feature_extractor = AutoProcessor.from_pretrained(model_path)
model = FocalNetForImageClassification.from_pretrained(model_path).to(DEVICE)
model.eval()
# Mapping from model labels to NSFW categories
label_to_category = {
    "LABEL_0": "Safe",
    "LABEL_1": "Questionable",
    "LABEL_2": "Unsafe",
}
filename = "example.png"
image = Image.open(filename).convert("RGB")

# Preprocess the image and move the tensors to the same device as the model
inputs = feature_extractor(images=image, return_tensors="pt").to(DEVICE)

with torch.no_grad():
    outputs = model(**inputs)
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)

# Take the highest-probability class and map it to a human-readable category
confidence, predicted = torch.max(probabilities, 1)
label = model.config.id2label[predicted.item()]
category = label_to_category.get(label, label)
print(category, confidence.item() * 100, filename)
```
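For quick experiments, the same checkpoint should also work with the high-level `pipeline` API from transformers. This is a minimal sketch, not the canonical usage: it assumes an `example.png` in the working directory, and the pipeline returns the raw `LABEL_*` names, so the same category mapping applies.

```python
from transformers import pipeline

# Load the checkpoint through the generic image-classification pipeline
classifier = pipeline("image-classification", model="lovetillion/nsfw-image-detection-large")

# Same label-to-category mapping as in the full example above
label_to_category = {"LABEL_0": "Safe", "LABEL_1": "Questionable", "LABEL_2": "Unsafe"}

# Print each predicted category with its probability score
for result in classifier("example.png"):
    print(label_to_category.get(result["label"], result["label"]), result["score"])
```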
# For more information
* Live demonstration in a production ensemble workflow: https://piglet.video
* Results from our ethical AI whitepaper: https://lovetillion.org/liaise.pdf
* Join us on Telegram at https://t.me/pigletproject