# Run this before you deploy it on replicate, because if you don't
# whenever you run the model, it will download the weights from the
# internet, which will take a long time.
#
# Downloads the Stable Diffusion safety checker and the CLIP image
# processor from the Hugging Face Hub and caches them under local
# directories so they can be baked into the deployment image.
import torch
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)
# CLIPImageProcessor is the current name; CLIPFeatureExtractor is its
# deprecated predecessor and was imported but never used.
from transformers import CLIPImageProcessor

# fp16 to match the dtype the inference pipeline loads the checker with.
safety = StableDiffusionSafetyChecker.from_pretrained(
    "CompVis/stable-diffusion-safety-checker",
    torch_dtype=torch.float16,
)
safety.save_pretrained("./safety-cache")

feature_extractor = CLIPImageProcessor.from_pretrained(
    "openai/clip-vit-base-patch32",
)
feature_extractor.save_pretrained("./feature-extractor")