Update README.md
README.md
CHANGED
@@ -25,3 +25,57 @@ See [examples](https://huggingface.co/Norod78/sd2-dreambooth-ClaymationXmas/tree
![Collage 2](https://huggingface.co/Norod78/sd2-dreambooth-ClaymationXmas/resolve/main/collage_2.jpeg)
![Collage 3](https://huggingface.co/Norod78/sd2-dreambooth-ClaymationXmas/resolve/main/collage_3.jpeg)
![Collage 4](https://huggingface.co/Norod78/sd2-dreambooth-ClaymationXmas/resolve/main/collage_4.jpeg)
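
You can generate images with this model using the `diffusers` library (this assumes `diffusers`, `transformers` and `torch` are installed), for example: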
```py
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
import torch

def main():
    #////////////////////////////////////////////
    seed = 42
    model = "Norod78/sd2-dreambooth-ClaymationXmas"
    #////////////////////////////////////////////

    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32

    # Seed globally and create a generator on the same device as the pipeline
    # so the results are reproducible.
    torch.manual_seed(seed)
    generator = torch.Generator(device=device)
    generator.manual_seed(seed)

    scheduler = DPMSolverMultistepScheduler(
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="scaled_linear",
        num_train_timesteps=1000,
        trained_betas=None,
        predict_epsilon=True,
        thresholding=False,
        algorithm_type="dpmsolver++",
        solver_type="midpoint",
        lower_order_final=True,
    )

    pipe = StableDiffusionPipeline.from_pretrained(
        model, scheduler=scheduler, torch_dtype=dtype, use_auth_token=True
    ).to(device)

    #////////////////////////////////////////////
    num_inference_steps = 20
    width = 512
    height = 512
    samples = 4
    #////////////////////////////////////////////

    prompt = "Willy Wonka, ClaymationXmas"
    result = pipe(
        [prompt] * samples,
        num_inference_steps=num_inference_steps,
        height=height,
        width=width,
        generator=generator,
    )
    images = result.images

    # Save each sample with a filename that encodes the prompt and settings.
    for i, image in enumerate(images):
        prompt_to_print = str(i) + "-" + prompt
        output_file = (
            prompt_to_print.replace(" ", "_")
            + "-" + str(width) + "x" + str(height)
            + "_" + str(num_inference_steps) + "steps"
            + "_seed" + str(seed) + ".jpg"
        )
        image.save(output_file)
        print("Saved: " + output_file)

if __name__ == "__main__":
    main()
```
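
The example prompt keeps the "ClaymationXmas" DreamBooth token; include it in your own prompts to trigger the style.

If you prefer not to spell out every scheduler parameter, a shorter variant (a minimal sketch, assuming a recent `diffusers` release where schedulers expose `from_config`) is to load the pipeline first and derive the DPM-Solver++ scheduler from the model's own scheduler config:

```py
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
import torch

model = "Norod78/sd2-dreambooth-ClaymationXmas"
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Load the pipeline, then swap in DPM-Solver++ built from the scheduler
# config shipped with the model instead of listing every parameter by hand.
pipe = StableDiffusionPipeline.from_pretrained(model, torch_dtype=dtype).to(device)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

generator = torch.Generator(device=device).manual_seed(42)
image = pipe("Willy Wonka, ClaymationXmas", num_inference_steps=20,
             generator=generator).images[0]
image.save("willy_wonka_claymationxmas.jpg")
```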

Fine-tuned by [@Norod78](https://twitter.com/Norod78)