patrickvonplaten committed on
Commit
b704080
1 Parent(s): 27f745e

Upload README.md with huggingface_hub

Files changed (1)
  1. README.md +32 -111
README.md CHANGED
@@ -1,111 +1,32 @@
- ```py
- from PIL import Image
- import torch
- from muse import PipelineMuse, MaskGiTUViT
- from datasets import Dataset, Features
- from datasets import Image as ImageFeature
- from datasets import Value, load_dataset
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
-
- pipe = PipelineMuse.from_pretrained(
-     transformer_path="valhalla/research-run",
-     text_encoder_path="openMUSE/clip-vit-large-patch14-text-enc",
-     vae_path="openMUSE/vqgan-f16-8192-laion",
- ).to(device)
-
- pipe.transformer = MaskGiTUViT.from_pretrained("valhalla/research-run-finetuned-journeydb", revision="06bcd6ab6580a2ed3275ddfc17f463b8574457da", subfolder="ema_model").to(device)
- pipe.tokenizer.pad_token_id = 49407
-
- if device == "cuda":
-     pipe.transformer.enable_xformers_memory_efficient_attention()
-     pipe.text_encoder.to(torch.float16)
-     pipe.transformer.to(torch.float16)
-
-
- import PIL
-
-
- def main():
-     print("Loading dataset...")
-     parti_prompts = load_dataset("nateraw/parti-prompts", split="train")
-
-     print("Loading pipeline...")
-     seed = 0
-
-     device = "cuda"
-     torch.manual_seed(0)
-
-     ckpt_id = "openMUSE/muse-512"
-
-     scale = 10
-
-     print("Running inference...")
-     main_dict = {}
-     for i in range(len(parti_prompts)):
-         sample = parti_prompts[i]
-         prompt = sample["Prompt"]
-
-         image = pipe(
-             prompt,
-             timesteps=16,
-             negative_text=None,
-             guidance_scale=scale,
-             temperature=(2, 0),
-             orig_size=(512, 512),
-             crop_coords=(0, 0),
-             aesthetic_score=6,
-             use_fp16=device == "cuda",
-             transformer_seq_len=1024,
-             use_tqdm=False,
-         )[0]
-
-         image = image.resize((256, 256), resample=PIL.Image.Resampling.LANCZOS)
-         img_path = f"/home/patrick/muse_images/muse_512_{i}.png"
-         image.save(img_path)
-         main_dict.update(
-             {
-                 prompt: {
-                     "img_path": img_path,
-                     "Category": sample["Category"],
-                     "Challenge": sample["Challenge"],
-                     "Note": sample["Note"],
-                     "model_name": ckpt_id,
-                     "seed": seed,
-                 }
-             }
-         )
-
-     def generation_fn():
-         for prompt in main_dict:
-             prompt_entry = main_dict[prompt]
-             yield {
-                 "Prompt": prompt,
-                 "Category": prompt_entry["Category"],
-                 "Challenge": prompt_entry["Challenge"],
-                 "Note": prompt_entry["Note"],
-                 "images": {"path": prompt_entry["img_path"]},
-                 "model_name": prompt_entry["model_name"],
-                 "seed": prompt_entry["seed"],
-             }
-
-     print("Preparing HF dataset...")
-     ds = Dataset.from_generator(
-         generation_fn,
-         features=Features(
-             Prompt=Value("string"),
-             Category=Value("string"),
-             Challenge=Value("string"),
-             Note=Value("string"),
-             images=ImageFeature(),
-             model_name=Value("string"),
-             seed=Value("int64"),
-         ),
-     )
-     ds_id = "diffusers-parti-prompts/muse512"
-     ds.push_to_hub(ds_id)
-
-
- if __name__ == "__main__":
-     main()
- ```
 
+ ---
+ configs:
+ - config_name: default
+   data_files:
+   - split: train
+     path: data/train-*
+ dataset_info:
+   features:
+   - name: Prompt
+     dtype: string
+   - name: Category
+     dtype: string
+   - name: Challenge
+     dtype: string
+   - name: Note
+     dtype: string
+   - name: images
+     dtype: image
+   - name: model_name
+     dtype: string
+   - name: seed
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 128701081.568
+     num_examples: 1632
+   download_size: 127769152
+   dataset_size: 128701081.568
+ ---
+ # Dataset Card for "muse_512"
+
+ [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)