---
dataset_info:
  features:
  - name: Prompt
    dtype: string
  - name: Category
    dtype: string
  - name: Challenge
    dtype: string
  - name: Note
    dtype: string
  - name: images
    dtype: image
  - name: model_name
    dtype: string
  - name: seed
    dtype: int64
  splits:
  - name: train
    num_bytes: 163668480.032
    num_examples: 1632
  download_size: 163766653
  dataset_size: 163668480.032
---

# Dataset Card for "kandinsky-2-2"

The dataset was generated using the code below:

```python
import PIL
import torch
from datasets import Dataset, Features
from datasets import Image as ImageFeature
from datasets import Value, load_dataset
from diffusers import DiffusionPipeline


def main():
    print("Loading dataset...")
    parti_prompts = load_dataset("nateraw/parti-prompts", split="train")

    print("Loading pipeline...")
    pipe_prior = DiffusionPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    )
    pipe_prior.to("cuda")
    pipe_prior.set_progress_bar_config(disable=True)

    t2i_pipe = DiffusionPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
    )
    t2i_pipe.to("cuda")
    t2i_pipe.set_progress_bar_config(disable=True)

    seed = 0
    generator = torch.Generator("cuda").manual_seed(seed)
    ckpt_id = (
        "kandinsky-community/" + "kandinsky-2-2-prior" + "_" + "kandinsky-2-2-decoder"
    )

    print("Running inference...")
    main_dict = {}
    for i in range(len(parti_prompts)):
        sample = parti_prompts[i]
        prompt = sample["Prompt"]

        image_embeds, negative_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=100,
            guidance_scale=7.5,
        ).to_tuple()
        image = t2i_pipe(
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            guidance_scale=7.5,
        ).images[0]

        image = image.resize((256, 256), resample=PIL.Image.Resampling.LANCZOS)
        img_path = f"kandinsky_22_{i}.png"
        image.save(img_path)
        main_dict.update(
            {
                prompt: {
                    "img_path": img_path,
                    "Category": sample["Category"],
                    "Challenge": sample["Challenge"],
                    "Note": sample["Note"],
                    "model_name": ckpt_id,
                    "seed": seed,
                }
            }
        )

    def generation_fn():
        for prompt in main_dict:
            prompt_entry = main_dict[prompt]
            yield {
                "Prompt": prompt,
                "Category": prompt_entry["Category"],
                "Challenge": prompt_entry["Challenge"],
                "Note": prompt_entry["Note"],
                "images": {"path": prompt_entry["img_path"]},
                "model_name": prompt_entry["model_name"],
                "seed": prompt_entry["seed"],
            }

    print("Preparing HF dataset...")
    ds = Dataset.from_generator(
        generation_fn,
        features=Features(
            Prompt=Value("string"),
            Category=Value("string"),
            Challenge=Value("string"),
            Note=Value("string"),
            images=ImageFeature(),
            model_name=Value("string"),
            seed=Value("int64"),
        ),
    )

    ds_id = "diffusers-parti-prompts/kandinsky-2-2"
    ds.push_to_hub(ds_id)


if __name__ == "__main__":
    main()
```
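
For reference, the pushed dataset can be loaded back with 🤗 Datasets. The following is a minimal sketch, assuming the dataset ID used in the script above (`diffusers-parti-prompts/kandinsky-2-2`) and the single `train` split declared in the metadata; the printed fields simply mirror the features listed there:

```python
from datasets import load_dataset

# Load the dataset produced by the generation script above.
ds = load_dataset("diffusers-parti-prompts/kandinsky-2-2", split="train")

# Each row pairs a Parti prompt with the 256x256 image generated by Kandinsky 2.2.
sample = ds[0]
print(sample["Prompt"], sample["Category"], sample["model_name"], sample["seed"])
sample["images"]  # decoded as a PIL image by the `image` feature
```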