johnrachwanpruna committed
Commit 039dbfb
1 Parent(s): a11d5f5

Update README.md

Files changed (1)
  1. README.md +45 -37
README.md CHANGED
@@ -70,43 +70,51 @@ You can run the smashed model with these steps:
 2. Load & run the model.
 ```python
 
-import torch
-
-from optimum.quanto import freeze, qfloat8, quantize
-
-from diffusers import FlowMatchEulerDiscreteScheduler, AutoencoderKL
-from diffusers.models.transformers.transformer_flux import FluxTransformer2DModel
-from diffusers.pipelines.flux.pipeline_flux import FluxPipeline
-from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
-
-dtype = torch.bfloat16
-
-bfl_repo = "black-forest-labs/FLUX.1-schnell"
-revision = "refs/pr/1"
-local_path = "FLUX.1-schnell-8bit"
-
-scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(bfl_repo, subfolder="scheduler", revision=revision)
-text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
-tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
-text_encoder_2 = torch.load(local_path + '/text_encoder_2.pt')
-tokenizer_2 = T5TokenizerFast.from_pretrained(bfl_repo, subfolder="tokenizer_2", torch_dtype=dtype, revision=revision)
-vae = AutoencoderKL.from_pretrained(bfl_repo, subfolder="vae", torch_dtype=dtype, revision=revision)
-transformer = torch.load(local_path + '/transformer.pt')
-
-pipe = FluxPipeline(
-    scheduler=scheduler,
-    text_encoder=text_encoder,
-    tokenizer=tokenizer,
-    text_encoder_2=None,
-    tokenizer_2=tokenizer_2,
-    vae=vae,
-    transformer=None,
-)
-pipe.text_encoder_2 = text_encoder_2
-pipe.transformer = transformer
-pipe.enable_model_cpu_offload()
-
-generator = torch.Generator().manual_seed(12345)
+import torch
+
+from optimum.quanto import freeze, qfloat8, quantize
+
+from diffusers import FlowMatchEulerDiscreteScheduler, AutoencoderKL
+from diffusers.models.transformers.transformer_flux import FluxTransformer2DModel
+from diffusers.pipelines.flux.pipeline_flux import FluxPipeline
+from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
+
+dtype = torch.bfloat16
+
+bfl_repo = "black-forest-labs/FLUX.1-schnell"
+revision = "refs/pr/1"
+local_path = "FLUX.1-schnell-8bit"
+
+scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(bfl_repo, subfolder="scheduler", revision=revision)
+text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
+tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
+text_encoder_2 = torch.load(local_path + '/text_encoder_2.pt')
+tokenizer_2 = T5TokenizerFast.from_pretrained(bfl_repo, subfolder="tokenizer_2", torch_dtype=dtype, revision=revision)
+vae = AutoencoderKL.from_pretrained(bfl_repo, subfolder="vae", torch_dtype=dtype, revision=revision)
+transformer = torch.load(local_path + '/transformer.pt')
+
+pipe = FluxPipeline(
+    scheduler=scheduler,
+    text_encoder=text_encoder,
+    tokenizer=tokenizer,
+    text_encoder_2=None,
+    tokenizer_2=tokenizer_2,
+    vae=vae,
+    transformer=None,
+)
+pipe.text_encoder_2 = text_encoder_2
+pipe.transformer = transformer
+pipe.enable_model_cpu_offload()
+
+generator = torch.Generator().manual_seed(12345)
+image = pipe(
+    prompt,
+    guidance_scale=0.0,
+    num_inference_steps=4,
+    max_sequence_length=256,
+    generator=torch.Generator("cpu").manual_seed(0)
+).images[0]
+image.save("flux-schnell.png")
 ```
 
 ## Configurations
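A note on the updated snippet: it passes a `prompt` variable to the pipeline that is never defined, and the `generator` seeded with `12345` is not the one actually used (a fresh CPU generator seeded with `0` is built inline). A minimal sketch of the missing pieces, continuing from the pipeline built above; the prompt string here is a placeholder of my choosing, not from the repository:

```python
# Hypothetical example prompt -- the README references `prompt` without defining it.
prompt = "A photo of a corgi wearing a space suit"

# Same call as in the README, but reusing the seeded generator for reproducibility.
image = pipe(
    prompt,
    guidance_scale=0.0,
    num_inference_steps=4,
    max_sequence_length=256,
    generator=generator,  # the torch.Generator().manual_seed(12345) defined earlier
).images[0]
image.save("flux-schnell.png")
```

The snippet also imports `freeze`, `qfloat8`, and `quantize` from `optimum.quanto` without calling them; presumably they were used when producing the `transformer.pt` and `text_encoder_2.pt` files that `torch.load` reads from `FLUX.1-schnell-8bit`. A rough sketch of that preparation step, assuming the standard optimum-quanto quantize/freeze workflow; the output paths and the use of `torch.save` are assumptions, not taken from this repository:

```python
import torch
from optimum.quanto import freeze, qfloat8, quantize
from diffusers.models.transformers.transformer_flux import FluxTransformer2DModel
from transformers import T5EncoderModel

dtype = torch.bfloat16
bfl_repo = "black-forest-labs/FLUX.1-schnell"
revision = "refs/pr/1"

# Quantize the two largest components to 8-bit float weights, then freeze them.
transformer = FluxTransformer2DModel.from_pretrained(
    bfl_repo, subfolder="transformer", torch_dtype=dtype, revision=revision
)
quantize(transformer, weights=qfloat8)
freeze(transformer)

text_encoder_2 = T5EncoderModel.from_pretrained(
    bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype, revision=revision
)
quantize(text_encoder_2, weights=qfloat8)
freeze(text_encoder_2)

# Serialize the whole modules so they can be reloaded with torch.load, as in the README.
torch.save(transformer, "FLUX.1-schnell-8bit/transformer.pt")
torch.save(text_encoder_2, "FLUX.1-schnell-8bit/text_encoder_2.pt")
```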