|
import torch

|
def swap_scale_shift(weight):
    # Diffusers and the original Flux checkpoints store the adaLN shift/scale
    # halves in opposite order, so swap the two halves of the parameter.
    shift, scale = weight.chunk(2, dim=0)
    new_weight = torch.cat([scale, shift], dim=0)
    return new_weight
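# Illustrative example (not part of the original script): swapping the halves of a
# 1-D tensor of length 4 moves the second half in front of the first:
#   >>> swap_scale_shift(torch.tensor([0., 1., 2., 3.]))
#   tensor([2., 3., 0., 1.])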
|
|
|
def convert_diffusers_to_flux_checkpoint(
    converted_state_dict,
    num_layers=19,
    num_single_layers=38,
    inner_dim=3072,
    mlp_ratio=4.0,
):
    """
    Convert a Diffusers-format state dict back to the original Flux Transformer checkpoint format.

    This function reverses the Flux-to-Diffusers conversion: it takes a state dictionary in the
    Diffusers naming scheme and maps every parameter back to the original Flux layout, handling
    the embedders, the double (joint) transformer blocks, the single transformer blocks, and the
    final normalization and projection layers.

    Args:
        converted_state_dict (dict): The state dictionary in Diffusers format to be converted back.
        num_layers (int, optional): Number of double (joint) transformer blocks. Default is 19.
        num_single_layers (int, optional): Number of single transformer blocks. Default is 38.
        inner_dim (int, optional): Hidden size of the transformer; not used by this reverse
            conversion. Default is 3072.
        mlp_ratio (float, optional): Ratio used to compute the MLP hidden dimension; not used by
            this reverse conversion. Default is 4.0.

    Returns:
        dict: The state dictionary in the original Flux Transformer checkpoint format.
    """
    original_state_dict = {}

original_state_dict["time_in.in_layer.weight"] = converted_state_dict.pop( |
|
"time_text_embed.timestep_embedder.linear_1.weight" |
|
) |
|
original_state_dict["time_in.in_layer.bias"] = converted_state_dict.pop( |
|
"time_text_embed.timestep_embedder.linear_1.bias" |
|
) |
|
original_state_dict["time_in.out_layer.weight"] = converted_state_dict.pop( |
|
"time_text_embed.timestep_embedder.linear_2.weight" |
|
) |
|
original_state_dict["time_in.out_layer.bias"] = converted_state_dict.pop( |
|
"time_text_embed.timestep_embedder.linear_2.bias" |
|
) |
|
|
|
|
|
original_state_dict["vector_in.in_layer.weight"] = converted_state_dict.pop( |
|
"time_text_embed.text_embedder.linear_1.weight" |
|
) |
|
original_state_dict["vector_in.in_layer.bias"] = converted_state_dict.pop( |
|
"time_text_embed.text_embedder.linear_1.bias" |
|
) |
|
original_state_dict["vector_in.out_layer.weight"] = converted_state_dict.pop( |
|
"time_text_embed.text_embedder.linear_2.weight" |
|
) |
|
original_state_dict["vector_in.out_layer.bias"] = converted_state_dict.pop( |
|
"time_text_embed.text_embedder.linear_2.bias" |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
    # time_text_embed.guidance_embedder -> guidance_in (not present in all checkpoints)
    has_guidance = any("guidance_embedder" in k for k in converted_state_dict)
    if has_guidance:
        original_state_dict["guidance_in.in_layer.weight"] = converted_state_dict.pop(
            "time_text_embed.guidance_embedder.linear_1.weight"
        )
        original_state_dict["guidance_in.in_layer.bias"] = converted_state_dict.pop(
            "time_text_embed.guidance_embedder.linear_1.bias"
        )
        original_state_dict["guidance_in.out_layer.weight"] = converted_state_dict.pop(
            "time_text_embed.guidance_embedder.linear_2.weight"
        )
        original_state_dict["guidance_in.out_layer.bias"] = converted_state_dict.pop(
            "time_text_embed.guidance_embedder.linear_2.bias"
        )

original_state_dict["txt_in.weight"] = converted_state_dict.pop("context_embedder.weight") |
|
original_state_dict["txt_in.bias"] = converted_state_dict.pop("context_embedder.bias") |
|
|
|
|
|
original_state_dict["img_in.weight"] = converted_state_dict.pop("x_embedder.weight") |
|
original_state_dict["img_in.bias"] = converted_state_dict.pop("x_embedder.bias") |
|
|
|
|
|
|
|
|
|
|
|
    # Double (joint) transformer blocks: transformer_blocks.{i} -> double_blocks.{i}
    for i in range(num_layers):
        block_prefix = f"transformer_blocks.{i}."

original_state_dict[f"double_blocks.{i}.img_mod.lin.weight"] = converted_state_dict.pop( |
|
f"{block_prefix}norm1.linear.weight" |
|
) |
|
original_state_dict[f"double_blocks.{i}.img_mod.lin.bias"] = converted_state_dict.pop( |
|
f"{block_prefix}norm1.linear.bias" |
|
) |
|
|
|
|
|
original_state_dict[f"double_blocks.{i}.txt_mod.lin.weight"] = converted_state_dict.pop( |
|
f"{block_prefix}norm1_context.linear.weight" |
|
) |
|
original_state_dict[f"double_blocks.{i}.txt_mod.lin.bias"] = converted_state_dict.pop( |
|
f"{block_prefix}norm1_context.linear.bias" |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
        # Image-stream attention: concatenate separate Q, K, V back into a fused qkv projection
        q_weight = converted_state_dict.pop(f"{block_prefix}attn.to_q.weight")
        k_weight = converted_state_dict.pop(f"{block_prefix}attn.to_k.weight")
        v_weight = converted_state_dict.pop(f"{block_prefix}attn.to_v.weight")
        original_state_dict[f"double_blocks.{i}.img_attn.qkv.weight"] = torch.cat(
            [q_weight, k_weight, v_weight], dim=0
        )

        q_bias = converted_state_dict.pop(f"{block_prefix}attn.to_q.bias")
        k_bias = converted_state_dict.pop(f"{block_prefix}attn.to_k.bias")
        v_bias = converted_state_dict.pop(f"{block_prefix}attn.to_v.bias")
        original_state_dict[f"double_blocks.{i}.img_attn.qkv.bias"] = torch.cat(
            [q_bias, k_bias, v_bias], dim=0
        )

        # Text-stream attention: concatenate the added Q, K, V projections into a fused qkv
        add_q_weight = converted_state_dict.pop(f"{block_prefix}attn.add_q_proj.weight")
        add_k_weight = converted_state_dict.pop(f"{block_prefix}attn.add_k_proj.weight")
        add_v_weight = converted_state_dict.pop(f"{block_prefix}attn.add_v_proj.weight")
        original_state_dict[f"double_blocks.{i}.txt_attn.qkv.weight"] = torch.cat(
            [add_q_weight, add_k_weight, add_v_weight], dim=0
        )

        add_q_bias = converted_state_dict.pop(f"{block_prefix}attn.add_q_proj.bias")
        add_k_bias = converted_state_dict.pop(f"{block_prefix}attn.add_k_proj.bias")
        add_v_bias = converted_state_dict.pop(f"{block_prefix}attn.add_v_proj.bias")
        original_state_dict[f"double_blocks.{i}.txt_attn.qkv.bias"] = torch.cat(
            [add_q_bias, add_k_bias, add_v_bias], dim=0
        )

original_state_dict[f"double_blocks.{i}.img_attn.norm.query_norm.scale"] = converted_state_dict.pop( |
|
f"{block_prefix}attn.norm_q.weight" |
|
) |
|
|
|
|
|
original_state_dict[f"double_blocks.{i}.img_attn.norm.key_norm.scale"] = converted_state_dict.pop( |
|
f"{block_prefix}attn.norm_k.weight" |
|
) |
|
|
|
|
|
original_state_dict[f"double_blocks.{i}.txt_attn.norm.query_norm.scale"] = converted_state_dict.pop( |
|
f"{block_prefix}attn.norm_added_q.weight" |
|
) |
|
|
|
|
|
original_state_dict[f"double_blocks.{i}.txt_attn.norm.key_norm.scale"] = converted_state_dict.pop( |
|
f"{block_prefix}attn.norm_added_k.weight" |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
original_state_dict[f"double_blocks.{i}.img_mlp.0.weight"] = converted_state_dict.pop( |
|
f"{block_prefix}ff.net.0.proj.weight" |
|
) |
|
original_state_dict[f"double_blocks.{i}.img_mlp.0.bias"] = converted_state_dict.pop( |
|
f"{block_prefix}ff.net.0.proj.bias" |
|
) |
|
original_state_dict[f"double_blocks.{i}.img_mlp.2.weight"] = converted_state_dict.pop( |
|
f"{block_prefix}ff.net.2.weight" |
|
) |
|
original_state_dict[f"double_blocks.{i}.img_mlp.2.bias"] = converted_state_dict.pop( |
|
f"{block_prefix}ff.net.2.bias" |
|
) |
|
|
|
|
|
original_state_dict[f"double_blocks.{i}.txt_mlp.0.weight"] = converted_state_dict.pop( |
|
f"{block_prefix}ff_context.net.0.proj.weight" |
|
) |
|
original_state_dict[f"double_blocks.{i}.txt_mlp.0.bias"] = converted_state_dict.pop( |
|
f"{block_prefix}ff_context.net.0.proj.bias" |
|
) |
|
original_state_dict[f"double_blocks.{i}.txt_mlp.2.weight"] = converted_state_dict.pop( |
|
f"{block_prefix}ff_context.net.2.weight" |
|
) |
|
original_state_dict[f"double_blocks.{i}.txt_mlp.2.bias"] = converted_state_dict.pop( |
|
f"{block_prefix}ff_context.net.2.bias" |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
original_state_dict[f"double_blocks.{i}.img_attn.proj.weight"] = converted_state_dict.pop( |
|
f"{block_prefix}attn.to_out.0.weight" |
|
) |
|
original_state_dict[f"double_blocks.{i}.img_attn.proj.bias"] = converted_state_dict.pop( |
|
f"{block_prefix}attn.to_out.0.bias" |
|
) |
|
|
|
|
|
original_state_dict[f"double_blocks.{i}.txt_attn.proj.weight"] = converted_state_dict.pop( |
|
f"{block_prefix}attn.to_add_out.weight" |
|
) |
|
original_state_dict[f"double_blocks.{i}.txt_attn.proj.bias"] = converted_state_dict.pop( |
|
f"{block_prefix}attn.to_add_out.bias" |
|
) |
|
|
|
|
|
|
|
|
|
|
|
    # Single transformer blocks: single_transformer_blocks.{i} -> single_blocks.{i}
    for i in range(num_single_layers):
        block_prefix = f"single_transformer_blocks.{i}."

original_state_dict[f"single_blocks.{i}.modulation.lin.weight"] = converted_state_dict.pop( |
|
f"{block_prefix}norm.linear.weight" |
|
) |
|
original_state_dict[f"single_blocks.{i}.modulation.lin.bias"] = converted_state_dict.pop( |
|
f"{block_prefix}norm.linear.bias" |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
        # Q, K, V and the MLP input projection are fused into a single linear1 in the original format
        q_weight = converted_state_dict.pop(f"{block_prefix}attn.to_q.weight")
        k_weight = converted_state_dict.pop(f"{block_prefix}attn.to_k.weight")
        v_weight = converted_state_dict.pop(f"{block_prefix}attn.to_v.weight")
        proj_mlp_weight = converted_state_dict.pop(f"{block_prefix}proj_mlp.weight")
        combined_weight = torch.cat([q_weight, k_weight, v_weight, proj_mlp_weight], dim=0)
        original_state_dict[f"single_blocks.{i}.linear1.weight"] = combined_weight

        q_bias = converted_state_dict.pop(f"{block_prefix}attn.to_q.bias")
        k_bias = converted_state_dict.pop(f"{block_prefix}attn.to_k.bias")
        v_bias = converted_state_dict.pop(f"{block_prefix}attn.to_v.bias")
        proj_mlp_bias = converted_state_dict.pop(f"{block_prefix}proj_mlp.bias")
        combined_bias = torch.cat([q_bias, k_bias, v_bias, proj_mlp_bias], dim=0)
        original_state_dict[f"single_blocks.{i}.linear1.bias"] = combined_bias

original_state_dict[f"single_blocks.{i}.norm.query_norm.scale"] = converted_state_dict.pop( |
|
f"{block_prefix}attn.norm_q.weight" |
|
) |
|
|
|
|
|
original_state_dict[f"single_blocks.{i}.norm.key_norm.scale"] = converted_state_dict.pop( |
|
f"{block_prefix}attn.norm_k.weight" |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
original_state_dict[f"single_blocks.{i}.linear2.weight"] = converted_state_dict.pop( |
|
f"{block_prefix}proj_out.weight" |
|
) |
|
original_state_dict[f"single_blocks.{i}.linear2.bias"] = converted_state_dict.pop( |
|
f"{block_prefix}proj_out.bias" |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
original_state_dict["final_layer.linear.weight"] = converted_state_dict.pop("proj_out.weight") |
|
original_state_dict["final_layer.linear.bias"] = converted_state_dict.pop("proj_out.bias") |
|
|
|
|
|
original_state_dict["final_layer.adaLN_modulation.1.weight"] = swap_scale_shift( |
|
converted_state_dict.pop("norm_out.linear.weight") |
|
) |
|
original_state_dict["final_layer.adaLN_modulation.1.bias"] = swap_scale_shift( |
|
converted_state_dict.pop("norm_out.linear.bias") |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    # Surface any keys that were not mapped so the caller can investigate
    if len(converted_state_dict) > 0:
        remaining_keys = list(converted_state_dict.keys())
        print(f"Warning: The following keys were not mapped and remain in the state dict: {remaining_keys}")

    return original_state_dict
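

# Usage sketch, not part of the original script: load a Diffusers-format Flux
# transformer state dict and write out the converted Flux-format checkpoint.
# The file names below are hypothetical placeholders; adjust them to your setup.
if __name__ == "__main__":
    from safetensors.torch import load_file, save_file

    diffusers_sd = load_file("flux_transformer_diffusers.safetensors")  # hypothetical input path
    flux_sd = convert_diffusers_to_flux_checkpoint(diffusers_sd)
    save_file(flux_sd, "flux_transformer_original.safetensors")  # hypothetical output path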
|
|