# An example of using different algos/settings with the "full" preset
enable_conv = true

unet_target_module = [
    "Transformer2DModel",
    "ResnetBlock2D",
    "Downsample2D",
    "Upsample2D",
]
unet_target_name = [
    ".*time_embed\\..+",
    ".*label_emb\\..+",
    ".*input_blocks\\.0.+",
    "^out\\..+",
]
text_encoder_target_module = [
    "CLIPAttention",
    "CLIPMLP",
]
text_encoder_target_name = [
    # "token_embedding",  # not supported: Embedding module in CLIP
]

[module_algo_map]

[module_algo_map.CrossAttention]  # Attention layer in UNet
algo = "lokr"
dim = 10000
alpha = 10000
factor = 4

[module_algo_map.FeedForward]  # MLP layer in UNet
algo = "lokr"
dim = 10000
alpha = 10000
factor = 2

[module_algo_map.CLIPAttention]  # Attention layer in TE
algo = "lokr"
dim = 10000
alpha = 10000
factor = 8

[module_algo_map.CLIPMLP]  # MLP layer in TE
algo = "lokr"
dim = 10000
alpha = 10000
factor = 8
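
To make the three targeting mechanisms above concrete, here is a minimal Python sketch that parses a preset file like this one and inspects it. This is a hypothetical illustration, not LyCORIS's internal loader; the file name `preset.toml` and the printed checks are assumptions for demonstration only.

```python
# Minimal sketch (hypothetical loader, not LyCORIS internals):
# read a preset like the one above and show how its three targeting mechanisms read.
import re
import tomllib  # Python 3.11+; on older versions use the third-party `toml` package

with open("preset.toml", "rb") as f:  # hypothetical path to the preset shown above
    preset = tomllib.load(f)

# 1) Class-based targeting: modules whose class names appear in these lists.
print(preset["unet_target_module"])       # ['Transformer2DModel', 'ResnetBlock2D', ...]

# 2) Name-based targeting: module names are matched against these regexes,
#    which is how the time embedding, label embedding, and in/out blocks are caught.
patterns = [re.compile(p) for p in preset["unet_target_name"]]
print([bool(p.match("time_embed.0")) for p in patterns])   # only the first pattern matches

# 3) Per-class overrides: [module_algo_map.X] supplies algo/dim/alpha/factor for
#    class X; classes without an entry keep the globally configured settings.
print(preset["module_algo_map"]["CrossAttention"])
# {'algo': 'lokr', 'dim': 10000, 'alpha': 10000, 'factor': 4}
```

Note the doubled backslashes in the TOML regexes (`".*time_embed\\..+"`): TOML basic strings treat `\\` as an escaped backslash, so the pattern that reaches the regex engine is `.*time_embed\..+`.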