Upload coherent_pizzapigeon.cfg

#1 by Pinguin - opened
Files changed (1)
  1. coherent_pizzapigeon.cfg +96 -0
coherent_pizzapigeon.cfg ADDED
@@ -0,0 +1,96 @@
+ #This settings file can be loaded back into Latent Majesty Diffusion. If you like your settings, consider sharing them with the settings library at https://github.com/multimodalart/MajestyDiffusion
+ [model]
+ latent_diffusion_model = finetuned
+ #THIS SETTING CAN RUN ON A T4!!!
+ [clip_list]
+ perceptors = ['[clip - mlfoundations - ViT-B-16--openai]', '[clip - mlfoundations - ViT-L-14--openai]', '[clip - mlfoundations - RN50x16--openai]', '[clip - mlfoundations - ViT-B-32--laion2b_e16]']
+
+ [basic_settings]
+ #Perceptor things
+ #Everything disabled here.
+ #width = 256
+ #height = 256
+ #latent_diffusion_guidance_scale = 10
+ #clip_guidance_scale = 16000
+ #aesthetic_loss_scale = 200
+ #augment_cuts=True
+
+ #Init image settings
+ starting_timestep = 0.9
+ init_scale = 1000
+ init_brightness = 0.0
+
+ [advanced_settings]
+ #Add CLIP guidance and all the flavors, or just run plain Latent Diffusion
+ use_cond_fn = True
+
+ #Custom schedules for cuts. Check out the schedules documentation in the MajestyDiffusion repository
+ custom_schedule_setting = [[30, 1000, 8], 'gfpgan:1.5', [20, 200, 8], 'gfpgan:1.0', [50, 220, 4]]
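+ #(Assumption: each [start, end, step] triple seems to define one sampling stage over the diffusion timesteps, and string entries such as 'gfpgan:1.5' seem to insert a GFPGAN face-restoration pass between stages)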
+
+ #Cut settings
+ clamp_index = [2.4, 2.1]
+ cut_overview = [8]*500 + [4]*500
+ cut_innercut = [0]*500 + [4]*500
+ cut_blur_n = [0]*1300
+ cut_blur_kernel = 3
+ cut_ic_pow = 0.6
+ cut_icgray_p = [0.1]*300 + [0]*1000
+ cutn_batches = 1
+ range_index = [0]*200 + [50000.0]*400 + [0]*1000
+ active_function = "softsign"
+ ths_method = "clamp"
+ tv_scales = [150]*1 + [0]*3
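+ #(Assumption: list-valued settings above such as cut_overview, cut_innercut and range_index appear to be schedules indexed by the current diffusion step as sampling progresses)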
+
+ #This schedules the CLIP guidance across the steps; if you comment it out, clip_guidance_scale will be used instead
+ clip_guidance_schedule = [16000]*1000
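+ #(Hypothetical example: a schedule like [20000]*300 + [13000]*700 would apply stronger CLIP guidance for the first 300 steps and then relax it)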
+
+ #Apply symmetric loss (forces symmetry in your results)
+ symmetric_loss_scale = 0
+
+ #Latent Diffusion Advanced Settings
+ #Use when upscaling latents to correct saturation problems
+ scale_div = 1
+ #How many times to magnify the grad before clamping
+ opt_mag_mul = 20
+ opt_ddim_eta = 1.3
+ opt_eta_end = 1.1
+ opt_temperature = 0.98
+
+ #Grad advanced settings
+ grad_center = False
+ #Lower values give more coherent and detailed results; higher values make it focus on the more dominant concept
+ grad_scale = 0.25
+ score_modifier = True
+ threshold_percentile = 0.85
+ threshold = 1
+ var_index = [2]*300 + [0]*700
+ var_range = 0.5
+ mean_index = [0]*1000
+ mean_range = 0.75
+
+ #Init image advanced settings
+ init_rotate = False
+ mask_rotate = False
+ init_magnitude = 0.18215
+
+ #More settings
+ RGB_min = -0.95
+ RGB_max = 0.95
+ #How to pad the image with cut_overview
+ padargs = {'mode': 'constant', 'value': -1}
+ flip_aug = False
+
+ #Experimental aesthetic embeddings; they work only with OpenAI ViT-B/32 and ViT-L/14
+ experimental_aesthetic_embeddings = True
+ #How much you want this to influence your result
+ experimental_aesthetic_embeddings_weight = 0.3
+ #Scores range from 0 (bad aesthetics) to 9 (good aesthetics)
+ experimental_aesthetic_embeddings_score = 8
+
+ #For fun; don't change these unless you really know what you are doing
+ grad_blur = False
+ compress_steps = 200
+ compress_factor = 0.1
+ punish_steps = 200
+ punish_factor = 0.5
+
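The header comment says this file can be loaded back into Latent Majesty Diffusion. As a rough, standalone sketch only (assumptions: `load_settings` is a hypothetical helper, not the notebook's own loader, and expression-valued entries such as `[8]*500 + [4]*500` are kept as raw strings rather than evaluated), the sections can be read with Python's standard `configparser`:

```python
# Minimal, standalone sketch (assumption: this is NOT the Majesty Diffusion
# loader) for inspecting a settings file such as coherent_pizzapigeon.cfg.
import ast
import configparser


def load_settings(path: str) -> dict:
    """Read every section and parse plain Python literals where possible."""
    parser = configparser.ConfigParser(interpolation=None)
    parser.optionxform = str  # keep keys like RGB_min case-sensitive
    parser.read(path)

    settings = {}
    for section in parser.sections():
        for key, raw in parser[section].items():
            try:
                # Plain literals (numbers, booleans, lists, dicts) parse here;
                # expressions like "[8]*500 + [4]*500" raise and stay strings.
                settings[key] = ast.literal_eval(raw)
            except (ValueError, SyntaxError):
                settings[key] = raw
    return settings


if __name__ == "__main__":
    cfg = load_settings("coherent_pizzapigeon.cfg")
    print(cfg["latent_diffusion_model"])   # "finetuned" (kept as a raw string)
    print(cfg["custom_schedule_setting"])  # parsed into a nested Python list
```

Presumably the notebook itself expands the expression-valued settings into full per-step schedules; this sketch deliberately avoids `eval`, so it is only meant for inspecting or diffing settings files, not for reproducing a run.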