|
|
|
#This settings file can be loaded back into Latent Majesty Diffusion. If you like your settings, consider sharing them with the settings library at https:
|
[clip_list] |
|
perceptors = ['[clip - mlfoundations - ViT-B-16--openai]', '[clip - mlfoundations - ViT-B-16--laion400m_e32]', '[clip - mlfoundations - ViT-B-32--laion2b_e16]'] |
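#Each entry appears to follow the open_clip naming scheme: library - organization - architecture--pretrained weights,
#e.g. ViT-B-16--openai is the ViT-B/16 architecture with the OpenAI weights (this reading is an assumption, not part of the saved settings).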
|
|
|
[basic_settings] |
|
#Perceptor things |
|
|
|
#clip_prompts = ['portrait of a princess in sanctuary, hyperrealistic painting trending on artstation'] |
|
#latent_prompts = ['portrait of a princess in sanctuary, hyperrealistic painting trending on artstation']
|
#latent_negatives = [''] |
|
#image_prompts = [] |
|
|
|
|
|
latent_diffusion_guidance_scale = 2 |
|
clip_guidance_scale = 5000 |
|
aesthetic_loss_scale = 200 |
|
augment_cuts=True |
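#Rough reading of the scales above (assumption): latent_diffusion_guidance_scale is the classifier-free guidance
#weight, clip_guidance_scale weights the CLIP loss computed with the perceptors listed in [clip_list], and
#aesthetic_loss_scale weights the aesthetic loss; augment_cuts toggles augmentation of the cutouts fed to CLIP.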
|
|
|
#Init image settings |
|
starting_timestep = 0.9 |
|
init_scale = 0 |
|
init_brightness = 0.0 |
|
init_noise = 0.6 |
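#Hedged reading of the init settings above (assumption): with an init image, denoising starts at 90% of the
#noise schedule, init_scale weights a loss pulling the result towards the init, init_brightness offsets its
#brightness, and init_noise sets how much noise is mixed into it before diffusion.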
|
|
|
[advanced_settings] |
|
#Add CLIP Guidance and all its flavors, or just run normal Latent Diffusion
|
use_cond_fn = True |
|
|
|
#Custom schedules for cuts. Check out the schedules documentation here |
|
custom_schedule_setting = [[30, 900.0, 8], 'gfpgan:1', [20, 200, 6]] |
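#Hedged reading of the schedule above (assumption): each list entry defines one diffusion stage, and string entries
#run a post-processing pass between stages; here GFPGAN face restoration at strength 1 before the final stage.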
|
|
|
#Cut settings |
|
clamp_index = [1]*1000 |
|
cut_overview = [8]*500 + [4]*500 |
|
cut_innercut = [0]*500 + [4]*500 |
|
cut_ic_pow = 0.1 |
|
cut_icgray_p = [0.1]*300 + [0]*1000 |
|
cutn_batches = 1 |
|
range_index = [0]*1300 |
|
active_function = "softsign" |
|
tv_scales = [1200]*1 + [600]*3 |
|
latent_tv_loss = True |
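#Hedged reading of the cut schedules above (assumption): lists such as [8]*500 + [4]*500 are Python expressions
#indexed by timestep, so cut_overview takes 8 overview cuts for the first 500 steps and 4 for the last 500, while
#cut_innercut only adds its 4 inner cuts in the second half; clamp_index and range_index follow the same per-step convention.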
|
|
|
#This line schedules the CLIP guidance across the steps. If it is commented out, clip_guidance_scale will be used instead
|
clip_guidance_schedule = [5000]*1000 |
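#An illustrative alternative (not from this run) would front-load the guidance, e.g.:
#clip_guidance_schedule = [10000]*300 + [5000]*700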
|
|
|
#Apply symmetric loss (forces symmetry in your results)
|
symmetric_loss_scale = 0 |
|
|
|
#Latent Diffusion Advanced Settings |
|
#Use when upscaling latents to correct saturation problems
|
scale_div = 0.5 |
|
#How many times to magnify the gradient before clamping
|
opt_mag_mul = 10 |
|
opt_ddim_eta = 1.4 |
|
opt_eta_end = 1.0 |
|
opt_temperature = 0.975 |
|
|
|
#Grad advanced settings |
|
grad_center = False |
|
#Lower values result in more coherent and detailed results; higher values make it focus on the more dominant concept
|
grad_scale=0.5 |
|
|
|
#Init image advanced settings |
|
init_rotate=False |
|
mask_rotate=False |
|
init_magnitude = 0.15 |
|
|
|
#More settings |
|
RGB_min = -0.95 |
|
RGB_max = 0.95 |
|
#How to pad the image for the cut_overview cuts
|
padargs = {'mode': 'constant', 'value': -1} |
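#These keyword arguments look like they are passed straight to torch.nn.functional.pad (assumption); constant
#padding with value -1 matches the black level of images normalized to the [-1, 1] range.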
|
flip_aug=False |
|
cc = 60 |
|
#Experimental aesthetic embeddings; they only work with OpenAI ViT-B/32 and ViT-L/14
|
experimental_aesthetic_embeddings = False |
|
#How much you want this to influence your result |
|
experimental_aesthetic_embeddings_weight = 1 |
|
#A score of 9 corresponds to good aesthetic embeddings, 0 to bad ones
|
experimental_aesthetic_embeddings_score = 8 |
|
|
|
#Deactivating the new features from v1.5
|
score_modifier = False |
|
compress_steps = 0 |
|
punish_steps = 0 |
|
|