# AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb.

# %% auto 0
__all__ = ['model_name', 'device', 'better_vae', 'unet_attn_slice', 'sampler_kls', 'hf_sampler', 'model_kwargs',
           'num_steps', 'height', 'width', 'k_sampler', 'use_karras_sigmas', 'NEG_PROMPT', 'generation_kwargs',
           'baseline_g', 'max_val', 'min_val', 'num_warmup_steps', 'warmup_init_val', 'num_cycles', 'k_decay',
           'DEFAULT_COS_PARAMS', 'static_sched', 'k_sched', 'inv_k_sched', 'scheds', 'iface', 'load_model',
           'cos_harness', 'compare_dynamic_guidance']

# %% app.ipynb 1
import gradio as gr
from cf_guidance import schedules, transforms
from min_diffusion.core import MinimalDiffusion
import torch
import nbdev

# %% app.ipynb 2
## MODEL SETUP
######################################
######################################
model_name = 'stabilityai/stable-diffusion-2'
device = ('cpu', 'cuda')[torch.cuda.is_available()]
if device == 'cuda':
    revision = 'fp16'
    dtype = torch.float16
else:
    revision = 'fp32'
    dtype = torch.float32

# model parameters
better_vae = ''
unet_attn_slice = True
sampler_kls = 'dpm_multi'
hf_sampler = 'dpm_multi'

model_kwargs = {
    'better_vae': better_vae,
    'unet_attn_slice': unet_attn_slice,
    'scheduler_kls': hf_sampler,
}

def load_model():
    pipeline = MinimalDiffusion(
        model_name,
        device,
        dtype,
        revision,
        **model_kwargs,
    )
    pipeline.load()
    return pipeline
######################################
######################################

# %% app.ipynb 3
## GENERATION PARAMETERS
######################################
######################################
num_steps = 18
height, width = 768, 768
k_sampler = 'k_dpmpp_2m'  # 'k_dpmpp_sde'
use_karras_sigmas = True

# a good negative prompt
NEG_PROMPT = "ugly, stock photo, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy, blurred, text, watermark, grainy"

generation_kwargs = {
    'num_steps': num_steps,
    'height': height,
    'width': width,
    'k_sampler': k_sampler,
    'negative_prompt': NEG_PROMPT,
    'use_karras_sigmas': use_karras_sigmas,
}
######################################
######################################

# %% app.ipynb 4
## DYNAMIC CFG SETUP
######################################
######################################
# default cosine schedule parameters
baseline_g = 9        # default, static guidance value
max_val = 9           # the max scheduled guidance scaling value
min_val = 6           # the minimum scheduled guidance value
num_warmup_steps = 0  # number of warmup steps
warmup_init_val = 0   # the initial warmup value
num_cycles = 0.5      # number of cosine cycles
k_decay = 1           # k-decay for cosine curve scaling

# group the default schedule parameters
DEFAULT_COS_PARAMS = {
    'max_val': max_val,
    'num_steps': num_steps,
    'min_val': min_val,
    'num_cycles': num_cycles,
    'k_decay': k_decay,
    'num_warmup_steps': num_warmup_steps,
    'warmup_init_val': warmup_init_val,
}

def cos_harness(new_params: dict) -> list:
    '''Creates a cosine schedule from the defaults, overridden by the values in `new_params`.'''
    # start from the default parameters
    cos_params = dict(DEFAULT_COS_PARAMS)
    # update them with the new, given parameters
    cos_params.update(new_params)
    # return the new cosine schedule
    sched = schedules.get_cos_sched(**cos_params)
    return sched

# build the static schedule
static_sched = [baseline_g] * num_steps

# build the inverted k-decay schedule
k_sched = cos_harness({'k_decay': 0.2})
inv_k_sched = [max_val - g + min_val for g in k_sched]
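# Inverting maps each guidance value g to `max_val - g + min_val`, so a value of
# max_val (9) becomes min_val (6) and a value of min_val (6) becomes max_val (9).
# If the cosine schedule from `get_cos_sched` decays from `max_val` toward `min_val`,
# the inverted schedule therefore starts low and ramps guidance up over the sampling steps.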
# group the schedules
scheds = {
    'cosine': {'g': inv_k_sched},
    'static': {'g': static_sched},
}
######################################
######################################

# %% app.ipynb 5
def compare_dynamic_guidance(prompt):
    '''
    Compares the default, static Classifier-free Guidance to a dynamic schedule.

    Model and sampling parameters:
        Stable Diffusion 2 v-model
        Half-precision
        DPM++ 2M sampler, with Karras sigma schedule
        18 sampling steps
        (768 x 768) image
        Using a generic negative prompt

    Schedules:
        Static guidance with scale of 9
        Inverse kDecay (cosine variant) scheduled guidance
    '''
    # load the model
    pipeline = load_model()

    # stores the output images
    res = []

    # generate images with static and dynamic schedules
    for (name, sched) in scheds.items():
        # make the guidance transform
        gtfm = transforms.GuidanceTfm(sched)
        # generate the image
        with torch.autocast(device), torch.no_grad():
            img = pipeline.generate(prompt, gtfm, **generation_kwargs)
        # add the generated image, captioned with its schedule name
        res.append((img, name))

    # return the generated images for the output Gallery
    return res

# %% app.ipynb 6
iface = gr.Interface(
    compare_dynamic_guidance,
    inputs="text",
    outputs=gr.Gallery(label="Cosine vs. Static CFG"),
    title="Comparing image generations with dynamic Classifier-free Guidance",
)
iface.launch()
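# Optional: a quick way to visualize the two guidance schedules the app compares.
# This sketch assumes matplotlib is installed; it is not needed by the app itself.
#
#   import matplotlib.pyplot as plt
#   plt.plot(scheds['static']['g'], label='static guidance (g = 9)')
#   plt.plot(scheds['cosine']['g'], label='inverse k-decay cosine guidance')
#   plt.xlabel('sampling step')
#   plt.ylabel('guidance scale')
#   plt.legend()
#   plt.show()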