from functools import partial
from random import randint

import gradio as gr
import torch
from tqdm import tqdm

from NestedPipeline import NestedStableDiffusionPipeline
from NestedScheduler import NestedScheduler


def run(prompt, outer, inner, random_seed, pipe):
    # Use a fixed seed unless the user asks for a random one.
    seed = 24 if not random_seed else randint(0, 10000)
    generator = torch.Generator(device).manual_seed(seed)

    # Two progress bars: one over the outer diffusion steps, one over the inner steps.
    outer_diffusion = tqdm(range(outer), desc="Outer Diffusion")
    inner_diffusion = tqdm(range(inner), desc="Inner Diffusion")
    cur = [0, 0]
    for i, j, im in pipe(prompt, num_inference_steps=outer, num_inner_steps=inner, generator=generator):
        if cur[-1] != j:
            inner_diffusion.update()
            cur[-1] = j
        if cur[0] != i and i != outer:
            # A new outer step has started: advance the outer bar and restart the inner bar.
            cur[0] = i
            outer_diffusion.update()
            cur[-1] = 0
            inner_diffusion = tqdm(range(inner), desc="Inner Diffusion")
        elif cur[0] != i:
            outer_diffusion.update()
        # Render both tqdm bars as monospace HTML; non-breaking spaces keep the bars aligned.
        monospace_s, monospace_e = "<code>", "</code>"
        yield (f"{monospace_s}{outer_diffusion.__str__().replace(' ', '&nbsp;')}{monospace_e} \n "
               f"{monospace_s}{inner_diffusion.__str__().replace(' ', '&nbsp;')}{monospace_e}"), im[0]


if __name__ == "__main__":
    scheduler = NestedScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
                                prediction_type='sample', clip_sample=False, set_alpha_to_one=False)

    # Optionally load the half-precision weights to reduce GPU memory use.
    fp16 = False
    if fp16:
        pipe = NestedStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", revision="fp16",
                                                             torch_dtype=torch.float16, scheduler=scheduler)
    else:
        pipe = NestedStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", scheduler=scheduler)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipe.to(device)

    interface = partial(run, pipe=pipe)
    demo = gr.Interface(
        fn=interface,
        title="Nested Diffusion",
        description="Help: Type the desired prompt in the prompt box, and adjust the number of outer and inner "
                    "steps to use. Using more steps takes more time, but should create a better image.<br>"
                    "For more information on Nested Diffusion: Github, arXiv",
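        # NOTE: the original file is truncated at this point. The input/output components and the
        # launch call below are an assumed completion (a sketch, not the original code): they are
        # inferred from run()'s signature (prompt, outer, inner, random_seed) and from the
        # (progress HTML, image) pairs it yields; widget types, ranges, and defaults are guesses.
        inputs=[
            gr.Textbox(label="Prompt"),
            gr.Slider(minimum=1, maximum=100, value=20, step=1, label="Outer Steps"),
            gr.Slider(minimum=1, maximum=20, value=5, step=1, label="Inner Steps"),
            gr.Checkbox(value=False, label="Random Seed"),
        ],
        outputs=[gr.HTML(label="Progress"), gr.Image(label="Generated Image")],
    )
    # Queue the demo so the generator in run() can stream intermediate images to the UI.
    demo.queue().launch()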