# File: open-muse-main/benchmark/model_quality.py
import argparse

import matplotlib.pyplot as plt

# FID / CLIP / Inception scores measured over 10k samples at each cfg scale.
cfg = [1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 10.0, 15.0, 20.0]

fid_512 = [56.13683, 48.3625, 43.13792, 42.07286, 41.21331, 41.21309, 40.76164, 40.51427, 40.22781, 39.66504, 38.57083]
clip_512 = [23.168075, 24.3268, 25.29295, 25.67775, 25.93075, 26.068925, 26.15145, 26.151175, 26.26665, 26.3845, 26.402225]
isc_512 = [20.32828279489911, 23.092083811105134, 25.34707454898865, 25.782333543568505, 26.779519535473717, 26.72532414371535, 26.8378182891666, 27.02354446351334, 27.235757940256587, 27.461719798190302, 27.37252925955596]

fid_256 = [43.64503, 40.57112, 39.38306, 39.29915, 40.10225, 41.97274, 45.10721, 49.11104, 59.13854, 81.46585, 96.3426]
clip_256 = [24.191875, 25.035825, 25.689725, 26.0217, 26.1032, 26.048225, 25.90045, 25.691, 25.319, 24.49525, 23.915725]
isc_256 = [21.247120913990408, 23.008063867685443, 23.49288416726619, 24.13530452474164, 23.197031957136875, 21.741427950979876, 20.435789339047123, 18.84057076723702, 15.793238717380486, 10.74857386855099, 8.62769427725863]

if __name__ == '__main__':
    args = argparse.ArgumentParser()
    args.add_argument('--fid', action='store_true')
    args.add_argument('--isc', action='store_true')
    args.add_argument('--clip', action='store_true')
    args = args.parse_args()

    if args.fid:
        plt.title('FID')
        plt.ylabel('FID Score (10k)')
        plt.plot(cfg, fid_256, marker='o', label='muse-256')
        plt.plot(cfg, fid_512, marker='o', label='muse-512')
    elif args.isc:
        plt.title('Inception Score')
        plt.ylabel('Inception Score (10k)')
        plt.plot(cfg, isc_256, marker='o', label='muse-256')
        plt.plot(cfg, isc_512, marker='o', label='muse-512')
    elif args.clip:
        plt.title('CLIP Score')
        plt.ylabel('CLIP Score (10k)')
        plt.plot(cfg, clip_256, marker='o', label='muse-256')
        plt.plot(cfg, clip_512, marker='o', label='muse-512')
    else:
        assert False

    plt.xlabel('cfg scale')
    plt.legend()
    plt.grid(True)

    if args.fid:
        plt.savefig('./benchmark/artifacts/fid.png')
    elif args.isc:
        plt.savefig('./benchmark/artifacts/isc.png')
    elif args.clip:
        plt.savefig('./benchmark/artifacts/clip.png')
    else:
        assert False

# File: open-muse-main/benchmark/muse_chart.py
from argparse import ArgumentParser

import matplotlib.pyplot as plt
import pandas as pd

bar_width = 0.1


def main():
    parser = ArgumentParser()
    parser.add_argument('--device', choices=['4090', 'a100'], required=True)
    parser.add_argument('--batch-size', type=int, choices=[1, 8], required=True)
    args = parser.parse_args()

    df = pd.read_csv('benchmark/artifacts/all.csv')
    # Median is stored in milliseconds; convert to seconds for plotting.
    df['Median'] = df['Median'].apply(lambda x: round(x / 1000, 2))

    fig, axs = plt.subplots(1, 1, sharey='row')
    chart(df=df, device=args.device, batch_size=args.batch_size, plot_on=axs)
    axs.set_ylabel('Median Time (s)')
    axs.set_title(f'{args.device} Batch size: {args.batch_size}')
    plt.show()


def chart(df, device, batch_size, plot_on):
    fdf = df[
        (df['Device'] == device)
        & (df['Use Xformers'] == True)
        & ((df['Use Fused Residual Norm'] == True) | df['Use Fused Residual Norm'].isna())
        & (df['Batch Size'] == batch_size)
    ]

    chart_values = {
        'stable diffusion 1.5; resolution 512; timesteps 20': fdf[(fdf['Model Name'] == 'stable_diffusion_1_5') & (fdf['Timesteps'] == '20')].iloc[0]['Median'],
        'sdxl; resolution 1024; timesteps 12': fdf[(fdf['Model Name'] == 'sdxl') & (fdf['Timesteps'] == '12')].iloc[0]['Median'],
        'sdxl; resolution 1024; timesteps 20': fdf[(fdf['Model Name'] == 'sdxl') & (fdf['Timesteps'] == '20')].iloc[0]['Median'],
        'ssd 1b; resolution 1024; timesteps 12': fdf[(fdf['Model Name'] == 'ssd_1b') & (fdf['Timesteps'] == '12')].iloc[0]['Median'],
        'ssd 1b; resolution 1024; timesteps 20': fdf[(fdf['Model Name'] == 'ssd_1b') & (fdf['Timesteps'] == '20')].iloc[0]['Median'],
        'wurst; resolution 1024': fdf[fdf['Model Name'] == 'wurst'].iloc[0]['Median'],
        'lcm; resolution 512; timesteps 4': fdf[(fdf['Model Name'] == 'lcm') & (fdf['Timesteps'] == '4')].iloc[0]['Median'],
        'lcm; resolution 512; timesteps 8': fdf[(fdf['Model Name'] == 'lcm') & (fdf['Timesteps'] == '8')].iloc[0]['Median'],
        'muse; resolution 256; timesteps 12': fdf[(fdf['Model Name'] == 'muse') & (fdf['Resolution'] == 256) & (fdf['Timesteps'] == '12')].iloc[0]['Median'],
        'muse; resolution 512; timesteps 12': fdf[(fdf['Model Name'] == 'muse') & (fdf['Resolution'] == 512) & (fdf['Timesteps'] == '12')].iloc[0]['Median'],
        'sd-turbo; resolution 512; timesteps 1': fdf[fdf['Model Name'] == 'sd_turbo'].iloc[0]['Median'],
        'sdxl-turbo; resolution 1024; timesteps 1': fdf[fdf['Model Name'] == 'sdxl_turbo'].iloc[0]['Median'],
    }

    colors = ['g', 'r', 'c', 'm', 'y', 'k', 'purple', '#FF5733', (0.2, 0.4, 0.6), 'lime', 'navy', 'hotpink']
    colors = {label: color for label, color in zip(chart_values.keys(), colors)}

    # Sort bars by median time, fastest first.
    chart_values = sorted(chart_values.items(), key=lambda x: x[1])

    placement = 0
    for label, value in chart_values:
        color = colors[label]
        label = f'{label}; {value} s'
        label = '\n'.join(label.split(';'))
        bars = plot_on.bar(placement, value, width=bar_width, label=label, color=color)
        bar = bars[0]
        yval = bar.get_height()
        plot_on.text(bar.get_x() + bar.get_width() / 2, yval + 0.05, label, ha='center', va='bottom', fontsize='small')
        placement = placement + bar_width + 0.05


if __name__ == '__main__':
    main()
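# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the repo): the
# labeled-bar pattern chart() uses above, reduced to a self-contained,
# runnable example. The model names and timings below are made up.
# ---------------------------------------------------------------------------
def _example_labeled_bars():
    import matplotlib.pyplot as plt

    values = {'model-a': 1.2, 'model-b': 0.7, 'model-c': 2.3}  # hypothetical medians (s)
    fig, ax = plt.subplots()
    placement = 0.0
    for label, value in sorted(values.items(), key=lambda kv: kv[1]):
        bars = ax.bar(placement, value, width=0.1, label=label)
        bar = bars[0]
        # Same trick as chart(): draw the label just above the bar itself
        # instead of relying on a legend.
        ax.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + 0.05,
                f'{label}\n{value} s', ha='center', va='bottom', fontsize='small')
        placement += 0.15
    ax.set_ylabel('Median Time (s)')
    plt.show()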
# File: open-muse-main/benchmark/muse_perf.py
import csv
from argparse import ArgumentParser

import torch
from diffusers import (
    AutoencoderKL,
    AutoPipelineForText2Image,
    LatentConsistencyModelPipeline,
    LCMScheduler,
    StableDiffusionPipeline,
    StableDiffusionXLPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS
from torch.utils.benchmark import Compare, Timer
from transformers import AutoTokenizer, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from muse import MaskGiTUViT, PipelineMuse, VQGANModel

torch.manual_seed(0)
torch.set_grad_enabled(False)
torch.set_float32_matmul_precision('high')

num_threads = torch.get_num_threads()
prompt = 'A high tech solarpunk utopia in the Amazon rainforest'

# Toggle individual model families on/off.
do_sd15 = True
do_sdxl = True
do_ssd_1b = True
do_sdxl_turbo = True
do_sd_turbo = True
do_muse = True
do_wurst = True
do_lcm = True


def main():
    args = ArgumentParser()
    args.add_argument('--device', choices=['4090', 'a100'], required=True)
    args = args.parse_args()

    # Each row: Batch Size, Model Name, Median (ms), Device, Timesteps,
    # Max Memory (bytes), Resolution, Use Xformers, Use Fused Residual Norm.
    csv_data = []

    for batch_size in [1, 8]:
        for timesteps in [12, 20]:
            for use_xformers in [False, True]:
                if do_sd15:
                    out, mem_bytes = sd_benchmark(batch_size=batch_size, timesteps=timesteps, use_xformers=use_xformers)
                    Compare([out]).print()
                    print('*******')
                    csv_data.append([batch_size, 'stable_diffusion_1_5', out.median * 1000, args.device, timesteps, mem_bytes, 512, use_xformers, None])

                if do_sdxl:
                    out, mem_bytes = sdxl_benchmark(batch_size=batch_size, timesteps=timesteps, use_xformers=use_xformers, gpu_type=args.device)
                    Compare([out]).print()
                    print('*******')
                    csv_data.append([batch_size, 'sdxl', out.median * 1000, args.device, timesteps, mem_bytes, 1024, use_xformers, None])

                if do_ssd_1b:
                    out, mem_bytes = ssd_1b_benchmark(batch_size=batch_size, timesteps=timesteps, use_xformers=use_xformers, gpu_type=args.device)
                    Compare([out]).print()
                    print('*******')
                    csv_data.append([batch_size, 'ssd_1b', out.median * 1000, args.device, timesteps, mem_bytes, 1024, use_xformers, None])

                if do_muse:
                    for resolution in [256, 512]:
                        for use_fused_residual_norm in [False, True]:
                            out, mem_bytes = muse_benchmark(resolution=resolution, batch_size=batch_size, timesteps=timesteps, use_xformers=use_xformers, use_fused_residual_norm=use_fused_residual_norm)
                            Compare([out]).print()
                            print('*******')
                            csv_data.append([batch_size, 'muse', out.median * 1000, args.device, timesteps, mem_bytes, resolution, use_xformers, use_fused_residual_norm])

        # The turbo, wuerstchen, and LCM models use their own step counts, so
        # they sit outside the `timesteps` loop above.
        if do_sdxl_turbo:
            for use_xformers in [True, False]:
                timesteps = 1
                out, mem_bytes = sdxl_turbo_benchmark(batch_size=batch_size, timesteps=timesteps, use_xformers=use_xformers)
                Compare([out]).print()
                print('*******')
                csv_data.append([batch_size, 'sdxl_turbo', out.median * 1000, args.device, timesteps, mem_bytes, 1024, use_xformers, None])

        if do_sd_turbo:
            for use_xformers in [True, False]:
                timesteps = 1
                out, mem_bytes = sd_turbo_benchmark(batch_size=batch_size, timesteps=timesteps, use_xformers=use_xformers)
                Compare([out]).print()
                print('*******')
                csv_data.append([batch_size, 'sd_turbo', out.median * 1000, args.device, timesteps, mem_bytes, 512, use_xformers, None])

        if do_wurst:
            for use_xformers in [False, True]:
                out, mem_bytes = wurst_benchmark(batch_size, use_xformers)
                Compare([out]).print()
                print('*******')
                csv_data.append([batch_size, 'wurst', out.median * 1000, args.device, 'default', mem_bytes, 1024, use_xformers, None])

        if do_lcm:
            for timesteps in [4, 8]:
                for use_xformers in [False, True]:
                    out, mem_bytes = lcm_benchmark(batch_size, timesteps, use_xformers)
                    Compare([out]).print()
                    print('*******')
                    csv_data.append([batch_size, 'lcm', out.median * 1000, args.device, timesteps, mem_bytes, 1024, use_xformers, None])

    with open('benchmark/artifacts/all.csv', 'a', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(csv_data)


def muse_benchmark(resolution, batch_size, timesteps, use_xformers, use_fused_residual_norm):
    model = 'williamberman/muse_research_run_benchmarking_512_output'
    device = 'cuda'
    dtype = torch.float16

    tokenizer = AutoTokenizer.from_pretrained(model, subfolder='text_encoder')
    text_encoder = CLIPTextModelWithProjection.from_pretrained(model, subfolder='text_encoder')
    text_encoder.to(device=device, dtype=dtype)

    vae = VQGANModel.from_pretrained(model, subfolder='vae')
    vae.to(device=device, dtype=dtype)

    transformer = MaskGiTUViT(
        use_fused_mlp=False,
        use_fused_residual_norm=use_fused_residual_norm,
        force_down_up_sample=resolution == 512,
    )
    transformer = transformer.to(device=device, dtype=dtype)
    transformer.eval()

    if use_xformers:
        transformer.enable_xformers_memory_efficient_attention()

    pipe = PipelineMuse(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer)
    pipe.device = device
    pipe.dtype = dtype

    seq_len = (resolution // 16) ** 2

    def benchmark_fn():
        pipe(prompt, num_images_per_prompt=batch_size, timesteps=timesteps, transformer_seq_len=seq_len)

    # Warm up (kernel compilation, caches) before the timed runs.
    pipe(prompt, num_images_per_prompt=batch_size, timesteps=2, transformer_seq_len=seq_len)

    def fn():
        return Timer(
            stmt='benchmark_fn()',
            globals={'benchmark_fn': benchmark_fn},
            num_threads=num_threads,
            label=f'batch_size: {batch_size}, dtype: {dtype}, timesteps {timesteps}, resolution: {resolution}, use_xformers: {use_xformers}, use_fused_residual_norm: {use_fused_residual_norm}',
            description=model,
        ).blocked_autorange(min_run_time=1)

    return measure_max_memory_allocated(fn)


def wurst_benchmark(batch_size, use_xformers):
    model = 'warp-ai/wuerstchen'
    device = 'cuda'
    dtype = torch.float16

    pipe = AutoPipelineForText2Image.from_pretrained(model, torch_dtype=dtype).to(device)

    if use_xformers:
        pipe.enable_xformers_memory_efficient_attention()

    def benchmark_fn():
        pipe(prompt, height=1024, width=1024, prior_timesteps=DEFAULT_STAGE_C_TIMESTEPS, prior_guidance_scale=4.0, num_images_per_prompt=batch_size)

    # Warm up before the timed runs.
    benchmark_fn()

    def fn():
        return Timer(
            stmt='benchmark_fn()',
            globals={'benchmark_fn': benchmark_fn},
            num_threads=num_threads,
            label=f'batch_size: {batch_size}, dtype: {dtype}, use_xformers: {use_xformers}',
            description=model,
        ).blocked_autorange(min_run_time=1)

    return measure_max_memory_allocated(fn)


def sdxl_benchmark(batch_size, timesteps, use_xformers, gpu_type):
    model = 'stabilityai/stable-diffusion-xl-base-1.0'
    device = 'cuda'
    dtype = torch.float16

    pipe = StableDiffusionXLPipeline.from_pretrained(model, torch_dtype=dtype)
    pipe = pipe.to(device)

    if use_xformers:
        pipe.enable_xformers_memory_efficient_attention()

    # Decoding a batch of 8 at 1024px OOMs on the 4090, so skip the VAE decode there.
    if gpu_type == '4090' and batch_size == 8:
        output_type = 'latent'
    else:
        output_type = 'pil'

    def benchmark_fn():
        pipe(prompt, num_inference_steps=timesteps, num_images_per_prompt=batch_size, output_type=output_type)

    # Warm up before the timed runs.
    pipe(prompt, num_inference_steps=2, num_images_per_prompt=batch_size, output_type=output_type)

    def fn():
        return Timer(
            stmt='benchmark_fn()',
            globals={'benchmark_fn': benchmark_fn},
            num_threads=num_threads,
            label=f'batch_size: {batch_size}, dtype: {dtype}, timesteps {timesteps}, use_xformers: {use_xformers}',
            description=model,
        ).blocked_autorange(min_run_time=1)

    return measure_max_memory_allocated(fn)


def lcm_benchmark(batch_size, timesteps, use_xformers):
    model = 'SimianLuo/LCM_Dreamshaper_v7'
    device = 'cuda'
    dtype = torch.float16

    scheduler = LCMScheduler.from_pretrained(model, subfolder='scheduler')
    pipe = LatentConsistencyModelPipeline.from_pretrained(model, torch_dtype=dtype, scheduler=scheduler)
    pipe.to(device)

    if use_xformers:
        pipe.enable_xformers_memory_efficient_attention()

    def benchmark_fn():
        pipe(prompt, num_inference_steps=timesteps, num_images_per_prompt=batch_size)

    # Warm up before the timed runs.
    pipe(prompt, num_inference_steps=2, num_images_per_prompt=batch_size)

    def fn():
        return Timer(
            stmt='benchmark_fn()',
            globals={'benchmark_fn': benchmark_fn},
            num_threads=num_threads,
            label=f'batch_size: {batch_size}, dtype: {dtype}, timesteps {timesteps}, use_xformers: {use_xformers}',
            description=model,
        ).blocked_autorange(min_run_time=1)

    return measure_max_memory_allocated(fn)


def ssd_1b_benchmark(batch_size, timesteps, use_xformers, gpu_type):
    model = 'segmind/SSD-1B'
    device = 'cuda'
    dtype = torch.float16

    pipe = StableDiffusionXLPipeline.from_pretrained(model, torch_dtype=dtype, use_safetensors=True, variant='fp16')
    pipe.to(device)

    if use_xformers:
        pipe.enable_xformers_memory_efficient_attention()

    if gpu_type == '4090' and batch_size == 8:
        output_type = 'latent'
    else:
        output_type = 'pil'

    def benchmark_fn():
        pipe(prompt, num_inference_steps=timesteps, num_images_per_prompt=batch_size, output_type=output_type)

    # Warm up before the timed runs.
    pipe(prompt, num_inference_steps=2, num_images_per_prompt=batch_size, output_type=output_type)

    def fn():
        return Timer(
            stmt='benchmark_fn()',
            globals={'benchmark_fn': benchmark_fn},
            num_threads=num_threads,
            label=f'batch_size: {batch_size}, dtype: {dtype}, timesteps {timesteps}, use_xformers: {use_xformers}',
            description=model,
        ).blocked_autorange(min_run_time=1)

    return measure_max_memory_allocated(fn)


def sd_benchmark(batch_size, timesteps, use_xformers):
    model = 'runwayml/stable-diffusion-v1-5'
    device = 'cuda'
    dtype = torch.float16

    tokenizer = CLIPTokenizer.from_pretrained(model, subfolder='tokenizer')
    text_encoder = CLIPTextModel.from_pretrained(model, subfolder='text_encoder')
    text_encoder.to(device=device, dtype=dtype)

    vae = AutoencoderKL.from_pretrained(model, subfolder='vae')
    vae = vae.to(device=device, dtype=dtype)

    unet = UNet2DConditionModel.from_pretrained(model, subfolder='unet')
    unet = unet.to(device=device, dtype=dtype)

    pipe = StableDiffusionPipeline.from_pretrained(
        model,
        vae=vae,
        unet=unet,
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        safety_checker=None,
    )

    if use_xformers:
        pipe.enable_xformers_memory_efficient_attention()

    def benchmark_fn():
        pipe(prompt, num_images_per_prompt=batch_size, num_inference_steps=timesteps)

    # Warm up before the timed runs.
    pipe(prompt, num_images_per_prompt=batch_size, num_inference_steps=2)

    def fn():
        return Timer(
            stmt='benchmark_fn()',
            globals={'benchmark_fn': benchmark_fn},
            num_threads=num_threads,
            label=f'batch_size: {batch_size}, dtype: {dtype}, timesteps {timesteps}, use_xformers: {use_xformers}',
            description=model,
        ).blocked_autorange(min_run_time=1)

    return measure_max_memory_allocated(fn)


def sd_turbo_benchmark(batch_size, timesteps, use_xformers):
    model = 'stabilityai/sd-turbo'
    dtype = torch.float16

    pipe = AutoPipelineForText2Image.from_pretrained(model, torch_dtype=torch.float16, variant='fp16', safety_checker=None)
    pipe.to('cuda')

    if use_xformers:
        pipe.enable_xformers_memory_efficient_attention()

    def benchmark_fn():
        pipe(prompt, num_images_per_prompt=batch_size, num_inference_steps=timesteps)

    # Warm up before the timed runs.
    pipe(prompt, num_images_per_prompt=batch_size, num_inference_steps=2)

    def fn():
        return Timer(
            stmt='benchmark_fn()',
            globals={'benchmark_fn': benchmark_fn},
            num_threads=num_threads,
            label=f'batch_size: {batch_size}, dtype: {dtype}, timesteps {timesteps}, use_xformers: {use_xformers}',
            description=model,
        ).blocked_autorange(min_run_time=1)

    return measure_max_memory_allocated(fn)


def sdxl_turbo_benchmark(batch_size, timesteps, use_xformers):
    model = 'stabilityai/sdxl-turbo'
    dtype = torch.float16

    pipe = AutoPipelineForText2Image.from_pretrained(model, torch_dtype=torch.float16, variant='fp16', safety_checker=None)
    pipe.to('cuda')

    if use_xformers:
        pipe.enable_xformers_memory_efficient_attention()

    def benchmark_fn():
        pipe(prompt, num_images_per_prompt=batch_size, num_inference_steps=timesteps)

    # Warm up before the timed runs.
    pipe(prompt, num_images_per_prompt=batch_size, num_inference_steps=2)

    def fn():
        return Timer(
            stmt='benchmark_fn()',
            globals={'benchmark_fn': benchmark_fn},
            num_threads=num_threads,
            label=f'batch_size: {batch_size}, dtype: {dtype}, timesteps {timesteps}, use_xformers: {use_xformers}',
            description=model,
        ).blocked_autorange(min_run_time=1)

    return measure_max_memory_allocated(fn)


def measure_max_memory_allocated(fn):
    # Reset CUDA memory stats so max_memory_allocated reflects only fn().
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()

    rv = fn()
    mem_bytes = torch.cuda.max_memory_allocated()

    return rv, mem_bytes


if __name__ == '__main__':
    main()
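# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the repo): the
# measurement pattern every *_benchmark function above follows, applied to a
# trivial matmul workload. Requires a CUDA device.
# ---------------------------------------------------------------------------
def _example_timing_pattern():
    import torch
    from torch.utils.benchmark import Timer

    x = torch.randn(1024, 1024, device='cuda')

    def benchmark_fn():
        x @ x

    benchmark_fn()  # warmup, as the benchmarks above do before timing

    # Reset memory stats, time with blocked_autorange, then read peak memory.
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    measurement = Timer(
        stmt='benchmark_fn()',
        globals={'benchmark_fn': benchmark_fn},
        label='matmul 1024x1024',
    ).blocked_autorange(min_run_time=1)
    mem_bytes = torch.cuda.max_memory_allocated()
    print(measurement.median, mem_bytes)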
# File: open-muse-main/benchmark/muse_table.py
from argparse import ArgumentParser

import pandas as pd


def main():
    parser = ArgumentParser()
    parser.add_argument('--device', choices=['4090', 'a100'], required=True)
    parser.add_argument('--batch-size', type=int, choices=[1, 8], required=True)
    args = parser.parse_args()

    df = pd.read_csv('benchmark/artifacts/all.csv')
    # Median is stored in milliseconds; convert to seconds.
    df['Median'] = df['Median'].apply(lambda x: round(x / 1000, 2))

    print(table(df=df, device=args.device, batch_size=args.batch_size))


def table(df, device, batch_size):
    fdf = df[
        (df['Device'] == device)
        & (df['Use Xformers'] == True)
        & ((df['Use Fused Residual Norm'] == True) | df['Use Fused Residual Norm'].isna())
        & (df['Batch Size'] == batch_size)
    ]

    chart_values = {
        'stable diffusion 1.5; resolution 512; timesteps 20': fdf[(fdf['Model Name'] == 'stable_diffusion_1_5') & (fdf['Timesteps'] == '20')].iloc[0]['Median'],
        'sdxl; resolution 1024; timesteps 12': fdf[(fdf['Model Name'] == 'sdxl') & (fdf['Timesteps'] == '12')].iloc[0]['Median'],
        'sdxl; resolution 1024; timesteps 20': fdf[(fdf['Model Name'] == 'sdxl') & (fdf['Timesteps'] == '20')].iloc[0]['Median'],
        'ssd 1b; resolution 1024; timesteps 12': fdf[(fdf['Model Name'] == 'ssd_1b') & (fdf['Timesteps'] == '12')].iloc[0]['Median'],
        'ssd 1b; resolution 1024; timesteps 20': fdf[(fdf['Model Name'] == 'ssd_1b') & (fdf['Timesteps'] == '20')].iloc[0]['Median'],
        'wurst; resolution 1024; timesteps TODO': fdf[fdf['Model Name'] == 'wurst'].iloc[0]['Median'],
        'lcm; resolution 512; timesteps 4': fdf[(fdf['Model Name'] == 'lcm') & (fdf['Timesteps'] == '4')].iloc[0]['Median'],
        'lcm; resolution 512; timesteps 8': fdf[(fdf['Model Name'] == 'lcm') & (fdf['Timesteps'] == '8')].iloc[0]['Median'],
        'muse-256; resolution 256; timesteps 12': fdf[(fdf['Model Name'] == 'muse') & (fdf['Resolution'] == 256) & (fdf['Timesteps'] == '12')].iloc[0]['Median'],
        'muse-512; resolution 512; timesteps 12': fdf[(fdf['Model Name'] == 'muse') & (fdf['Resolution'] == 512) & (fdf['Timesteps'] == '12')].iloc[0]['Median'],
        'sd-turbo; resolution 512; timesteps 1': fdf[fdf['Model Name'] == 'sd_turbo'].iloc[0]['Median'],
        'sdxl-turbo; resolution 1024; timesteps 1': fdf[fdf['Model Name'] == 'sdxl_turbo'].iloc[0]['Median'],
    }

    # Sort rows by median time, fastest first.
    chart_values = sorted(chart_values.items(), key=lambda x: x[1])

    table = (
        '\n'
        '\\begin{tabular}{|l|c|c|c|}\n'
        '\\hline\n'
        '\\textbf{ } & \\textbf{inference time} & \\textbf{timesteps} & \\textbf{resolution} \\\\ \\hline\n'
    )

    for label, value in chart_values:
        model, resolution, timesteps = label.split(';')
        resolution = resolution.split(' ')[-1]
        timesteps = timesteps.split(' ')[-1]
        table += '\\textbf{' + f'{model}}} & {value} s & {timesteps} & {resolution}' + ' \\\\ \\hline' + '\n'

    table += '\\end{tabular}'

    return table


if __name__ == '__main__':
    main()

# File: open-muse-main/muse/logging.py
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    # The muse_VERBOSITY env var, if set, overrides the default log level.
    env_level_str = os.getenv('muse_VERBOSITY', None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option muse_VERBOSITY={env_level_str}, has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split('.')[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()
        _default_handler.flush = sys.stderr.flush

        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s')
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    # Suppressed entirely when muse_NO_ADVISORY_WARNINGS is set.
    no_advisory_warnings = os.getenv('muse_NO_ADVISORY_WARNINGS', False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


class EmptyTqdm:
    # Dummy tqdm that doesn't do anything.

    def __init__(self, *args, **kwargs):
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        # Return an empty function for any attribute access.
        def empty_fn(*args, **kwargs):
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
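# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the repo): typical use
# of this logging module. Assumes the package is importable as `muse`.
# ---------------------------------------------------------------------------
def _example_logging_usage():
    from muse import logging

    logging.set_verbosity_info()            # or: export muse_VERBOSITY=info
    logger = logging.get_logger(__name__)   # child of the 'muse' root logger
    logger.info('hello from muse')
    logging.disable_progress_bar()          # muse.logging.tqdm becomes a no-op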
# File: open-muse-main/muse/lr_schedulers.py
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .logging import get_logger

logger = get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * (float(num_cycles) * progress % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-07, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults['lr']
    if not lr_init > lr_end:
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})')

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining ** power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.')

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.')

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles)

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power)

    return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
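# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the repo): wiring
# get_scheduler into a toy training loop. Assumes the package is importable
# as `muse`.
# ---------------------------------------------------------------------------
def _example_scheduler_usage():
    import torch

    from muse.lr_schedulers import get_scheduler

    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    lr_scheduler = get_scheduler(
        'cosine',                 # any SchedulerType value
        optimizer,
        num_warmup_steps=100,     # linear warmup from 0 to the base lr
        num_training_steps=1000,  # cosine decay to 0 over the remaining steps
    )
    for _ in range(1000):
        # ... forward, backward ...
        optimizer.step()
        lr_scheduler.step()       # one scheduler step per optimizer step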
# File: open-muse-main/muse/modeling_ema.py
import copy
from typing import Any, Dict, Iterable, Optional, Union

import torch


class EMAModel:
    # Exponential moving average of model parameters.

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        update_every: int = 1,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
    ):
        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.update_every = update_every
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> 'EMAModel':
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.')

        if self.model_config is None:
            raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.')

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop('shadow_params', None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        # Compute the decay factor for the exponential moving average.
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** (-self.power)
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # Make sure decay is not smaller than min_decay.
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        parameters = list(parameters)

        self.optimization_step += 1

        if (self.optimization_step - 1) % self.update_every != 0:
            return

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        for s_param, param in zip(self.shadow_params, parameters):
            if param.requires_grad:
                s_param.sub_(one_minus_decay * (s_param - param))
            else:
                s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        # Copy current averaged parameters into the given collection of parameters.
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        # .to() on the tensors handles None correctly.
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            'decay': self.decay,
            'min_decay': self.min_decay,
            'optimization_step': self.optimization_step,
            'update_after_step': self.update_after_step,
            'use_ema_warmup': self.use_ema_warmup,
            'inv_gamma': self.inv_gamma,
            'power': self.power,
            'shadow_params': self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        # Save the current parameters for restoring later.
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        # Restore the parameters stored with the `store` method.
        if self.temp_stored_params is None:
            raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights to `restore()`')
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        # Deepcopy, to be consistent with module API.
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get('decay', self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError('Decay must be between 0 and 1')

        self.min_decay = state_dict.get('min_decay', self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError('Invalid min_decay')

        self.optimization_step = state_dict.get('optimization_step', self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError('Invalid optimization_step')

        self.update_after_step = state_dict.get('update_after_step', self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError('Invalid update_after_step')

        self.use_ema_warmup = state_dict.get('use_ema_warmup', self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError('Invalid use_ema_warmup')

        self.inv_gamma = state_dict.get('inv_gamma', self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError('Invalid inv_gamma')

        self.power = state_dict.get('power', self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError('Invalid power')

        shadow_params = state_dict.get('shadow_params', None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError('shadow_params must be a list')
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError('shadow_params must all be Tensors')
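# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the repo): the
# step/store/copy_to/restore pattern for evaluating with EMA weights
# mid-training.
# ---------------------------------------------------------------------------
def _example_ema_usage():
    import torch

    from muse.modeling_ema import EMAModel

    model = torch.nn.Linear(4, 4)
    ema = EMAModel(model.parameters(), decay=0.9999)

    # ... after each optimizer.step() during training:
    ema.step(model.parameters())       # blend live weights into the shadow copy

    ema.store(model.parameters())      # stash the live weights
    ema.copy_to(model.parameters())    # evaluate with the EMA weights
    # ... run validation ...
    ema.restore(model.parameters())    # put the live weights back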
# File: open-muse-main/muse/modeling_maskgit_vqgan.py
import math
from typing import Tuple

import torch
import torch.nn.functional as F
from torch import nn

from .modeling_utils import ConfigMixin, ModelMixin, register_to_config


class Conv2dSame(nn.Conv2d):
    # Conv2d with TensorFlow-style 'SAME' padding.

    def calc_same_pad(self, i: int, k: int, s: int, d: int) -> int:
        return max((math.ceil(i / s) - 1) * s + (k - 1) * d + 1 - i, 0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        ih, iw = x.size()[-2:]

        pad_h = self.calc_same_pad(i=ih, k=self.kernel_size[0], s=self.stride[0], d=self.dilation[0])
        pad_w = self.calc_same_pad(i=iw, k=self.kernel_size[1], s=self.stride[1], d=self.dilation[1])

        if pad_h > 0 or pad_w > 0:
            x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
        return super().forward(x)


class ResnetBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int = None, dropout_prob: float = 0.0):
        super().__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.out_channels_ = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-06, affine=True)
        self.conv1 = Conv2dSame(self.in_channels, self.out_channels_, kernel_size=3, bias=False)

        self.norm2 = nn.GroupNorm(num_groups=32, num_channels=self.out_channels_, eps=1e-06, affine=True)
        self.dropout = nn.Dropout(dropout_prob)
        self.conv2 = Conv2dSame(self.out_channels_, self.out_channels_, kernel_size=3, bias=False)

        if self.in_channels != self.out_channels_:
            self.nin_shortcut = Conv2dSame(self.out_channels_, self.out_channels_, kernel_size=1, bias=False)

    def forward(self, hidden_states):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = F.silu(hidden_states)
        hidden_states = self.conv1(hidden_states)

        hidden_states = self.norm2(hidden_states)
        hidden_states = F.silu(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.conv2(hidden_states)

        if self.in_channels != self.out_channels_:
            # NOTE: the 1x1 shortcut is applied to the transformed activations
            # (which already have out_channels_ channels), not the input residual.
            residual = self.nin_shortcut(hidden_states)

        return hidden_states + residual


class DownsamplingBlock(nn.Module):
    def __init__(self, config, block_idx: int):
        super().__init__()

        self.config = config
        self.block_idx = block_idx

        in_channel_mult = (1,) + tuple(self.config.channel_mult)
        block_in = self.config.hidden_channels * in_channel_mult[self.block_idx]
        block_out = self.config.hidden_channels * self.config.channel_mult[self.block_idx]

        res_blocks = nn.ModuleList()
        for _ in range(self.config.num_res_blocks):
            res_blocks.append(ResnetBlock(block_in, block_out, dropout_prob=self.config.dropout))
            block_in = block_out
        self.block = res_blocks

        self.downsample = self.block_idx != self.config.num_resolutions - 1

    def forward(self, hidden_states):
        for res_block in self.block:
            hidden_states = res_block(hidden_states)

        if self.downsample:
            hidden_states = F.avg_pool2d(hidden_states, kernel_size=2, stride=2)

        return hidden_states


class UpsamplingBlock(nn.Module):
    def __init__(self, config, block_idx: int):
        super().__init__()

        self.config = config
        self.block_idx = block_idx

        if self.block_idx == self.config.num_resolutions - 1:
            block_in = self.config.hidden_channels * self.config.channel_mult[-1]
        else:
            block_in = self.config.hidden_channels * self.config.channel_mult[self.block_idx + 1]

        block_out = self.config.hidden_channels * self.config.channel_mult[self.block_idx]

        res_blocks = []
        for _ in range(self.config.num_res_blocks):
            res_blocks.append(ResnetBlock(block_in, block_out, dropout_prob=self.config.dropout))
            block_in = block_out
        self.block = nn.ModuleList(res_blocks)

        self.add_upsample = self.block_idx != 0
        if self.add_upsample:
            self.upsample_conv = Conv2dSame(block_out, block_out, kernel_size=3)

    def forward(self, hidden_states):
        for res_block in self.block:
            hidden_states = res_block(hidden_states)

        if self.add_upsample:
            hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode='nearest')
            hidden_states = self.upsample_conv(hidden_states)

        return hidden_states


class Encoder(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.config = config

        # Downsampling.
        self.conv_in = Conv2dSame(self.config.num_channels, self.config.hidden_channels, kernel_size=3, bias=False)

        downsample_blocks = []
        for i_level in range(self.config.num_resolutions):
            downsample_blocks.append(DownsamplingBlock(self.config, block_idx=i_level))
        self.down = nn.ModuleList(downsample_blocks)

        # Middle.
        mid_channels = self.config.hidden_channels * self.config.channel_mult[-1]
        res_blocks = nn.ModuleList()
        for _ in range(self.config.num_res_blocks):
            res_blocks.append(ResnetBlock(mid_channels, mid_channels, dropout_prob=self.config.dropout))
        self.mid = res_blocks

        # End.
        self.norm_out = nn.GroupNorm(num_groups=32, num_channels=mid_channels, eps=1e-06, affine=True)
        self.conv_out = Conv2dSame(mid_channels, self.config.z_channels, kernel_size=1)

    def forward(self, pixel_values):
        hidden_states = self.conv_in(pixel_values)

        for block in self.down:
            hidden_states = block(hidden_states)

        for block in self.mid:
            hidden_states = block(hidden_states)

        hidden_states = self.norm_out(hidden_states)
        hidden_states = F.silu(hidden_states)
        hidden_states = self.conv_out(hidden_states)

        return hidden_states


class Decoder(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.config = config

        # Compute block_in and curr_res at lowest resolution.
        block_in = self.config.hidden_channels * self.config.channel_mult[self.config.num_resolutions - 1]
        curr_res = self.config.resolution // 2 ** (self.config.num_resolutions - 1)
        self.z_shape = (1, self.config.z_channels, curr_res, curr_res)

        # z to block_in.
        self.conv_in = Conv2dSame(self.config.z_channels, block_in, kernel_size=3)

        # Middle.
        res_blocks = nn.ModuleList()
        for _ in range(self.config.num_res_blocks):
            res_blocks.append(ResnetBlock(block_in, block_in, dropout_prob=self.config.dropout))
        self.mid = res_blocks

        # Upsampling.
        upsample_blocks = []
        for i_level in reversed(range(self.config.num_resolutions)):
            upsample_blocks.append(UpsamplingBlock(self.config, block_idx=i_level))
        self.up = nn.ModuleList(list(reversed(upsample_blocks)))  # reverse to get consistent order

        # End.
        block_out = self.config.hidden_channels * self.config.channel_mult[0]
        self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_out, eps=1e-06, affine=True)
        self.conv_out = Conv2dSame(block_out, self.config.num_channels, kernel_size=3)

    def forward(self, hidden_states):
        hidden_states = self.conv_in(hidden_states)

        for block in self.mid:
            hidden_states = block(hidden_states)

        for block in reversed(self.up):
            hidden_states = block(hidden_states)

        hidden_states = self.norm_out(hidden_states)
        hidden_states = F.silu(hidden_states)
        hidden_states = self.conv_out(hidden_states)

        return hidden_states


class VectorQuantizer(nn.Module):
    def __init__(self, num_embeddings, embedding_dim, commitment_cost):
        super().__init__()

        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.commitment_cost = commitment_cost

        self.embedding = nn.Embedding(num_embeddings, embedding_dim)
        self.embedding.weight.data.uniform_(-1.0 / num_embeddings, 1.0 / num_embeddings)

    def forward(self, hidden_states, return_loss=False):
        # (batch, channel, height, width) -> (batch, height, width, channel)
        hidden_states = hidden_states.permute(0, 2, 3, 1).contiguous()

        distances = self.compute_distances(hidden_states)
        min_encoding_indices = torch.argmin(distances, axis=1).unsqueeze(1)
        min_encodings = torch.zeros(min_encoding_indices.shape[0], self.num_embeddings).to(hidden_states)
        min_encodings.scatter_(1, min_encoding_indices, 1)

        # Get quantized latent vectors.
        z_q = torch.matmul(min_encodings, self.embedding.weight).view(hidden_states.shape)
        min_encoding_indices = min_encoding_indices.reshape(hidden_states.shape[0], -1)

        loss = None
        if return_loss:
            loss = torch.mean((z_q.detach() - hidden_states) ** 2) + self.commitment_cost * torch.mean(
                (z_q - hidden_states.detach()) ** 2
            )
            # Straight-through estimator: copy gradients from z_q to the encoder output.
            z_q = hidden_states + (z_q - hidden_states).detach()

        # Reshape back to match original input shape.
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q, min_encoding_indices, loss

    def compute_distances(self, hidden_states):
        # Squared L2 distances via ||x||^2 + ||e||^2 - 2 x.e, computed with addmm.
        hidden_states_flattened = hidden_states.reshape((-1, self.embedding_dim))
        emb_weights = self.embedding.weight.t()

        inputs_norm_sq = hidden_states_flattened.pow(2.0).sum(dim=1, keepdim=True)
        codebook_t_norm_sq = emb_weights.pow(2.0).sum(dim=0, keepdim=True)
        distances = torch.addmm(
            inputs_norm_sq + codebook_t_norm_sq,
            hidden_states_flattened,
            emb_weights,
            alpha=-2.0,
        )
        return distances

    def get_codebook_entry(self, indices):
        # indices are expected to be of shape (batch, num_tokens)
        batch, num_tokens = indices.shape
        z_q = self.embedding(indices)
        z_q = z_q.reshape(batch, int(math.sqrt(num_tokens)), int(math.sqrt(num_tokens)), -1).permute(0, 3, 1, 2)
        return z_q

    def get_soft_code(self, hidden_states, temp=1.0, stochastic=False):
        hidden_states = hidden_states.permute(0, 2, 3, 1).contiguous()
        distances = self.compute_distances(hidden_states)

        soft_code = F.softmax(-distances / temp, dim=-1)
        if stochastic:
            code = torch.multinomial(soft_code, 1)
        else:
            code = distances.argmin(dim=-1)

        code = code.reshape(hidden_states.shape[0], -1)
        batch, num_tokens = code.shape
        soft_code = soft_code.reshape(batch, num_tokens, -1)
        return soft_code, code

    def get_code(self, hidden_states):
        hidden_states = hidden_states.permute(0, 2, 3, 1).contiguous()
        distances = self.compute_distances(hidden_states)
        indices = torch.argmin(distances, axis=1).unsqueeze(1)
        indices = indices.reshape(hidden_states.shape[0], -1)
        return indices


class MaskGitVQGAN(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        resolution: int = 256,
        num_channels: int = 3,
        hidden_channels: int = 128,
        channel_mult: Tuple = (1, 1, 2, 2, 4),
        num_res_blocks: int = 2,
        attn_resolutions: Tuple = (16,),
        z_channels: int = 256,
        num_embeddings: int = 1024,
        quantized_embed_dim: int = 256,
        dropout: float = 0.0,
        resample_with_conv: bool = True,
        commitment_cost: float = 0.25,
    ):
        super().__init__()

        self.config.num_resolutions = len(channel_mult)
        self.config.reduction_factor = 2 ** (self.config.num_resolutions - 1)
        self.config.latent_size = resolution // self.config.reduction_factor

        self.encoder = Encoder(self.config)
        self.decoder = Decoder(self.config)
        self.quantize = VectorQuantizer(
            self.config.num_embeddings, self.config.quantized_embed_dim, self.config.commitment_cost
        )

    def encode(self, pixel_values, return_loss=False):
        hidden_states = self.encoder(pixel_values)
        quantized_states, codebook_indices, codebook_loss = self.quantize(hidden_states, return_loss)
        output = (quantized_states, codebook_indices)
        if return_loss:
            output = output + (codebook_loss,)
        return output

    def decode(self, quantized_states):
        reconstructed_pixel_values = self.decoder(quantized_states)
        return reconstructed_pixel_values

    def decode_code(self, codebook_indices):
        quantized_states = self.quantize.get_codebook_entry(codebook_indices)
        reconstructed_pixel_values = self.decode(quantized_states)
        return reconstructed_pixel_values

    def get_soft_code(self, pixel_values, temp=1.0, stochastic=False):
        hidden_states = self.encoder(pixel_values)
        soft_code, codebook_indices = self.quantize.get_soft_code(hidden_states, temp=temp, stochastic=stochastic)
        return soft_code, codebook_indices

    def get_code(self, pixel_values):
        hidden_states = self.encoder(pixel_values)
        codebook_indices = self.quantize.get_code(hidden_states)
        return codebook_indices

    def forward(self, pixel_values, return_loss=False):
        hidden_states = self.encoder(pixel_values)
        quantized_states, codebook_indices, codebook_loss = self.quantize(hidden_states, return_loss)
        reconstructed_pixel_values = self.decode(quantized_states)
        outputs = (reconstructed_pixel_values, quantized_states, codebook_indices)
        if return_loss:
            outputs = outputs + (codebook_loss,)
        return outputs
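# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the repo): an image ->
# tokens -> image roundtrip. Uses a randomly initialized model; load a
# checkpoint for real use. With the default config the reduction factor is
# 16, so a 256px image becomes a 16x16 = 256-token grid.
# ---------------------------------------------------------------------------
def _example_maskgit_vqgan_roundtrip():
    import torch

    from muse.modeling_maskgit_vqgan import MaskGitVQGAN

    vq = MaskGitVQGAN()
    pixel_values = torch.randn(1, 3, 256, 256)
    codebook_indices = vq.get_code(pixel_values)       # shape (1, 256), ids in [0, 1024)
    reconstruction = vq.decode_code(codebook_indices)  # shape (1, 3, 256, 256)
    assert reconstruction.shape == pixel_values.shape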
forward(self, hidden_states, zq=None): residual = hidden_states if zq is not None: hidden_states = self.norm1(hidden_states, zq) else: hidden_states = self.norm1(hidden_states) hidden_states = F.silu(hidden_states) hidden_states = self.conv1(hidden_states) if zq is not None: hidden_states = self.norm2(hidden_states, zq) else: hidden_states = self.norm2(hidden_states) hidden_states = F.silu(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.in_channels != self.out_channels: if self.use_conv_shortcut: residual = self.conv_shortcut(residual) else: residual = self.nin_shortcut(residual) return hidden_states + residual class AttnBlock(nn.Module): def __init__(self, in_channels, zq_ch=None, add_conv=False): super().__init__() self.in_channels = in_channels if zq_ch: self.norm = Normalize(in_channels, zq_ch, add_conv=add_conv) else: self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-06, affine=True) self.q = nn.Linear(in_channels, in_channels) self.k = nn.Linear(in_channels, in_channels) self.v = nn.Linear(in_channels, in_channels) self.proj_out = nn.Linear(in_channels, in_channels) self.use_memory_efficient_attention_xformers = False self.xformers_attention_op = None def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable]=None): if use_memory_efficient_attention_xformers and (not is_xformers_available): raise ImportError('Please install xformers to use memory efficient attention') self.use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers self.xformers_attention_op = attention_op def forward(self, hidden_states, zq=None): residual = hidden_states (batch, channel, height, width) = hidden_states.shape if zq is not None: hidden_states = self.norm(hidden_states, zq) else: hidden_states = self.norm(hidden_states) hidden_states = hidden_states.view(batch, channel, height * width).transpose(1, 2) scale = 1.0 / torch.sqrt(torch.tensor(channel, dtype=hidden_states.dtype, device=hidden_states.device)) query = self.q(hidden_states) key = self.k(hidden_states) value = self.v(hidden_states) if self.use_memory_efficient_attention_xformers: hidden_states = xops.memory_efficient_attention(query, key, value, attn_bias=None, op=self.xformers_attention_op) else: attention_scores = torch.baddbmm(torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device), query, key.transpose(-1, -2), beta=0, alpha=scale) attention_probs = torch.softmax(attention_scores.float(), dim=-1).type(attention_scores.dtype) hidden_states = torch.bmm(attention_probs, value) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.transpose(-1, -2).view(batch, channel, height, width) return hidden_states + residual class UpsamplingBlock(nn.Module): def __init__(self, config, curr_res: int, block_idx: int, zq_ch: int): super().__init__() self.config = config self.block_idx = block_idx self.curr_res = curr_res if self.block_idx == self.config.num_resolutions - 1: block_in = self.config.hidden_channels * self.config.channel_mult[-1] else: block_in = self.config.hidden_channels * self.config.channel_mult[self.block_idx + 1] block_out = self.config.hidden_channels * self.config.channel_mult[self.block_idx] res_blocks = [] attn_blocks = [] for _ in range(self.config.num_res_blocks + 1): res_blocks.append(ResnetBlock(block_in, block_out, zq_ch=zq_ch, dropout=self.config.dropout)) block_in = block_out if 
self.curr_res in self.config.attn_resolutions: attn_blocks.append(AttnBlock(block_in, zq_ch=zq_ch)) self.block = nn.ModuleList(res_blocks) self.attn = nn.ModuleList(attn_blocks) self.upsample = None if self.block_idx != 0: self.upsample = Upsample(block_in, self.config.resample_with_conv) def forward(self, hidden_states, zq): for (i, res_block) in enumerate(self.block): hidden_states = res_block(hidden_states, zq) if len(self.attn) > 1: hidden_states = self.attn[i](hidden_states, zq) if self.upsample is not None: hidden_states = self.upsample(hidden_states) return hidden_states class DownsamplingBlock(nn.Module): def __init__(self, config, curr_res: int, block_idx: int): super().__init__() self.config = config self.curr_res = curr_res self.block_idx = block_idx in_channel_mult = (1,) + tuple(self.config.channel_mult) block_in = self.config.hidden_channels * in_channel_mult[self.block_idx] block_out = self.config.hidden_channels * self.config.channel_mult[self.block_idx] res_blocks = nn.ModuleList() attn_blocks = nn.ModuleList() for _ in range(self.config.num_res_blocks): res_blocks.append(ResnetBlock(block_in, block_out, dropout=self.config.dropout)) block_in = block_out if self.curr_res in self.config.attn_resolutions: attn_blocks.append(AttnBlock(block_in)) self.block = res_blocks self.attn = attn_blocks self.downsample = None if self.block_idx != self.config.num_resolutions - 1: self.downsample = Downsample(block_in, self.config.resample_with_conv) def forward(self, hidden_states): for (i, res_block) in enumerate(self.block): hidden_states = res_block(hidden_states) if len(self.attn) > 1: hidden_states = self.attn[i](hidden_states) if self.downsample is not None: hidden_states = self.downsample(hidden_states) return hidden_states class MidBlock(nn.Module): def __init__(self, config, in_channels: int, zq_ch=None, dropout: float=0.0): super().__init__() self.config = config self.in_channels = in_channels self.dropout = dropout self.block_1 = ResnetBlock(self.in_channels, self.in_channels, dropout=self.dropout, zq_ch=zq_ch) self.attn_1 = AttnBlock(self.in_channels, zq_ch=zq_ch) self.block_2 = ResnetBlock(self.in_channels, self.in_channels, dropout=self.dropout, zq_ch=zq_ch) def forward(self, hidden_states, zq=None): hidden_states = self.block_1(hidden_states, zq) hidden_states = self.attn_1(hidden_states, zq) hidden_states = self.block_2(hidden_states, zq) return hidden_states class Encoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.conv_in = nn.Conv2d(self.config.num_channels, self.config.hidden_channels, kernel_size=3, stride=1, padding=1) curr_res = self.config.resolution downsample_blocks = [] for i_level in range(self.config.num_resolutions): downsample_blocks.append(DownsamplingBlock(self.config, curr_res, block_idx=i_level)) if i_level != self.config.num_resolutions - 1: curr_res = curr_res // 2 self.down = nn.ModuleList(downsample_blocks) mid_channels = self.config.hidden_channels * self.config.channel_mult[-1] self.mid = MidBlock(config, mid_channels, dropout=self.config.dropout) self.norm_out = nn.GroupNorm(num_groups=32, num_channels=mid_channels, eps=1e-06, affine=True) self.conv_out = nn.Conv2d(mid_channels, self.config.z_channels, kernel_size=3, stride=1, padding=1) def forward(self, pixel_values): hidden_states = self.conv_in(pixel_values) for block in self.down: hidden_states = block(hidden_states) hidden_states = self.mid(hidden_states) hidden_states = self.norm_out(hidden_states) hidden_states = F.silu(hidden_states) hidden_states 
= self.conv_out(hidden_states) return hidden_states class MoVQDecoder(nn.Module): def __init__(self, config): super().__init__() self.config = config block_in = self.config.hidden_channels * self.config.channel_mult[self.config.num_resolutions - 1] curr_res = self.config.resolution // 2 ** (self.config.num_resolutions - 1) self.z_shape = (1, self.config.z_channels, curr_res, curr_res) self.conv_in = nn.Conv2d(self.config.z_channels, block_in, kernel_size=3, stride=1, padding=1) self.mid = MidBlock(config, block_in, zq_ch=self.config.quantized_embed_dim, dropout=self.config.dropout) upsample_blocks = [] for i_level in reversed(range(self.config.num_resolutions)): upsample_blocks.append(UpsamplingBlock(self.config, curr_res, block_idx=i_level, zq_ch=self.config.quantized_embed_dim)) if i_level != 0: curr_res = curr_res * 2 self.up = nn.ModuleList(list(reversed(upsample_blocks))) block_out = self.config.hidden_channels * self.config.channel_mult[0] self.norm_out = Normalize(block_out, self.config.quantized_embed_dim, False) self.conv_out = nn.Conv2d(block_out, self.config.num_channels, kernel_size=3, stride=1, padding=1) def forward(self, hidden_states, zq): hidden_states = self.conv_in(hidden_states) hidden_states = self.mid(hidden_states, zq) for block in reversed(self.up): hidden_states = block(hidden_states, zq) hidden_states = self.norm_out(hidden_states, zq) hidden_states = F.silu(hidden_states) hidden_states = self.conv_out(hidden_states) return hidden_states class VectorQuantizer(nn.Module): def __init__(self, num_embeddings, embedding_dim, commitment_cost, legacy=True): super().__init__() self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim self.commitment_cost = commitment_cost self.legacy = legacy self.embedding = nn.Embedding(num_embeddings, embedding_dim) self.embedding.weight.data.uniform_(-1.0 / num_embeddings, 1.0 / num_embeddings) def forward(self, hidden_states, return_loss=False): hidden_states = hidden_states.permute(0, 2, 3, 1).contiguous() distances = self.compute_distances(hidden_states) min_encoding_indices = torch.argmin(distances, axis=1).unsqueeze(1) min_encodings = torch.zeros(min_encoding_indices.shape[0], self.num_embeddings).to(hidden_states) min_encodings.scatter_(1, min_encoding_indices, 1) z_q = torch.matmul(min_encodings, self.embedding.weight).view(hidden_states.shape) min_encoding_indices = min_encoding_indices.reshape(hidden_states.shape[0], -1) loss = None if return_loss: if not self.legacy: loss = self.beta * torch.mean((z_q.detach() - hidden_states) ** 2) + torch.mean((z_q - hidden_states.detach()) ** 2) else: loss = torch.mean((z_q.detach() - hidden_states) ** 2) + self.beta * torch.mean((z_q - hidden_states.detach()) ** 2) z_q = hidden_states + (z_q - hidden_states).detach() z_q = z_q.permute(0, 3, 1, 2).contiguous() return (z_q, min_encoding_indices, loss) def compute_distances(self, hidden_states): hidden_states_flattended = hidden_states.reshape((-1, self.embedding_dim)) return torch.cdist(hidden_states_flattended, self.embedding.weight) def get_codebook_entry(self, indices): (batch, num_tokens) = indices.shape z_q = self.embedding(indices) z_q = z_q.reshape(batch, int(math.sqrt(num_tokens)), int(math.sqrt(num_tokens)), -1).permute(0, 3, 1, 2) return z_q def get_soft_code(self, hidden_states, temp=1.0, stochastic=False): hidden_states = hidden_states.permute(0, 2, 3, 1).contiguous() distances = self.compute_distances(hidden_states) soft_code = F.softmax(-distances / temp, dim=-1) if stochastic: code = 
class MOVQ(ModelMixin, ConfigMixin): @register_to_config def __init__(self, resolution: int=256, num_channels=3, out_channels=3, hidden_channels=128, channel_mult=(1, 2, 2, 4), num_res_blocks=2, attn_resolutions=(32,), z_channels=4, double_z=False, num_embeddings=16384, quantized_embed_dim=4, dropout=0.0, resample_with_conv: bool=True, commitment_cost: float=0.25): super().__init__() self.config.num_resolutions = len(channel_mult) self.config.reduction_factor = 2 ** (self.config.num_resolutions - 1) self.config.latent_size = resolution // self.config.reduction_factor self.encoder = Encoder(self.config) self.decoder = MoVQDecoder(self.config) self.quantize = VectorQuantizer(num_embeddings, quantized_embed_dim, commitment_cost=commitment_cost) self.quant_conv = torch.nn.Conv2d(z_channels, quantized_embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(quantized_embed_dim, z_channels, 1) def encode(self, pixel_values, return_loss=False): hidden_states = self.encoder(pixel_values) hidden_states = self.quant_conv(hidden_states) (quantized_states, codebook_indices, codebook_loss) = self.quantize(hidden_states, return_loss) output = (quantized_states, codebook_indices) if return_loss: output = output + (codebook_loss,) return output def decode(self, quant): quant2 = self.post_quant_conv(quant) dec = self.decoder(quant2, quant) return dec def decode_code(self, codebook_indices): quantized_states = self.quantize.get_codebook_entry(codebook_indices) reconstructed_pixel_values = self.decode(quantized_states) return reconstructed_pixel_values def get_code(self, pixel_values): hidden_states = self.encoder(pixel_values) hidden_states = self.quant_conv(hidden_states) codebook_indices = self.quantize.get_code(hidden_states) return codebook_indices def forward(self, pixel_values, return_loss=False): hidden_states = self.encoder(pixel_values) hidden_states = self.quant_conv(hidden_states) (quantized_states, codebook_indices, codebook_loss) = self.quantize(hidden_states, return_loss) reconstructed_pixel_values = self.decode(quantized_states) output = (reconstructed_pixel_values, codebook_indices) if return_loss: output = output + (codebook_loss,) return output
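# Usage sketch for MOVQ (randomly initialized here; real checkpoints would be
# loaded through the ModelMixin loading utilities, so treat this as illustrative
# only). With the defaults above, channel_mult (1, 2, 2, 4) gives a reduction
# factor of 8, so a 256x256 image maps to a 32x32 = 1024-token grid of codes.
if __name__ == '__main__':
    movq = MOVQ()
    pixel_values = torch.randn(1, 3, 256, 256)
    indices = movq.get_code(pixel_values)   # shape (1, 1024) codebook indices
    recon = movq.decode_code(indices)       # shape (1, 3, 256, 256) reconstruction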
# File: open-muse-main/muse/modeling_paella_vq.py import math import torch import torch.nn.functional as F from torch import nn from .modeling_utils import ConfigMixin, ModelMixin, register_to_config class VectorQuantizer(nn.Module): def __init__(self, num_embeddings, embedding_dim, commitment_cost=0.25): super().__init__() self.num_embeddings = num_embeddings self.codebook_dim = embedding_dim self.commitment_cost = commitment_cost self.codebook = nn.Embedding(num_embeddings, embedding_dim) self.codebook.weight.data.uniform_(-1.0 / num_embeddings, 1.0 / num_embeddings) def forward(self, hidden_states, return_loss=False): hidden_states = hidden_states.permute(0, 2, 3, 1).contiguous() distances = self.compute_distances(hidden_states) min_encoding_indices = torch.argmin(distances, axis=1).unsqueeze(1) min_encodings = torch.zeros(min_encoding_indices.shape[0], self.num_embeddings).to(hidden_states) min_encodings.scatter_(1, min_encoding_indices, 1) z_q = torch.matmul(min_encodings, self.codebook.weight).view(hidden_states.shape) min_encoding_indices = min_encoding_indices.reshape(hidden_states.shape[0], -1) loss = None if return_loss: loss = torch.mean((z_q.detach() - hidden_states) ** 2) + self.commitment_cost * torch.mean((z_q - hidden_states.detach()) ** 2) z_q = hidden_states + (z_q - hidden_states).detach() z_q = z_q.permute(0, 3, 1, 2).contiguous() return (z_q, min_encoding_indices, loss) def compute_distances(self, hidden_states): hidden_states_flattened = hidden_states.reshape((-1, self.codebook_dim)) return torch.cdist(hidden_states_flattened, self.codebook.weight) def get_codebook_entry(self, indices): (batch, num_tokens) = indices.shape z_q = self.codebook(indices) z_q = z_q.reshape(batch, int(math.sqrt(num_tokens)), int(math.sqrt(num_tokens)), -1).permute(0, 3, 1, 2) return z_q def get_soft_code(self, hidden_states, temp=1.0, stochastic=False): hidden_states = hidden_states.permute(0, 2, 3, 1).contiguous() distances = self.compute_distances(hidden_states) soft_code = F.softmax(-distances / temp, dim=-1) if stochastic: code = torch.multinomial(soft_code, 1) else: code = distances.argmin(dim=-1) code = code.reshape(hidden_states.shape[0], -1) (batch, num_tokens) = code.shape soft_code = soft_code.reshape(batch, num_tokens, -1) return (soft_code, code) def get_code(self, hidden_states): hidden_states = hidden_states.permute(0, 2, 3, 1).contiguous() distances = self.compute_distances(hidden_states) indices = torch.argmin(distances, axis=1).unsqueeze(1) indices = indices.reshape(hidden_states.shape[0], -1) return indices class ResBlock(nn.Module): def __init__(self, c, c_hidden): super().__init__() self.norm1 = nn.LayerNorm(c, elementwise_affine=False, eps=1e-06) self.depthwise = nn.Sequential(nn.ReplicationPad2d(1), nn.Conv2d(c, c, kernel_size=3, groups=c)) self.norm2 = nn.LayerNorm(c, elementwise_affine=False, eps=1e-06) self.channelwise = nn.Sequential(nn.Linear(c, c_hidden), nn.GELU(), nn.Linear(c_hidden, c)) self.gammas = nn.Parameter(torch.zeros(6), requires_grad=True) def _basic_init(module): if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d): torch.nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) self.apply(_basic_init) def _norm(self, x, norm): return norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) def forward(self, x): mods = self.gammas x_temp = self._norm(x, self.norm1) * (1 + mods[0]) + mods[1] x = x + self.depthwise(x_temp) * mods[2] x_temp = self._norm(x, self.norm2) * (1 + mods[3]) + mods[4] x = x + self.channelwise(x_temp.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * mods[5] return x class PaellaVQModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, levels=2, bottleneck_blocks=12, c_hidden=384, c_latent=4, codebook_size=8192, scale_factor=0.3764): super().__init__() self.c_latent = c_latent self.scale_factor = scale_factor c_levels = [c_hidden // 2 ** i for i in reversed(range(levels))] self.in_block = nn.Sequential(nn.PixelUnshuffle(2), nn.Conv2d(3 * 4, c_levels[0], kernel_size=1)) down_blocks = [] for i in range(levels): if i > 0: down_blocks.append(nn.Conv2d(c_levels[i - 1], c_levels[i], kernel_size=4, stride=2, padding=1)) block = ResBlock(c_levels[i], c_levels[i] * 4) down_blocks.append(block) down_blocks.append(nn.Sequential(nn.Conv2d(c_levels[-1], c_latent, kernel_size=1, bias=False), nn.BatchNorm2d(c_latent))) self.down_blocks =
nn.Sequential(*down_blocks) self.codebook_size = codebook_size self.vquantizer = VectorQuantizer(codebook_size, c_latent) up_blocks = [nn.Sequential(nn.Conv2d(c_latent, c_levels[-1], kernel_size=1))] for i in range(levels): for j in range(bottleneck_blocks if i == 0 else 1): block = ResBlock(c_levels[levels - 1 - i], c_levels[levels - 1 - i] * 4) up_blocks.append(block) if i < levels - 1: up_blocks.append(nn.ConvTranspose2d(c_levels[levels - 1 - i], c_levels[levels - 2 - i], kernel_size=4, stride=2, padding=1)) self.up_blocks = nn.Sequential(*up_blocks) self.out_block = nn.Sequential(nn.Conv2d(c_levels[0], 3 * 4, kernel_size=1), nn.PixelShuffle(2)) def encode(self, x): x = self.in_block(x) x = self.down_blocks(x) (quantized_states, codebook_indices, codebook_loss) = self.vquantizer(x) quantized_states = quantized_states / self.scale_factor output = (quantized_states, codebook_indices, codebook_loss) return output def decode(self, x): x = x * self.scale_factor x = self.up_blocks(x) x = self.out_block(x) return x def decode_code(self, codebook_indices): x = self.vquantizer.get_codebook_entry(codebook_indices) x = self.up_blocks(x) x = self.out_block(x) return x def get_code(self, pixel_values): x = self.in_block(pixel_values) x = self.down_blocks(x) return self.vquantizer.get_code(x) def forward(self, x, quantize=False): qe = self.encode(x)[0] x = self.decode(qe) return x # File: open-muse-main/muse/modeling_taming_vqgan.py import math from functools import partial from typing import Tuple import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from .modeling_utils import ConfigMixin, ModelMixin, register_to_config class Upsample(nn.Module): def __init__(self, in_channels: int, with_conv: bool): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) def forward(self, hidden_states): hidden_states = torch.nn.functional.interpolate(hidden_states, scale_factor=2.0, mode='nearest') if self.with_conv: hidden_states = self.conv(hidden_states) return hidden_states class Downsample(nn.Module): def __init__(self, in_channels: int, with_conv: bool): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) def forward(self, hidden_states): if self.with_conv: pad = (0, 1, 0, 1) hidden_states = torch.nn.functional.pad(hidden_states, pad, mode='constant', value=0) hidden_states = self.conv(hidden_states) else: hidden_states = torch.nn.functional.avg_pool2d(hidden_states, kernel_size=2, stride=2) return hidden_states class ResnetBlock(nn.Module): def __init__(self, in_channels: int, out_channels: int=None, use_conv_shortcut: bool=False, dropout_prob: float=0.0): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.out_channels_ = self.in_channels if self.out_channels is None else self.out_channels self.use_conv_shortcut = use_conv_shortcut self.norm1 = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-06, affine=True) self.conv1 = nn.Conv2d(self.in_channels, self.out_channels_, kernel_size=3, stride=1, padding=1) self.norm2 = nn.GroupNorm(num_groups=32, num_channels=self.out_channels_, eps=1e-06, affine=True) self.dropout = nn.Dropout(dropout_prob) self.conv2 = nn.Conv2d(self.out_channels_, self.out_channels_, kernel_size=3, stride=(1, 1), padding=1) if self.in_channels != self.out_channels_: if use_conv_shortcut: self.conv_shortcut = 
nn.Conv2d(self.in_channels, self.out_channels_, kernel_size=3, stride=1, padding=1) else: self.nin_shortcut = nn.Conv2d(self.in_channels, self.out_channels_, kernel_size=1, stride=1, padding=0) def forward(self, hidden_states): residual = hidden_states hidden_states = self.norm1(hidden_states) hidden_states = F.silu(hidden_states) hidden_states = self.conv1(hidden_states) hidden_states = self.norm2(hidden_states) hidden_states = F.silu(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.in_channels != self.out_channels_: if self.use_conv_shortcut: residual = self.conv_shortcut(residual) else: residual = self.nin_shortcut(residual) return hidden_states + residual class AttnBlock(nn.Module): def __init__(self, in_channels: int): super().__init__() self.in_channels = in_channels conv = partial(nn.Conv2d, self.in_channels, self.in_channels, kernel_size=1, stride=1, padding=0) self.norm = nn.GroupNorm(num_groups=32, num_channels=self.in_channels, eps=1e-06, affine=True) (self.q, self.k, self.v) = (conv(), conv(), conv()) self.proj_out = conv() def forward(self, hidden_states): residual = hidden_states hidden_states = self.norm(hidden_states) query = self.q(hidden_states) key = self.k(hidden_states) value = self.v(hidden_states) (batch, channels, height, width) = query.shape query = query.reshape((batch, channels, height * width)) query = query.permute(0, 2, 1) key = key.reshape((batch, channels, height * width)) attn_weights = torch.bmm(query, key) attn_weights = attn_weights * int(channels) ** (-0.5) attn_weights = nn.functional.softmax(attn_weights, dim=2) value = value.reshape((batch, channels, height * width)) attn_weights = attn_weights.permute(0, 2, 1) hidden_states = torch.bmm(value, attn_weights) hidden_states = hidden_states.reshape((batch, channels, height, width)) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states + residual return hidden_states class UpsamplingBlock(nn.Module): def __init__(self, config, curr_res: int, block_idx: int): super().__init__() self.config = config self.block_idx = block_idx self.curr_res = curr_res if self.block_idx == self.config.num_resolutions - 1: block_in = self.config.hidden_channels * self.config.channel_mult[-1] else: block_in = self.config.hidden_channels * self.config.channel_mult[self.block_idx + 1] block_out = self.config.hidden_channels * self.config.channel_mult[self.block_idx] res_blocks = [] attn_blocks = [] for _ in range(self.config.num_res_blocks + 1): res_blocks.append(ResnetBlock(block_in, block_out, dropout_prob=self.config.dropout)) block_in = block_out if self.curr_res in self.config.attn_resolutions: attn_blocks.append(AttnBlock(block_in)) self.block = nn.ModuleList(res_blocks) self.attn = nn.ModuleList(attn_blocks) self.upsample = None if self.block_idx != 0: self.upsample = Upsample(block_in, self.config.resample_with_conv) def forward(self, hidden_states): for (i, res_block) in enumerate(self.block): hidden_states = res_block(hidden_states) if len(self.attn) > 0: hidden_states = self.attn[i](hidden_states) if self.upsample is not None: hidden_states = self.upsample(hidden_states) return hidden_states
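# For reference: AttnBlock above is single-head dot-product attention over the
# flattened spatial grid, written out with bmm (kept that way for checkpoint
# parity). A sketch of the same computation via PyTorch 2.x's fused kernel,
# assumed equivalent up to numerics and not used by this repo:
def _sdpa_attn_sketch(q, k, v):
    # q, k, v: (batch, channels, height, width), e.g. the outputs of the 1x1 convs
    (b, c, h, w) = q.shape
    (q, k, v) = (t.flatten(2).transpose(1, 2) for t in (q, k, v))  # (b, h*w, c)
    out = F.scaled_dot_product_attention(q, k, v)  # softmax(q @ k.T / sqrt(c)) @ v
    return out.transpose(1, 2).reshape(b, c, h, w)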
class DownsamplingBlock(nn.Module): def __init__(self, config, curr_res: int, block_idx: int): super().__init__() self.config = config self.curr_res = curr_res self.block_idx = block_idx in_channel_mult = (1,) + tuple(self.config.channel_mult) block_in = self.config.hidden_channels * in_channel_mult[self.block_idx] block_out = self.config.hidden_channels * self.config.channel_mult[self.block_idx] res_blocks = nn.ModuleList() attn_blocks = nn.ModuleList() for _ in range(self.config.num_res_blocks): res_blocks.append(ResnetBlock(block_in, block_out, dropout_prob=self.config.dropout)) block_in = block_out if self.curr_res in self.config.attn_resolutions: attn_blocks.append(AttnBlock(block_in)) self.block = res_blocks self.attn = attn_blocks self.downsample = None if self.block_idx != self.config.num_resolutions - 1: self.downsample = Downsample(block_in, self.config.resample_with_conv) def forward(self, hidden_states): for (i, res_block) in enumerate(self.block): hidden_states = res_block(hidden_states) if len(self.attn) > 0: hidden_states = self.attn[i](hidden_states) if self.downsample is not None: hidden_states = self.downsample(hidden_states) return hidden_states class MidBlock(nn.Module): def __init__(self, config, in_channels: int, no_attn: bool, dropout: float): super().__init__() self.config = config self.in_channels = in_channels self.no_attn = no_attn self.dropout = dropout self.block_1 = ResnetBlock(self.in_channels, self.in_channels, dropout_prob=self.dropout) if not no_attn: self.attn_1 = AttnBlock(self.in_channels) self.block_2 = ResnetBlock(self.in_channels, self.in_channels, dropout_prob=self.dropout) def forward(self, hidden_states): hidden_states = self.block_1(hidden_states) if not self.no_attn: hidden_states = self.attn_1(hidden_states) hidden_states = self.block_2(hidden_states) return hidden_states class Encoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.conv_in = nn.Conv2d(self.config.num_channels, self.config.hidden_channels, kernel_size=3, stride=1, padding=1) curr_res = self.config.resolution downsample_blocks = [] for i_level in range(self.config.num_resolutions): downsample_blocks.append(DownsamplingBlock(self.config, curr_res, block_idx=i_level)) if i_level != self.config.num_resolutions - 1: curr_res = curr_res // 2 self.down = nn.ModuleList(downsample_blocks) mid_channels = self.config.hidden_channels * self.config.channel_mult[-1] self.mid = MidBlock(config, mid_channels, self.config.no_attn_mid_block, self.config.dropout) self.norm_out = nn.GroupNorm(num_groups=32, num_channels=mid_channels, eps=1e-06, affine=True) self.conv_out = nn.Conv2d(mid_channels, self.config.z_channels, kernel_size=3, stride=1, padding=1) def forward(self, pixel_values): hidden_states = self.conv_in(pixel_values) for block in self.down: hidden_states = block(hidden_states) hidden_states = self.mid(hidden_states) hidden_states = self.norm_out(hidden_states) hidden_states = F.silu(hidden_states) hidden_states = self.conv_out(hidden_states) return hidden_states class Decoder(nn.Module): def __init__(self, config): super().__init__() self.config = config block_in = self.config.hidden_channels * self.config.channel_mult[self.config.num_resolutions - 1] curr_res = self.config.resolution // 2 ** (self.config.num_resolutions - 1) self.z_shape = (1, self.config.z_channels, curr_res, curr_res) self.conv_in = nn.Conv2d(self.config.z_channels, block_in, kernel_size=3, stride=1, padding=1) self.mid = MidBlock(config, block_in, self.config.no_attn_mid_block, self.config.dropout) upsample_blocks = [] for i_level in reversed(range(self.config.num_resolutions)): upsample_blocks.append(UpsamplingBlock(self.config, curr_res, block_idx=i_level)) if i_level != 0: curr_res = curr_res * 2 self.up = nn.ModuleList(list(reversed(upsample_blocks)))
block_out = self.config.hidden_channels * self.config.channel_mult[0] self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_out, eps=1e-06, affine=True) self.conv_out = nn.Conv2d(block_out, self.config.num_channels, kernel_size=3, stride=1, padding=1) def forward(self, hidden_states): hidden_states = self.conv_in(hidden_states) hidden_states = self.mid(hidden_states) for block in reversed(self.up): hidden_states = block(hidden_states) hidden_states = self.norm_out(hidden_states) hidden_states = F.silu(hidden_states) hidden_states = self.conv_out(hidden_states) return hidden_states class VectorQuantizer(nn.Module): def __init__(self, num_embeddings, embedding_dim, commitment_cost): super().__init__() self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim self.commitment_cost = commitment_cost self.embedding = nn.Embedding(num_embeddings, embedding_dim) self.embedding.weight.data.uniform_(-1.0 / num_embeddings, 1.0 / num_embeddings) def forward(self, hidden_states, return_loss=False): hidden_states = hidden_states.permute(0, 2, 3, 1).contiguous() distances = self.compute_distances(hidden_states) min_encoding_indices = torch.argmin(distances, axis=1).unsqueeze(1) min_encodings = torch.zeros(min_encoding_indices.shape[0], self.num_embeddings).to(hidden_states) min_encodings.scatter_(1, min_encoding_indices, 1) z_q = torch.matmul(min_encodings, self.embedding.weight).view(hidden_states.shape) min_encoding_indices = min_encoding_indices.reshape(hidden_states.shape[0], -1) loss = None if return_loss: loss = torch.mean((z_q.detach() - hidden_states) ** 2) + self.commitment_cost * torch.mean((z_q - hidden_states.detach()) ** 2) z_q = hidden_states + (z_q - hidden_states).detach() z_q = z_q.permute(0, 3, 1, 2).contiguous() return (z_q, min_encoding_indices, loss) def compute_distances(self, hidden_states): hidden_states_flattened = hidden_states.reshape((-1, self.embedding_dim)) emb_weights = self.embedding.weight.t() inputs_norm_sq = hidden_states_flattened.pow(2.0).sum(dim=1, keepdim=True) codebook_t_norm_sq = emb_weights.pow(2.0).sum(dim=0, keepdim=True) distances = torch.addmm(inputs_norm_sq + codebook_t_norm_sq, hidden_states_flattened, emb_weights, alpha=-2.0) return distances def get_codebook_entry(self, indices): (batch, num_tokens) = indices.shape z_q = self.embedding(indices) z_q = z_q.reshape(batch, int(math.sqrt(num_tokens)), int(math.sqrt(num_tokens)), -1).permute(0, 3, 1, 2) return z_q def get_soft_code(self, hidden_states, temp=1.0, stochastic=False): hidden_states = hidden_states.permute(0, 2, 3, 1).contiguous() distances = self.compute_distances(hidden_states) soft_code = F.softmax(-distances / temp, dim=-1) if stochastic: code = torch.multinomial(soft_code, 1) else: code = distances.argmin(dim=-1) code = code.reshape(hidden_states.shape[0], -1) (batch, num_tokens) = code.shape soft_code = soft_code.reshape(batch, num_tokens, -1) return (soft_code, code) def get_code(self, hidden_states): hidden_states = hidden_states.permute(0, 2, 3, 1).contiguous() distances = self.compute_distances(hidden_states) indices = torch.argmin(distances, axis=1).unsqueeze(1) indices = indices.reshape(hidden_states.shape[0], -1) return indices class VQGANModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, resolution: int=256, num_channels: int=3, hidden_channels: int=128, channel_mult: Tuple=(1, 1, 2, 2, 4), num_res_blocks: int=2, attn_resolutions: Tuple=(16,), no_attn_mid_block: bool=False, z_channels: int=256, num_embeddings: int=1024, quantized_embed_dim: int=256, dropout: float=0.0,
resample_with_conv: bool=True, commitment_cost: float=0.25): super().__init__() self.config.num_resolutions = len(channel_mult) self.config.reduction_factor = 2 ** (self.config.num_resolutions - 1) self.config.latent_size = resolution // self.config.reduction_factor self.encoder = Encoder(self.config) self.decoder = Decoder(self.config) self.quantize = VectorQuantizer(self.config.num_embeddings, self.config.quantized_embed_dim, self.config.commitment_cost) self.quant_conv = nn.Conv2d(self.config.z_channels, self.config.quantized_embed_dim, kernel_size=1) self.post_quant_conv = nn.Conv2d(self.config.quantized_embed_dim, self.config.z_channels, kernel_size=1) def encode(self, pixel_values, return_loss=False): hidden_states = self.encoder(pixel_values) hidden_states = self.quant_conv(hidden_states) (quantized_states, codebook_indices, codebook_loss) = self.quantize(hidden_states, return_loss) output = (quantized_states, codebook_indices) if return_loss: output = output + (codebook_loss,) return output def decode(self, quantized_states): hidden_states = self.post_quant_conv(quantized_states) reconstructed_pixel_values = self.decoder(hidden_states) return reconstructed_pixel_values def decode_code(self, codebook_indices): quantized_states = self.quantize.get_codebook_entry(codebook_indices) reconstructed_pixel_values = self.decode(quantized_states) return reconstructed_pixel_values def get_code(self, pixel_values): hidden_states = self.encoder(pixel_values) hidden_states = self.quant_conv(hidden_states) codebook_indices = self.quantize.get_code(hidden_states) return codebook_indices def forward(self, pixel_values, return_loss=False): hidden_states = self.encoder(pixel_values) hidden_states = self.quant_conv(hidden_states) (quantized_states, codebook_indices, codebook_loss) = self.quantize(hidden_states, return_loss) reconstructed_pixel_values = self.decode(quantized_states) outputs = (reconstructed_pixel_values, quantized_states, codebook_indices) if return_loss: outputs = outputs + (codebook_loss,) return outputs # File: open-muse-main/muse/modeling_transformer.py import math from functools import partial from typing import Callable, Optional import numpy as np import torch import torch.nn.functional as F from einops import rearrange from torch import nn from torch.utils.checkpoint import checkpoint from tqdm import tqdm from .modeling_transformer_v2 import MaskGiTUViT_v2 from .modeling_utils import ConfigMixin, ModelMixin, register_to_config from .sampling import cosine_schedule, gumbel_sample, mask_by_random_topk, top_k try: import xformers.ops as xops is_xformers_available = True except ImportError: is_xformers_available = False MaskGiTUViT = MaskGiTUViT_v2 def uniform(shape, min=0, max=1, device=None): return torch.zeros(shape, device=device).float().uniform_(min, max) def prob_mask_like(shape, prob, device=None): if prob == 1: return torch.ones(shape, device=device, dtype=torch.bool) elif prob == 0: return torch.zeros(shape, device=device, dtype=torch.bool) else: return uniform(shape, device=device) < prob def make_attention_mask(query_input: torch.Tensor, key_input: torch.Tensor, pairwise_fn: Callable=torch.mul) -> torch.Tensor: mask = pairwise_fn(torch.unsqueeze(query_input, axis=-1), torch.unsqueeze(key_input, axis=-2)) mask = torch.unsqueeze(mask, axis=-3) return (1.0 - mask).type(torch.bool) try: from apex.normalization import FusedRMSNorm as RMSNorm except Exception: class RMSNorm(nn.Module): def __init__(self, normalized_shape, eps=1e-06, elementwise_affine=True): super().__init__()
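# Fallback RMSNorm for when apex's fused kernel is unavailable: unlike
# LayerNorm it does not subtract the mean, it only rescales by the root mean
# square, y = x / sqrt(mean(x**2) + eps) * weight (see forward below).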
self.elementwise_affine = elementwise_affine if elementwise_affine: self.weight = nn.Parameter(torch.ones(normalized_shape)) self.variance_epsilon = eps def forward(self, input): input_dtype = input.dtype variance = input.to(torch.float32).pow(2).mean(-1, keepdim=True) input = input * torch.rsqrt(variance + self.variance_epsilon) if self.elementwise_affine: if self.weight.dtype in [torch.float16, torch.bfloat16]: input = input.to(self.weight.dtype) input = input * self.weight else: input = input.to(input_dtype) return input def sinusoidal_enocde(features, embedding_dim, max_positions=10000): half_dim = embedding_dim // 2 emb = math.log(max_positions) / half_dim emb = torch.arange(0, half_dim, device=features.device, dtype=torch.float32).mul(-emb).exp() emb = features[:, None] * emb[None, :] emb = torch.cat([emb.cos(), emb.sin()], dim=1) if embedding_dim % 2 == 1: emb = nn.functional.pad(emb, (0, 1), mode='constant') return emb class LayerNorm(nn.Module): def __init__(self, dim, eps=1e-05, use_bias=False, elementwise_affine=True): super().__init__() self.dim = dim if elementwise_affine: self.weight = nn.Parameter(torch.ones(dim)) self.bias = nn.Parameter(torch.zeros(dim)) if use_bias else None else: self.weight = None self.bias = None self.eps = eps def forward(self, x): return F.layer_norm(x, (self.dim,), self.weight, self.bias, self.eps) class AdaLNModulation(nn.Module): def __init__(self, cond_embed_dim, hidden_size, use_bias=False): super().__init__() self.mapper = nn.Linear(cond_embed_dim, hidden_size * 2, bias=use_bias) def forward(self, hidden_states, cond_embeds): cond_embeds = F.silu(cond_embeds) (scale, shift) = self.mapper(cond_embeds).chunk(2, dim=1) if hidden_states.dim() > 3: (scale, shift) = (scale[:, :, None, None], shift[:, :, None, None]) else: (scale, shift) = (scale[:, None], shift[:, None]) return hidden_states * (1 + scale) + shift class Attention(nn.Module): def __init__(self, hidden_size, num_heads, encoder_hidden_size=None, attention_dropout=0.0, use_bias=False): super().__init__() self.hidden_size = hidden_size self.num_heads = num_heads self.head_dim = hidden_size // num_heads self.attention_dropout = attention_dropout if self.head_dim * self.num_heads != self.hidden_size: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype()) self.query = nn.Linear(self.hidden_size, self.hidden_size, bias=use_bias) kv_hidden_size = self.hidden_size if encoder_hidden_size is None else encoder_hidden_size self.key = nn.Linear(kv_hidden_size, self.hidden_size, bias=use_bias) self.value = nn.Linear(kv_hidden_size, self.hidden_size, bias=use_bias) self.out = nn.Linear(self.hidden_size, self.hidden_size, bias=use_bias) self.dropout = nn.Dropout(attention_dropout) self.use_memory_efficient_attention_xformers = False self.xformers_attention_op = None def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable]=None): if use_memory_efficient_attention_xformers and (not is_xformers_available): raise ImportError('Please install xformers to use memory efficient attention') self.use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers self.xformers_attention_op = attention_op def forward(self, hidden_states, encoder_hidden_states=None, encoder_attention_mask=None): if encoder_attention_mask is not None and 
self.use_memory_efficient_attention_xformers: raise ValueError('Memory efficient attention does not yet support encoder attention mask') context = hidden_states if encoder_hidden_states is None else encoder_hidden_states (batch, q_seq_len, _) = hidden_states.shape kv_seq_len = q_seq_len if encoder_hidden_states is None else encoder_hidden_states.shape[1] query = self.query(hidden_states) key = self.key(context) value = self.value(context) query = query.view(batch, q_seq_len, self.num_heads, self.head_dim) key = key.view(batch, kv_seq_len, self.num_heads, self.head_dim) value = value.view(batch, kv_seq_len, self.num_heads, self.head_dim) if self.use_memory_efficient_attention_xformers: attn_output = xops.memory_efficient_attention(query, key, value, op=self.xformers_attention_op, p=self.attention_dropout if self.training else 0.0) attn_output = attn_output.view(batch, q_seq_len, self.hidden_size) else: attention_mask = None if encoder_attention_mask is not None: src_attn_mask = torch.ones(batch, q_seq_len, dtype=torch.long, device=query.device) attention_mask = make_attention_mask(src_attn_mask, encoder_attention_mask) attn_output = self.attention(query, key, value, attention_mask) attn_output = self.out(attn_output) return attn_output def attention(self, query, key, value, attention_mask=None): (batch, seq_len) = query.shape[:2] kv_seq_len = key.shape[1] (query, key, value) = map(lambda t: t.transpose(1, 2).contiguous(), (query, key, value)) attn_weights = torch.baddbmm(input=torch.zeros(batch * self.num_heads, seq_len, kv_seq_len, dtype=query.dtype, device=query.device), batch1=query.view(batch * self.num_heads, seq_len, self.head_dim), batch2=key.view(batch * self.num_heads, kv_seq_len, self.head_dim).transpose(1, 2), alpha=1 / self.scale_attn) attn_weights = attn_weights.view(batch, self.num_heads, seq_len, kv_seq_len) if attention_mask is not None: attn_weights = torch.masked_fill(attn_weights, attention_mask, torch.finfo(query.dtype).min) attn_weights = F.softmax(attn_weights, dim=-1) attn_weights = self.dropout(attn_weights) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous().view(batch, seq_len, self.hidden_size) return attn_output class AttentionBlock2D(nn.Module): def __init__(self, hidden_size, num_heads, encoder_hidden_size, attention_dropout=0.0, norm_type='layernorm', layer_norm_eps=1e-06, ln_elementwise_affine=True, use_bias=False): super().__init__() self.hidden_size = hidden_size norm_cls = partial(LayerNorm, use_bias=use_bias) if norm_type == 'layernorm' else RMSNorm self.attn_layer_norm = norm_cls(self.hidden_size, eps=layer_norm_eps, elementwise_affine=ln_elementwise_affine) self.attention = Attention(hidden_size, num_heads, attention_dropout=attention_dropout, use_bias=use_bias) self.crossattn_layer_norm = norm_cls(hidden_size, eps=layer_norm_eps, elementwise_affine=ln_elementwise_affine) self.crossattention = Attention(hidden_size, num_heads, attention_dropout=attention_dropout, use_bias=use_bias) if encoder_hidden_size != hidden_size: self.kv_mapper = nn.Linear(encoder_hidden_size, hidden_size, bias=use_bias) else: self.kv_mapper = None def forward(self, hidden_states, encoder_hidden_states, encoder_attention_mask=None): (batch_size, channels, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channels, height * width).permute(0, 2, 1) if self.kv_mapper is not None: encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states)) residual = hidden_states
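# Pre-norm residual attention over the flattened (height * width) token
# sequence: self-attention first, then cross-attention against the (optionally
# projected) text encoder states, each followed by a residual add.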
hidden_states = self.attn_layer_norm(hidden_states) hidden_states = self.attention(hidden_states, encoder_hidden_states, encoder_attention_mask) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.crossattn_layer_norm(hidden_states) hidden_states = self.crossattention(hidden_states, encoder_hidden_states, encoder_attention_mask) hidden_states = hidden_states + residual hidden_states = hidden_states.permute(0, 2, 1).view(batch_size, channels, height, width) return hidden_states class Norm2D(nn.Module): def __init__(self, dim, eps=1e-05, use_bias=False, norm_type='layernorm', elementwise_affine=True): super().__init__() if norm_type == 'layernorm': self.norm = LayerNorm(dim, eps, use_bias, elementwise_affine=elementwise_affine) elif norm_type == 'rmsnorm': self.norm = RMSNorm(dim, eps, elementwise_affine=elementwise_affine) def forward(self, x): return self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) class GlobalResponseNorm(nn.Module): def __init__(self, dim): super().__init__() self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) def forward(self, x): Gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True) Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-06) return self.gamma * (x * Nx) + self.beta + x class ResBlock(nn.Module): def __init__(self, in_channels, skip_channels=None, kernel_size=3, dropout=0.0, norm_type='layernorm', ln_elementwise_affine=True, add_cond_embeds=False, cond_embed_dim=None, use_bias=False, res_ffn_factor=4, **kwargs): super().__init__() self.depthwise = nn.Conv2d(in_channels + skip_channels, in_channels, kernel_size=kernel_size, padding=kernel_size // 2, groups=in_channels, bias=use_bias) self.norm = Norm2D(in_channels, eps=1e-06, norm_type=norm_type, use_bias=use_bias, elementwise_affine=ln_elementwise_affine) self.channelwise = nn.Sequential(nn.Linear(in_channels, int(in_channels * res_ffn_factor), bias=use_bias), nn.GELU(), GlobalResponseNorm(int(in_channels * res_ffn_factor)), nn.Dropout(dropout), nn.Linear(int(in_channels * res_ffn_factor), in_channels, bias=use_bias)) if add_cond_embeds: self.adaLN_modulation = AdaLNModulation(cond_embed_dim=cond_embed_dim, hidden_size=in_channels, use_bias=use_bias) def forward(self, x, x_skip=None, cond_embeds=None): x_res = x if x_skip is not None: x = torch.cat([x, x_skip], dim=1) x = self.norm(self.depthwise(x)).permute(0, 2, 3, 1) x = self.channelwise(x).permute(0, 3, 1, 2) x = x + x_res if cond_embeds is not None: x = self.adaLN_modulation(x, cond_embeds) return x class ResnetBlockVanilla(nn.Module): def __init__(self, in_channels, out_channels=None, conv_shortcut=False, dropout=0.0, use_bias=False, **kwargs): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.norm1 = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-06, affine=True) self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=use_bias) self.norm2 = torch.nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=1e-06, affine=True) self.dropout = torch.nn.Dropout(dropout) self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=use_bias) if self.in_channels != self.out_channels: if self.use_conv_shortcut: self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=use_bias) else: 
self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=use_bias) def forward(self, hidden_states, **kwargs): residual = hidden_states hidden_states = self.norm1(hidden_states) hidden_states = F.silu(hidden_states) hidden_states = self.conv1(hidden_states) hidden_states = self.norm2(hidden_states) hidden_states = F.silu(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.in_channels != self.out_channels: if self.use_conv_shortcut: residual = self.conv_shortcut(residual) else: residual = self.nin_shortcut(residual) return residual + hidden_states class DownsampleBlock(nn.Module): def __init__(self, input_channels, output_channels=None, skip_channels=None, num_res_blocks=4, kernel_size=3, res_ffn_factor=4, dropout=0.0, norm_type='layernorm', ln_elementwise_affine=True, add_downsample=True, add_cond_embeds=False, cond_embed_dim=None, has_attention=False, num_heads=None, encoder_hidden_size=None, use_bias=False, **kwargs): super().__init__() self.add_downsample = add_downsample self.has_attention = has_attention if add_downsample: self.downsample = nn.Sequential(Norm2D(input_channels, eps=1e-06, use_bias=use_bias, norm_type=norm_type, elementwise_affine=ln_elementwise_affine), nn.Conv2d(input_channels, output_channels, kernel_size=2, stride=2, bias=use_bias)) self.input_channels = output_channels else: self.input_channels = input_channels self.res_blocks = nn.ModuleList([ResBlock(self.input_channels, skip_channels=skip_channels, kernel_size=kernel_size, dropout=dropout, norm_type=norm_type, ln_elementwise_affine=ln_elementwise_affine, add_cond_embeds=add_cond_embeds, cond_embed_dim=cond_embed_dim, use_bias=use_bias, res_ffn_factor=res_ffn_factor) for _ in range(num_res_blocks)]) if has_attention: self.attention_blocks = nn.ModuleList([AttentionBlock2D(hidden_size=self.input_channels, num_heads=num_heads, encoder_hidden_size=encoder_hidden_size, attention_dropout=dropout, norm_type=norm_type, ln_elementwise_affine=ln_elementwise_affine, use_bias=use_bias) for _ in range(num_res_blocks)]) self.gradient_checkpointing = False def forward(self, x, x_skip=None, cond_embeds=None, encoder_hidden_states=None, **kwargs): if self.add_downsample: x = self.downsample(x) output_states = () for (i, res_block) in enumerate(self.res_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward x = torch.utils.checkpoint.checkpoint(create_custom_forward(res_block), x, x_skip) if self.has_attention: x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.attention_blocks[i]), x, encoder_hidden_states) else: x = res_block(x, x_skip, cond_embeds=cond_embeds) if self.has_attention: x = self.attention_blocks[i](x, encoder_hidden_states) output_states += (x,) return (x, output_states) class UpsampleBlock(nn.Module): def __init__(self, input_channels, output_channels=None, skip_channels=None, num_res_blocks=4, kernel_size=3, res_ffn_factor=4, dropout=0.0, norm_type='layernorm', ln_elementwise_affine=True, add_upsample=True, add_cond_embeds=False, cond_embed_dim=None, has_attention=False, num_heads=None, encoder_hidden_size=None, use_bias=False, **kwargs): super().__init__() self.add_upsample = add_upsample self.has_attention = has_attention self.input_channels = input_channels self.output_channels = output_channels if output_channels is not None else input_channels self.res_blocks = 
nn.ModuleList([ResBlock(self.input_channels, skip_channels=skip_channels if i == 0 else 0, kernel_size=kernel_size, dropout=dropout, norm_type=norm_type, ln_elementwise_affine=ln_elementwise_affine, add_cond_embeds=add_cond_embeds, cond_embed_dim=cond_embed_dim, use_bias=use_bias, res_ffn_factor=res_ffn_factor) for i in range(num_res_blocks)]) if has_attention: self.attention_blocks = nn.ModuleList([AttentionBlock2D(hidden_size=self.input_channels, num_heads=num_heads, encoder_hidden_size=encoder_hidden_size, attention_dropout=dropout, norm_type=norm_type, ln_elementwise_affine=ln_elementwise_affine, use_bias=use_bias) for _ in range(num_res_blocks)]) if add_upsample: self.upsample = nn.Sequential(Norm2D(self.input_channels, eps=1e-06, norm_type=norm_type, use_bias=use_bias, elementwise_affine=ln_elementwise_affine), nn.ConvTranspose2d(self.input_channels, self.output_channels, kernel_size=2, stride=2, bias=use_bias)) self.gradient_checkpointing = False def forward(self, x, x_skip=None, cond_embeds=None, encoder_hidden_states=None, **kwargs): for (i, res_block) in enumerate(self.res_blocks): x_res = x_skip[0] if i == 0 and x_skip is not None else None if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward x = torch.utils.checkpoint.checkpoint(create_custom_forward(res_block), x, x_res) if self.has_attention: x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.attention_blocks[i]), x, encoder_hidden_states) else: x = res_block(x, x_res, cond_embeds=cond_embeds) if self.has_attention: x = self.attention_blocks[i](x, encoder_hidden_states) if self.add_upsample: x = self.upsample(x) return x class DownsampleBlockVanilla(nn.Module): def __init__(self, input_channels, output_channels=None, num_res_blocks=4, dropout=0.0, add_downsample=True, use_bias=False, **kwargs): super().__init__() self.add_downsample = add_downsample res_blocks = [] for i in range(num_res_blocks): in_channels = input_channels if i == 0 else output_channels res_blocks.append(ResnetBlockVanilla(in_channels=in_channels, out_channels=output_channels, dropout=dropout, use_bias=use_bias)) self.res_blocks = nn.ModuleList(res_blocks) if add_downsample: self.downsample_conv = nn.Conv2d(output_channels, output_channels, 3, stride=2, bias=use_bias) self.gradient_checkpointing = False def forward(self, x, **kwargs): output_states = () for res_block in self.res_blocks: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward x = torch.utils.checkpoint.checkpoint(create_custom_forward(res_block), x) else: x = res_block(x) output_states = output_states + (x,) if self.add_downsample: pad = (0, 1, 0, 1) x = torch.nn.functional.pad(x, pad, mode='constant', value=0) x = self.downsample_conv(x) output_states = output_states + (x,) return (x, output_states) class UpsampleBlockVanilla(nn.Module): def __init__(self, input_channels, output_channels, skip_channels=None, num_res_blocks=4, dropout=0.0, add_upsample=True, use_bias=False, **kwargs): super().__init__() self.add_upsample = add_upsample res_blocks = [] for i in range(num_res_blocks): res_skip_channels = input_channels if i == num_res_blocks - 1 else output_channels resnet_in_channels = skip_channels if i == 0 else output_channels res_blocks.append(ResnetBlockVanilla(in_channels=resnet_in_channels + res_skip_channels, out_channels=output_channels, dropout=dropout)) 
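# Channel bookkeeping for the skip connections mirrors a diffusers-style UNet
# up block: each resnet consumes its input concatenated with one skip tensor
# popped from the down path, hence in_channels = resnet_in_channels + res_skip_channels.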
self.res_blocks = nn.ModuleList(res_blocks) if add_upsample: self.upsample_conv = nn.Conv2d(output_channels, output_channels, 3, padding=1) self.gradient_checkpointing = False def forward(self, x, x_skip, **kwargs): for res_block in self.res_blocks: res_hidden_states = x_skip[-1] x_skip = x_skip[:-1] x = torch.cat([x, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward x = torch.utils.checkpoint.checkpoint(create_custom_forward(res_block), x) else: x = res_block(x) if self.add_upsample: if x.shape[0] >= 64: x = x.contiguous() x = F.interpolate(x, scale_factor=2.0, mode='nearest') x = self.upsample_conv(x) return x class FeedForward(nn.Module): def __init__(self, hidden_size, intermediate_size, hidden_dropout=0.0, norm_type='layernorm', layer_norm_eps=1e-05, ln_elementwise_affine=True, use_normformer=True, add_cond_embeds=False, cond_embed_dim=None, use_bias=False, ffn_type='glu'): super().__init__() self.use_normformer = use_normformer self.ffn_type = ffn_type self.pre_mlp_layer_norm = LayerNorm(hidden_size, eps=layer_norm_eps, use_bias=use_bias, elementwise_affine=ln_elementwise_affine) self.wi_0 = nn.Linear(hidden_size, intermediate_size, bias=use_bias) if ffn_type == 'glu': self.wi_1 = nn.Linear(hidden_size, intermediate_size, bias=use_bias) if use_normformer: norm_cls = partial(LayerNorm, use_bias=use_bias) if norm_type == 'layernorm' else RMSNorm self.mid_mlp_layer_norm = norm_cls(intermediate_size, eps=layer_norm_eps, elementwise_affine=ln_elementwise_affine) self.wo = nn.Linear(intermediate_size, hidden_size, bias=use_bias) self.dropout = nn.Dropout(hidden_dropout) if add_cond_embeds: self.adaLN_modulation = AdaLNModulation(cond_embed_dim=cond_embed_dim, hidden_size=hidden_size, use_bias=use_bias) def forward(self, hidden_states: torch.FloatTensor, cond_embeds=None) -> torch.FloatTensor: hidden_states = self.pre_mlp_layer_norm(hidden_states) if cond_embeds is not None: hidden_states = self.adaLN_modulation(hidden_states, cond_embeds) hidden_gelu = F.gelu(self.wi_0(hidden_states)) if self.ffn_type == 'glu': hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear else: hidden_states = hidden_gelu if self.use_normformer: hidden_states = self.mid_mlp_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states class TransformerLayer(nn.Module): def __init__(self, hidden_size, intermediate_size, num_attention_heads, encoder_hidden_size=1024, add_cross_attention=False, hidden_dropout=0.0, attention_dropout=0.0, norm_type='layernorm', layer_norm_eps=1e-05, ln_elementwise_affine=True, use_normformer=True, add_cond_embeds=False, cond_embed_dim=None, ffn_type='glu', use_bias=False): super().__init__() self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_attention_heads = num_attention_heads self.use_normformer = use_normformer norm_cls = partial(LayerNorm, use_bias=use_bias) if norm_type == 'layernorm' else RMSNorm self.attn_layer_norm = norm_cls(self.hidden_size, eps=layer_norm_eps, elementwise_affine=ln_elementwise_affine) self.attention = Attention(self.hidden_size, self.num_attention_heads, attention_dropout=attention_dropout, use_bias=use_bias) if use_normformer: self.post_attn_layer_norm = norm_cls(self.hidden_size, eps=layer_norm_eps, elementwise_affine=ln_elementwise_affine) self.ffn = FeedForward(self.hidden_size, 
self.intermediate_size, hidden_dropout, norm_type, layer_norm_eps, ln_elementwise_affine, use_normformer, add_cond_embeds, cond_embed_dim, use_bias, ffn_type) if add_cross_attention: self.crossattn_layer_norm = norm_cls(self.hidden_size, eps=layer_norm_eps, elementwise_affine=ln_elementwise_affine) self.crossattention = Attention(self.hidden_size, self.num_attention_heads, encoder_hidden_size, attention_dropout, use_bias) if use_normformer: self.post_crossattn_layer_norm = norm_cls(self.hidden_size, eps=layer_norm_eps, elementwise_affine=ln_elementwise_affine) if add_cond_embeds: self.self_attn_adaLN_modulation = AdaLNModulation(cond_embed_dim=cond_embed_dim, hidden_size=hidden_size, use_bias=use_bias) if add_cross_attention: self.cross_attn_adaLN_modulation = AdaLNModulation(cond_embed_dim=cond_embed_dim, hidden_size=hidden_size, use_bias=use_bias) def forward(self, hidden_states, encoder_hidden_states=None, encoder_attention_mask=None, cond_embeds=None): residual = hidden_states hidden_states = self.attn_layer_norm(hidden_states) if cond_embeds is not None: hidden_states = self.self_attn_adaLN_modulation(hidden_states, cond_embeds) attention_output = self.attention(hidden_states) if self.use_normformer: attention_output = self.post_attn_layer_norm(attention_output) hidden_states = residual + attention_output if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.crossattn_layer_norm(hidden_states) if cond_embeds is not None: hidden_states = self.cross_attn_adaLN_modulation(hidden_states, cond_embeds) attention_output = self.crossattention(hidden_states, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask) if self.use_normformer: attention_output = self.post_crossattn_layer_norm(attention_output) hidden_states = residual + attention_output residual = hidden_states hidden_states = self.ffn(hidden_states, cond_embeds=cond_embeds) hidden_states = residual + hidden_states return hidden_states class Embed(nn.Module): def __init__(self, vocab_size, embedding_size, hidden_size, hidden_dropout=0.0, max_position_embeddings=512, norm_type='layernorm', layer_norm_eps=1e-05, use_bias=False, layer_norm_embedddings=False, use_embeddings_project=False): super().__init__() self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.hidden_dropout = hidden_dropout self.max_position_embeddings = max_position_embeddings self.layer_norm_embedddings = layer_norm_embedddings self.use_embeddings_project = use_embeddings_project self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_size) self.position_embeddings = nn.Embedding(self.max_position_embeddings, self.embedding_size) self.dropout = nn.Dropout(self.hidden_dropout) if layer_norm_embedddings: norm_cls = partial(LayerNorm, use_bias=use_bias) if norm_type == 'layernorm' else RMSNorm self.embeddings_ln = norm_cls(self.embedding_size, eps=layer_norm_eps) if use_embeddings_project: self.embedding_hidden_mapping = nn.Linear(self.embedding_size, self.hidden_size, bias=use_bias) def forward(self, input_ids): seq_length = input_ids.shape[-1] position_ids = torch.arange(seq_length)[None, :].to(input_ids.device) word_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) input_embeddings = word_embeddings + position_embeddings if self.layer_norm_embedddings: input_embeddings = self.embeddings_ln(input_embeddings) if self.use_embeddings_project: input_embeddings = 
self.embedding_hidden_mapping(input_embeddings) input_embeddings = self.dropout(input_embeddings) return input_embeddings class MlmLayer(nn.Module): def __init__(self, hidden_size, vocab_size, norm_type='layernorm', layer_norm_eps=1e-05, use_mlm_layernorm=True, use_bias=False): super().__init__() self.hidden_size = hidden_size self.use_mlm_layernorm = use_mlm_layernorm self.mlm_dense = nn.Linear(self.hidden_size, self.hidden_size, bias=use_bias) if use_mlm_layernorm: norm_cls = partial(LayerNorm, use_bias=use_bias) if norm_type == 'layernorm' else RMSNorm self.mlm_ln = norm_cls(self.hidden_size, eps=layer_norm_eps) self.to_logits = nn.Linear(self.hidden_size, vocab_size, bias=use_bias) def forward(self, hidden_states): hidden_states = self.mlm_dense(hidden_states) hidden_states = F.gelu(hidden_states) if self.use_mlm_layernorm: hidden_states = self.mlm_ln(hidden_states) logits = self.to_logits(hidden_states) return logits class ConvEmbed(nn.Module): def __init__(self, vocab_size, embedding_size, hidden_size, patch_size=2, max_position_embeddings=256, norm_type='layernorm', ln_elementwise_affine=True, layer_norm_embedddings=False, layer_norm_eps=1e-05, use_position_embeddings=True, use_bias=False): super().__init__() self.hidden_size = hidden_size self.patch_size = patch_size self.max_position_embeddings = max_position_embeddings self.use_position_embeddings = use_position_embeddings self.layer_norm_embedddings = layer_norm_embedddings self.embeddings = nn.Embedding(vocab_size, embedding_size) norm_cls = partial(LayerNorm, use_bias=use_bias) if norm_type == 'layernorm' else RMSNorm self.layer_norm = norm_cls(embedding_size, eps=layer_norm_eps, elementwise_affine=ln_elementwise_affine) if patch_size > 1: self.pixel_unshuffle = nn.PixelUnshuffle(patch_size) self.conv = nn.Conv2d(embedding_size * patch_size ** 2, hidden_size, kernel_size=1, bias=use_bias) if use_position_embeddings: self.position_embeddings = nn.Embedding(self.max_position_embeddings, hidden_size) if self.layer_norm_embedddings: self.embeddings_ln = Norm2D(hidden_size, eps=layer_norm_eps, norm_type=norm_type, elementwise_affine=ln_elementwise_affine) def forward(self, input_ids): (batch_size, seq_length) = input_ids.shape (height, width) = (int(seq_length ** 0.5), int(seq_length ** 0.5)) input_ids = input_ids.view(-1, height, width) embeddings = self.embeddings(input_ids) embeddings = self.layer_norm(embeddings) embeddings = embeddings.permute(0, 3, 1, 2) if self.patch_size > 1: embeddings = self.pixel_unshuffle(embeddings) embeddings = self.conv(embeddings) if self.use_position_embeddings: embeddings = embeddings.permute(0, 2, 3, 1).view(batch_size, -1, self.hidden_size) position_ids = torch.arange(embeddings.shape[1])[None, :].to(input_ids.device) position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings if self.layer_norm_embedddings: embeddings = self.embeddings_ln(embeddings) return embeddings class ConvMlmLayer(nn.Module): def __init__(self, vocab_size, embedding_size, hidden_size, patch_size=2, norm_type='layernorm', ln_elementwise_affine=True, layer_norm_eps=1e-05, use_bias=False): super().__init__() self.vocab_size = vocab_size self.patch_size = patch_size self.conv1 = nn.Conv2d(hidden_size, embedding_size * patch_size ** 2, kernel_size=1, bias=use_bias) if patch_size > 1: self.pixel_shuffle = nn.PixelShuffle(patch_size) self.layer_norm = Norm2D(embedding_size, norm_type=norm_type, eps=layer_norm_eps, use_bias=use_bias, elementwise_affine=ln_elementwise_affine) 
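# ConvMlmLayer inverts ConvEmbed's patching: conv1 expands hidden states to
# embedding_size * patch_size**2 channels, pixel_shuffle restores the full
# token grid, and conv2 maps each position to logits over the codebook.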
self.conv2 = nn.Conv2d(embedding_size, vocab_size, kernel_size=1, bias=use_bias) def forward(self, hidden_states): (batch_size, seq_length, hidden_size) = hidden_states.shape (height, width) = (int(seq_length ** 0.5), int(seq_length ** 0.5)) hidden_states = hidden_states.view(batch_size, height, width, hidden_size).permute(0, 3, 1, 2) hidden_states = self.conv1(hidden_states) if self.patch_size > 1: hidden_states = self.pixel_shuffle(hidden_states) hidden_states = self.layer_norm(hidden_states) logits = self.conv2(hidden_states) logits = logits.permute(0, 2, 3, 1).view(batch_size, -1, self.vocab_size) return logits class MaskGitTransformer(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, vocab_size, hidden_size=768, embedding_size=None, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout=0.1, attention_dropout=0.1, max_position_embeddings=256, add_cross_attention=False, encoder_hidden_size=1024, project_encoder_hidden_states=False, initializer_range=0.02, norm_type='layernorm', layer_norm_eps=1e-05, use_normformer=True, use_encoder_layernorm=True, use_mlm_layer=True, use_mlm_layernorm=True, use_bias=False, codebook_size=1024, num_vq_tokens=256, num_classes=None, use_codebook_size_for_output=False, use_conv_in_out=False, patch_size=1, **kwargs): super().__init__() self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.embedding_size = embedding_size or hidden_size self.register_to_config(mask_token_id=vocab_size - 1) norm_cls = partial(LayerNorm, use_bias=use_bias) if norm_type == 'layernorm' else RMSNorm if use_conv_in_out: self.embed = ConvEmbed(vocab_size, embedding_size, hidden_size, patch_size=patch_size, norm_type=norm_type, layer_norm_eps=layer_norm_eps, use_bias=use_bias) else: self.embed = Embed(self.vocab_size, self.embedding_size, self.hidden_size, self.hidden_dropout, self.max_position_embeddings, norm_type=norm_type, layer_norm_eps=layer_norm_eps, use_bias=use_bias) if add_cross_attention and project_encoder_hidden_states: self.encoder_proj = nn.Linear(encoder_hidden_size, hidden_size, bias=use_bias) self.encoder_proj_layer_norm = norm_cls(hidden_size, eps=layer_norm_eps) encoder_hidden_size = hidden_size self.transformer_layers = nn.ModuleList([TransformerLayer(hidden_size=self.hidden_size, intermediate_size=self.intermediate_size, num_attention_heads=self.num_attention_heads, encoder_hidden_size=encoder_hidden_size, add_cross_attention=add_cross_attention, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, norm_type=norm_type, layer_norm_eps=layer_norm_eps, use_normformer=use_normformer, use_bias=use_bias) for _ in range(self.num_hidden_layers)]) if use_encoder_layernorm: self.encoder_layer_norm = norm_cls(self.hidden_size, eps=layer_norm_eps) self.output_size = codebook_size if use_codebook_size_for_output else self.vocab_size if use_mlm_layer: if use_conv_in_out: self.mlm_layer = ConvMlmLayer(self.output_size, embedding_size, hidden_size, patch_size=patch_size, norm_type=norm_type, layer_norm_eps=layer_norm_eps, use_bias=use_bias) else: self.mlm_layer = MlmLayer(self.hidden_size, self.output_size, norm_type, layer_norm_eps, use_mlm_layernorm, use_bias) else:
self.to_logits = nn.Linear(self.hidden_size, self.output_size, bias=use_bias) self.gradient_checkpointing = False self.apply(self._init_weights) def _init_weights(self, module): if isinstance(module, nn.Linear): nn.init.trunc_normal_(module.weight, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): nn.init.trunc_normal_(module.weight, std=self.config.initializer_range) elif isinstance(module, (nn.LayerNorm, RMSNorm)): if hasattr(module, 'weight') and module.weight is not None: module.weight.data.fill_(1.0) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() def _set_gradient_checkpointing(self, module, value=False): self.gradient_checkpointing = value def forward(self, input_ids, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, label_smoothing=0.0, cond_dropout_prob=0.0, **kwargs): if self.config.add_cross_attention and encoder_hidden_states is None: raise ValueError('If `add_cross_attention` is True, `encoder_hidden_states` should be provided.') hidden_states = self.embed(input_ids) if encoder_hidden_states is not None and self.config.project_encoder_hidden_states: encoder_hidden_states = self.encoder_proj(encoder_hidden_states) encoder_hidden_states = self.encoder_proj_layer_norm(encoder_hidden_states) if encoder_hidden_states is not None and self.training and (cond_dropout_prob > 0.0): batch_size = encoder_hidden_states.shape[0] mask = prob_mask_like((batch_size, 1, 1), 1.0 - cond_dropout_prob, encoder_hidden_states.device) encoder_hidden_states = encoder_hidden_states * mask for layer in self.transformer_layers: if self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward hidden_states = checkpoint(create_custom_forward(layer), hidden_states, encoder_hidden_states, encoder_attention_mask) else: hidden_states = layer(hidden_states, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask) if self.config.use_encoder_layernorm: hidden_states = self.encoder_layer_norm(hidden_states) if self.config.use_mlm_layer: logits = self.mlm_layer(hidden_states) else: logits = self.to_logits(hidden_states) if labels is not None: loss = F.cross_entropy(logits.view(-1, self.output_size), labels.view(-1), ignore_index=-100, label_smoothing=label_smoothing) return (logits, loss) return logits def generate(self, input_ids: torch.LongTensor=None, class_ids: torch.LongTensor=None, encoder_hidden_states: torch.FloatTensor=None, temperature=1.0, topk_filter_thres=0.9, can_remask_prev_masked=False, timesteps=18, guidance_scale=3, noise_schedule: Callable=cosine_schedule, use_tqdm=True): mask_token_id = self.config.mask_token_id seq_len = self.config.num_vq_tokens batch_size = len(class_ids) if class_ids is not None else encoder_hidden_states.shape[0] shape = (batch_size, seq_len) if class_ids is not None: class_ids += self.config.codebook_size if input_ids is None: input_ids = torch.ones(shape, dtype=torch.long, device=self.device) * mask_token_id scores = torch.zeros(shape, dtype=torch.float32, device=self.device) starting_temperature = temperature iterate_over = zip(torch.linspace(0, 1, timesteps, device=self.device), reversed(range(timesteps))) if use_tqdm: iterate_over = tqdm(iterate_over, total=timesteps) for (timestep, steps_until_x0) in iterate_over: rand_mask_prob = noise_schedule(timestep) num_token_masked = max(int((rand_mask_prob * seq_len).item()), 1)
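# MaskGit-style refinement: scores hold 1 - p(sampled token), so topk selects
# the num_token_masked least-confident positions; those are reset to the mask
# token and re-predicted on the next iteration with a shrinking mask budget
# driven by the (cosine) noise schedule.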
masked_indices = scores.topk(num_token_masked, dim=-1).indices input_ids = input_ids.scatter(1, masked_indices, mask_token_id) if class_ids is not None: input_ids = torch.cat([class_ids[:, None], input_ids], dim=1) if encoder_hidden_states is not None and guidance_scale > 0: uncond_encoder_states = torch.zeros_like(encoder_hidden_states) model_input = torch.cat([input_ids] * 2) condition = torch.cat([encoder_hidden_states, uncond_encoder_states]) (cond_logits, uncond_logits) = self(model_input, encoder_hidden_states=condition).chunk(2) cond_logits = cond_logits[..., :self.config.codebook_size] uncond_logits = uncond_logits[..., :self.config.codebook_size] logits = uncond_logits + guidance_scale * (cond_logits - uncond_logits) else: logits = self(input_ids, encoder_hidden_states=encoder_hidden_states) logits = logits[..., :self.config.codebook_size] if class_ids is not None: input_ids = input_ids[:, 1:] logits = logits[:, 1:] filtered_logits = top_k(logits, topk_filter_thres) temperature = starting_temperature * (steps_until_x0 / timesteps) pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) is_mask = input_ids == mask_token_id input_ids = torch.where(is_mask, pred_ids, input_ids) probs_without_temperature = F.softmax(logits, dim=-1) scores = 1 - probs_without_temperature.gather(2, pred_ids[..., None]) scores = rearrange(scores, '... 1 -> ...') return input_ids def generate2(self, input_ids: torch.LongTensor=None, class_ids: torch.LongTensor=None, encoder_hidden_states: torch.FloatTensor=None, negative_embeds: torch.FloatTensor=None, temperature=1.0, timesteps=18, guidance_scale=0, noise_schedule=cosine_schedule, generator: torch.Generator=None, **kwargs): mask_token_id = self.config.mask_token_id seq_len = self.config.num_vq_tokens batch_size = len(class_ids) if class_ids is not None else encoder_hidden_states.shape[0] shape = (batch_size, seq_len) if class_ids is not None: class_ids += self.config.codebook_size if input_ids is None: input_ids = torch.ones(shape, dtype=torch.long, device=self.device) * mask_token_id if encoder_hidden_states is not None and guidance_scale > 0: if negative_embeds is None: uncond_encoder_states = torch.zeros_like(encoder_hidden_states) else: uncond_encoder_states = negative_embeds condition = torch.cat([encoder_hidden_states, uncond_encoder_states]) model_conds = {'encoder_hidden_states': condition} for step in range(timesteps): if class_ids is not None: input_ids = torch.cat([class_ids[:, None], input_ids], dim=1) if encoder_hidden_states is not None and guidance_scale > 0: model_input = torch.cat([input_ids] * 2) (cond_logits, uncond_logits) = self(model_input, **model_conds).chunk(2) cond_logits = cond_logits[..., :self.config.codebook_size] uncond_logits = uncond_logits[..., :self.config.codebook_size] logits = uncond_logits + guidance_scale * (cond_logits - uncond_logits) else: logits = self(input_ids, encoder_hidden_states=encoder_hidden_states) logits = logits[..., :self.config.codebook_size] if class_ids is not None: input_ids = input_ids[:, 1:] logits = logits[:, 1:] probs = logits.softmax(dim=-1) sampled = probs.reshape(-1, logits.size(-1)) sampled_ids = torch.multinomial(sampled, 1, generator=generator)[:, 0].view(*logits.shape[:-1]) unknown_map = input_ids == mask_token_id sampled_ids = torch.where(unknown_map, sampled_ids, input_ids) ratio = 1.0 * (step + 1) / timesteps mask_ratio = noise_schedule(torch.tensor(ratio)) selected_probs = torch.gather(probs, -1, sampled_ids.long()[..., None]) selected_probs = 
selected_probs.squeeze(-1) selected_probs = torch.where(unknown_map, selected_probs, torch.finfo(selected_probs.dtype).max) mask_len = (seq_len * mask_ratio).floor().unsqueeze(0).to(logits.device) mask_len = torch.max(torch.tensor([1], device=logits.device), torch.min(unknown_map.sum(dim=-1, keepdim=True) - 1, mask_len)) temperature = temperature * (1.0 - ratio) masking = mask_by_random_topk(mask_len, selected_probs, temperature, generator=generator) input_ids = torch.where(masking, mask_token_id, sampled_ids) return sampled_ids # File: open-muse-main/muse/modeling_transformer_v2.py import dataclasses import math import numbers import warnings from dataclasses import dataclass from typing import Callable, Optional, Tuple import numpy as np import torch import torch.nn.functional as F from torch import nn from torch.utils.checkpoint import checkpoint from .modeling_utils import ConfigMixin, ModelMixin from .sampling import cosine_schedule, mask_by_random_topk try: import xformers.ops as xops is_xformers_available = True except ImportError: is_xformers_available = False try: from flash_attn.ops.rms_norm import dropout_add_rms_norm except ImportError: dropout_add_rms_norm = None try: from flash_attn.ops.layer_norm import dropout_add_layer_norm except ImportError: dropout_add_layer_norm = None try: from flash_attn.ops.fused_dense import fused_mlp_func except ImportError: fused_mlp_func = None warnings.simplefilter('once', UserWarning) def sinusoidal_encode(features, embedding_dim, max_positions=10000): half_dim = embedding_dim // 2 emb = math.log(max_positions) / half_dim emb = torch.arange(0, half_dim, device=features.device, dtype=torch.float32).mul(-emb).exp() emb = features[:, None] * emb[None, :] emb = torch.cat([emb.cos(), emb.sin()], dim=1) if embedding_dim % 2 == 1: emb = nn.functional.pad(emb, (0, 1), mode='constant') return emb @dataclass class MaskGiTUViT_v2Config: hidden_size: int = 1024 use_bias: bool = False hidden_dropout: float = 0.0 cond_embed_dim: int = 768 micro_cond_encode_dim: int = 256 micro_cond_embed_dim: int = 1280 encoder_hidden_size: int = 768 vocab_size: int = 8256 mask_token_id: int = 8255 codebook_size: int = 8192 in_channels: int = 768 block_out_channels: Tuple[int] = (768,) num_res_blocks: int = 3 force_down_up_sample: bool = False block_num_heads: int = 12 num_hidden_layers: int = 22 num_attention_heads: int = 16 attention_dropout: float = 0.0 intermediate_size: int = 2816 use_fused_mlp: bool = False norm_type: str = 'rmsnorm' layer_norm_eps: float = 1e-06 ln_elementwise_affine: bool = True use_fused_residual_norm: bool = False add_cond_embeds: bool = True add_micro_cond_embeds: bool = True def config_from_legacy_kwargs(**kwargs): if 'block_num_heads' in kwargs: if isinstance(kwargs['block_num_heads'], (tuple, list)): assert len(kwargs['block_num_heads']) == 1 kwargs['block_num_heads'] = kwargs['block_num_heads'][0] elif isinstance(kwargs['block_num_heads'], int): ... 
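# Legacy checkpoints may store a per-block tuple such as block_num_heads=(12,);
# the v2 config keeps a single int, so the one-element sequence is unwrapped
# above before the MaskGiTUViT_v2Config dataclass is built below.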
else: assert False config = {} for field in dataclasses.fields(MaskGiTUViT_v2Config): if field.name in kwargs: config[field.name] = kwargs[field.name] config = MaskGiTUViT_v2Config(**config) config.block_out_channels = list(config.block_out_channels) return config class MaskGiTUViT_v2(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True def __init__(self, **kwargs): super().__init__() config = config_from_legacy_kwargs(**kwargs) self.register_to_config(**dataclasses.asdict(config)) self.register_to_config(mask_token_id=self.config.vocab_size - 1) if self.config.use_fused_residual_norm and dropout_add_layer_norm is None: warnings.warn('Cannot use fused layer norm. Please install flash_attn. Falling back to unfused layer norm', UserWarning) self.register_to_config(use_fused_residual_norm=False) assert len(self.config.block_out_channels) == 1 self.output_size = self.config.codebook_size self.encoder_proj = nn.Linear(self.config.encoder_hidden_size, self.config.hidden_size, bias=self.config.use_bias) self.encoder_proj_layer_norm = Norm(self.config.hidden_size, self.config) self.embed = ConvEmbed(self.config) self.cond_embed = nn.Sequential(nn.Linear(self.config.micro_cond_embed_dim + self.config.cond_embed_dim, self.config.hidden_size, bias=self.config.use_bias), nn.SiLU(), nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=self.config.use_bias)) self.down_blocks = nn.ModuleList([DownsampleBlock(self.config.block_out_channels[0], self.config)]) self.project_to_hidden_norm = Norm(self.config.block_out_channels[-1], self.config) self.project_to_hidden = nn.Linear(self.config.block_out_channels[-1], self.config.hidden_size, bias=self.config.use_bias) self.transformer_layers = nn.ModuleList([TransformerLayer(self.config) for _ in range(self.config.num_hidden_layers)]) self.project_from_hidden_norm = Norm(self.config.hidden_size, self.config) self.project_from_hidden = nn.Linear(self.config.hidden_size, self.config.block_out_channels[-1], bias=self.config.use_bias) self.up_blocks = nn.ModuleList([UpsampleBlock(self.config.block_out_channels[0], self.config)]) self.mlm_layer = ConvMlmLayer(self.config) self.gradient_checkpointing = False self.apply(self._init_weights) nn.init.xavier_uniform_(self.embed.conv.weight, 0.02) nn.init.normal_(self.embed.embeddings.weight, std=np.sqrt(1 / self.config.vocab_size)) nn.init.constant_(self.mlm_layer.conv1.weight, 0) self.mlm_layer.conv2.weight.data = self.embed.embeddings.weight.data[:self.config.codebook_size, :, None, None].clone() for m in self.modules(): if isinstance(m, AdaLNModulation): nn.init.constant_(m.mapper.weight, 0) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, 0) def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d)): nn.init.trunc_normal_(module.weight, std=0.02) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): nn.init.trunc_normal_(module.weight, std=0.02) elif isinstance(module, (LayerNorm, RMSNorm)): if hasattr(module, 'weight') and module.weight is not None: module.weight.data.fill_(1.0) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() def forward(self, input_ids, encoder_hidden_states, cond_embeds, micro_conds, labels=None, label_smoothing=0.0, loss_weight=None): encoder_hidden_states = self.encoder_proj(encoder_hidden_states) (encoder_hidden_states, _) = self.encoder_proj_layer_norm(encoder_hidden_states) micro_cond_embeds = sinusoidal_encode(micro_conds.flatten(), 
self.config.micro_cond_encode_dim) micro_cond_embeds = micro_cond_embeds.reshape((input_ids.shape[0], -1)) cond_embeds = torch.cat([cond_embeds, micro_cond_embeds], dim=1) cond_embeds = cond_embeds.to(dtype=self.dtype) cond_embeds = self.cond_embed(cond_embeds).to(encoder_hidden_states.dtype) hidden_states = self.embed(input_ids) hidden_states = self.down_blocks[0](hidden_states, cond_embeds=cond_embeds, encoder_hidden_states=encoder_hidden_states) (batch_size, channels, height, width) = hidden_states.shape hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch_size, height * width, channels) (hidden_states, _) = self.project_to_hidden_norm(hidden_states) hidden_states = self.project_to_hidden(hidden_states) transformer_residual = None for layer in self.transformer_layers: if self.training and self.gradient_checkpointing: layer_ = lambda *args: checkpoint(layer, *args) else: layer_ = layer (hidden_states, transformer_residual) = layer_(hidden_states, encoder_hidden_states, cond_embeds, transformer_residual) hidden_states = hidden_states + transformer_residual (hidden_states, _) = self.project_from_hidden_norm(hidden_states) hidden_states = self.project_from_hidden(hidden_states) hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2) assert len(self.up_blocks) == 1 hidden_states = self.up_blocks[0](hidden_states, cond_embeds=cond_embeds, encoder_hidden_states=encoder_hidden_states) (batch_size, channels, height, width) = hidden_states.shape hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch_size, height * width, channels) logits = self.mlm_layer(hidden_states) if labels is not None: reduction = 'none' if loss_weight is not None else 'mean' loss = F.cross_entropy(logits.view(-1, self.codebook_size), labels.view(-1), ignore_index=-100, label_smoothing=label_smoothing, reduction=reduction) if loss_weight is not None: loss_weight = loss_weight.view(-1) loss = ((loss * loss_weight).sum(dim=-1) / loss_weight.sum(dim=-1)).mean() return (logits, loss) return logits def _set_gradient_checkpointing(self, module, value=False): self.gradient_checkpointing = value if isinstance(module, (DownsampleBlock, UpsampleBlock)): module.gradient_checkpointing = value def generate(self): assert False def generate2(self, encoder_hidden_states: torch.FloatTensor, cond_embeds: torch.FloatTensor, micro_conds: torch.FloatTensor, empty_embeds: torch.FloatTensor, empty_cond_embeds: torch.FloatTensor, input_ids: torch.LongTensor=None, negative_embeds: torch.FloatTensor=None, negative_cond_embeds: torch.FloatTensor=None, temperature=1.0, timesteps=18, guidance_scale=0, guidance_schedule=None, noise_schedule=cosine_schedule, generator: torch.Generator=None, return_intermediate=False, seq_len=None, use_tqdm=None, topk_filter_thres=None, noise_type=None, predict_all_tokens=None): batch_size = encoder_hidden_states.shape[0] if seq_len is None: seq_len = 256 shape = (batch_size, seq_len) if isinstance(temperature, tuple): temperatures = torch.linspace(temperature[0], temperature[1], timesteps) else: temperatures = torch.linspace(temperature, 0.01, timesteps) if input_ids is None: input_ids = torch.ones(shape, dtype=torch.long, device=self.device) * self.config.mask_token_id if return_intermediate: intermediate = [] if guidance_schedule == 'linear': guidance_scales = torch.linspace(0, guidance_scale, timesteps) elif guidance_schedule == 'cosine': guidance_scales = [] for step in range(timesteps): ratio = 1.0 * (step + 1) / timesteps scale = 
cosine_schedule(torch.tensor(1 - ratio)) * guidance_scale guidance_scales.append(scale.floor()) guidance_scales = torch.tensor(guidance_scales) else: guidance_scales = torch.ones(timesteps) * guidance_scale if micro_conds.shape[0] == 1: micro_conds = micro_conds.repeat(batch_size, 1).to(input_ids.device) if guidance_scale > 0: if negative_embeds is None: uncond_encoder_states = empty_embeds else: uncond_encoder_states = negative_embeds if uncond_encoder_states.shape[0] == 1: uncond_encoder_states = uncond_encoder_states.expand(batch_size, -1, -1) encoder_hidden_states = torch.cat([encoder_hidden_states, uncond_encoder_states]) if negative_cond_embeds is None: uncond_embeds = empty_cond_embeds else: uncond_embeds = negative_cond_embeds if uncond_embeds.shape[0] == 1: uncond_embeds = uncond_embeds.expand(batch_size, -1) cond_embeds = torch.cat([cond_embeds, uncond_embeds]) micro_conds = torch.cat([micro_conds, micro_conds], dim=0) if use_tqdm: from tqdm.auto import tqdm timesteps_iter = tqdm(range(timesteps)) else: timesteps_iter = range(timesteps) for step in timesteps_iter: if guidance_scale > 0: model_input = torch.cat([input_ids] * 2) model_output = self(model_input, micro_conds=micro_conds, cond_embeds=cond_embeds, encoder_hidden_states=encoder_hidden_states) if guidance_scale > 0: (cond_logits, uncond_logits) = model_output.chunk(2) cond_logits = cond_logits[..., :self.config.codebook_size] uncond_logits = uncond_logits[..., :self.config.codebook_size] logits = uncond_logits + guidance_scales[step] * (cond_logits - uncond_logits) else: logits = model_output logits = logits[..., :self.config.codebook_size] probs = logits.softmax(dim=-1) sampled = probs.reshape(-1, logits.size(-1)) sampled_ids = torch.multinomial(sampled, 1, generator=generator)[:, 0].view(*logits.shape[:-1]) if return_intermediate: intermediate.append(sampled_ids) unknown_map = input_ids == self.config.mask_token_id sampled_ids = torch.where(unknown_map, sampled_ids, input_ids) ratio = 1.0 * (step + 1) / timesteps mask_ratio = noise_schedule(torch.tensor(ratio)) mask_len = (seq_len * mask_ratio).floor().unsqueeze(0).to(logits.device) mask_len = torch.max(torch.tensor([1], device=logits.device), torch.min(unknown_map.sum(dim=-1, keepdim=True) - 1, mask_len)) selected_probs = torch.gather(probs, -1, sampled_ids.long()[..., None]) selected_probs = selected_probs.squeeze(-1) selected_probs = torch.where(unknown_map, selected_probs, torch.finfo(selected_probs.dtype).max) temperature = temperatures[step] masking = mask_by_random_topk(mask_len, selected_probs, temperature, generator=generator) input_ids = torch.where(masking, self.config.mask_token_id, sampled_ids) if return_intermediate: return (sampled_ids, intermediate) return sampled_ids class ConvEmbed(nn.Module): def __init__(self, config: MaskGiTUViT_v2Config): super().__init__() self.embeddings = nn.Embedding(config.vocab_size, config.in_channels) self.layer_norm = Norm(config.in_channels, config) self.conv = nn.Conv2d(config.in_channels, config.block_out_channels[0], kernel_size=1, bias=config.use_bias) def forward(self, input_ids): (batch_size, seq_length) = input_ids.shape (height, width) = (int(seq_length ** 0.5), int(seq_length ** 0.5)) input_ids = input_ids.view(-1, height, width) embeddings = self.embeddings(input_ids) (embeddings, _) = self.layer_norm(embeddings) embeddings = embeddings.permute(0, 3, 1, 2) embeddings = self.conv(embeddings) return embeddings class DownsampleBlock(nn.Module): def __init__(self, channels, config: MaskGiTUViT_v2Config): 
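# Layout of each down block: an optional strided-conv downsample (only when
# force_down_up_sample is set; the default config keeps a single 768-channel
# stage), followed by num_res_blocks pairs of an AdaLN-conditioned ResBlock
# and an AttentionBlock2D that cross-attends to the projected text encoder
# states.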
super().__init__() if config.force_down_up_sample: self.downsample = nn.Sequential(Norm2D(channels, config), nn.Conv2d(channels, channels, kernel_size=2, stride=2, bias=config.use_bias)) else: self.downsample = None self.res_blocks = nn.ModuleList([ResBlock(channels, config) for _ in range(config.num_res_blocks)]) self.attention_blocks = nn.ModuleList([AttentionBlock2D(channels, config) for _ in range(config.num_res_blocks)]) self.gradient_checkpointing = False def forward(self, x, cond_embeds, encoder_hidden_states): if self.downsample is not None: x = self.downsample(x) for (res_block, attention_block) in zip(self.res_blocks, self.attention_blocks): if self.training and self.gradient_checkpointing: res_block_ = lambda *args: checkpoint(res_block, *args) attention_block_ = lambda *args: checkpoint(attention_block, *args) else: res_block_ = res_block attention_block_ = attention_block x = res_block_(x, cond_embeds) x = attention_block_(x, encoder_hidden_states) return x class UpsampleBlock(nn.Module): def __init__(self, channels: int, config: MaskGiTUViT_v2Config): super().__init__() self.res_blocks = nn.ModuleList([ResBlock(channels, config) for i in range(config.num_res_blocks)]) self.attention_blocks = nn.ModuleList([AttentionBlock2D(channels, config) for _ in range(config.num_res_blocks)]) if config.force_down_up_sample: self.upsample = nn.Sequential(Norm2D(channels, config), nn.ConvTranspose2d(channels, channels, kernel_size=2, stride=2, bias=config.use_bias)) else: self.upsample = None self.gradient_checkpointing = False def forward(self, x, cond_embeds, encoder_hidden_states): for (res_block, attention_block) in zip(self.res_blocks, self.attention_blocks): if self.training and self.gradient_checkpointing: res_block_ = lambda *args: checkpoint(res_block, *args) attention_block_ = lambda *args: checkpoint(attention_block, *args) else: res_block_ = res_block attention_block_ = attention_block x = res_block_(x, cond_embeds) x = attention_block_(x, encoder_hidden_states) if self.upsample is not None: x = self.upsample(x) return x class ResBlock(nn.Module): def __init__(self, channels, config: MaskGiTUViT_v2Config, res_ffn_factor=4): super().__init__() self.depthwise = nn.Conv2d(channels, channels, kernel_size=3, padding=1, groups=channels, bias=config.use_bias) self.norm = Norm2D(channels, config) self.channelwise = nn.Sequential(nn.Linear(channels, int(channels * res_ffn_factor), bias=config.use_bias), nn.GELU(), GlobalResponseNorm(int(channels * res_ffn_factor)), nn.Dropout(config.hidden_dropout), nn.Linear(int(channels * res_ffn_factor), channels, bias=config.use_bias)) self.adaLN_modulation = AdaLNModulation(channels, config) def forward(self, x, cond_embeds): x_res = x x = self.norm(self.depthwise(x)).permute(0, 2, 3, 1) x = self.channelwise(x).permute(0, 3, 1, 2) x = x + x_res x = self.adaLN_modulation(x, cond_embeds) return x class Norm2D(nn.Module): def __init__(self, dim, config: MaskGiTUViT_v2Config): super().__init__() self.norm = Norm(dim, config) def forward(self, x): x = x.permute(0, 2, 3, 1) (x, _) = self.norm(x) x = x.permute(0, 3, 1, 2) return x def Norm(dim, config: MaskGiTUViT_v2Config): if config.norm_type == 'layernorm': return LayerNorm(dim, config) elif config.norm_type == 'rmsnorm': return RMSNorm(dim, config) else: assert False class RMSNorm(nn.Module): def __init__(self, dim, config: MaskGiTUViT_v2Config): super().__init__() self.config = config if isinstance(dim, numbers.Integral): dim = (dim,) self.dim = torch.Size(dim) if self.config.ln_elementwise_affine: 
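# RMSNorm rescales by the root mean square alone, with no mean subtraction:
#   y = x / sqrt(mean(x ** 2) + eps) * weight
# Both Norm variants here return a (normalized, prenorm_residual) pair so
# callers can thread the pre-norm residual stream into the next layer; the
# fused flash_attn kernels (dropout_add_rms_norm / dropout_add_layer_norm)
# fold the residual add and the norm into a single pass with the same contract.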
self.weight = nn.Parameter(torch.ones(dim)) else: self.weight = None def forward(self, input, residual=None): if self.config.use_fused_residual_norm: if dropout_add_rms_norm is None: raise ImportError('Please install flash_attn to use fused rms norm') return dropout_add_rms_norm(input, residual, self.weight, None, dropout_p=0.0, epsilon=self.config.layer_norm_eps, prenorm=True) else: return unfused_rms_norm(input, residual, self.weight, self.config.layer_norm_eps) def unfused_rms_norm(input, residual, weight, eps): if residual is not None: input = input + residual prenorm_residual = input input_dtype = input.dtype variance = input.to(torch.float32).pow(2).mean(-1, keepdim=True) input = input * torch.rsqrt(variance + eps) if weight is not None: if weight.dtype in [torch.float16, torch.bfloat16]: input = input.to(weight.dtype) input = input * weight else: input = input.to(input_dtype) return (input, prenorm_residual) class LayerNorm(nn.Module): def __init__(self, dim, config: MaskGiTUViT_v2Config): super().__init__() self.config = config if isinstance(dim, numbers.Integral): dim = (dim,) self.dim = torch.Size(dim) if self.config.ln_elementwise_affine: self.weight = nn.Parameter(torch.ones(dim)) self.bias = nn.Parameter(torch.zeros(dim)) if self.config.use_bias else None else: self.weight = None self.bias = None def forward(self, input, residual=None): if self.config.use_fused_residual_norm: if dropout_add_layer_norm is None: raise ImportError('Please install flash_attn to use fused layer norm') return dropout_add_layer_norm(x0=input, residual=residual, weight=self.weight, bias=self.bias, epsilon=self.config.layer_norm_eps, dropout_p=0.0, prenorm=True) else: return unfused_layer_norm(input, residual, self.dim, self.weight, self.bias, self.config.layer_norm_eps) def unfused_layer_norm(input, residual, dim, weight, bias, eps): if residual is not None: input = input + residual prenorm_residual = input input = F.layer_norm(input, dim, weight, bias, eps) return (input, prenorm_residual) class GlobalResponseNorm(nn.Module): def __init__(self, dim): super().__init__() self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) def forward(self, x): Gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True) Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-06) return self.gamma * (x * Nx) + self.beta + x class TransformerLayer(nn.Module): def __init__(self, config: MaskGiTUViT_v2Config): super().__init__() self.attn_layer_norm = Norm(config.hidden_size, config) self.self_attn_adaLN_modulation = AdaLNModulation(config.hidden_size, config) self.attention = Attention(config.hidden_size, config.hidden_size, config.num_attention_heads, config) self.crossattn_layer_norm = Norm(config.hidden_size, config) self.crossattention = Attention(config.hidden_size, config.hidden_size, config.num_attention_heads, config) self.cross_attn_adaLN_modulation = AdaLNModulation(config.hidden_size, config) self.ffn = FeedForward(config) def forward(self, hidden_states, encoder_hidden_states, cond_embeds, residual=None): (hidden_states, residual) = self.attn_layer_norm(hidden_states, residual=residual) hidden_states = self.self_attn_adaLN_modulation(hidden_states, cond_embeds) hidden_states = self.attention(hidden_states, hidden_states) (hidden_states, residual) = self.crossattn_layer_norm(hidden_states, residual=residual) hidden_states = self.cross_attn_adaLN_modulation(hidden_states, cond_embeds) hidden_states = self.crossattention(hidden_states, encoder_hidden_states) (hidden_states, 
residual) = self.ffn(hidden_states, cond_embeds=cond_embeds, residual=residual) return (hidden_states, residual) class AttentionBlock2D(nn.Module): def __init__(self, hidden_size: int, config: MaskGiTUViT_v2Config): super().__init__() if config.hidden_size != hidden_size: self.kv_mapper = nn.Linear(config.hidden_size, hidden_size, bias=config.use_bias) else: self.kv_mapper = None encoder_hidden_size = hidden_size self.attn_layer_norm = Norm(hidden_size, config) self.attention = Attention(hidden_size, encoder_hidden_size, config.block_num_heads, config) self.crossattn_layer_norm = Norm(hidden_size, config) self.crossattention = Attention(hidden_size, encoder_hidden_size, config.block_num_heads, config) def forward(self, hidden_states, encoder_hidden_states): (batch_size, channels, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channels, height * width).permute(0, 2, 1) if self.kv_mapper is not None: encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states)) (hidden_states, residual) = self.attn_layer_norm(hidden_states) hidden_states = self.attention(hidden_states, encoder_hidden_states) (hidden_states, residual) = self.crossattn_layer_norm(hidden_states, residual) hidden_states = self.crossattention(hidden_states, encoder_hidden_states) hidden_states = hidden_states + residual hidden_states = hidden_states.permute(0, 2, 1).view(batch_size, channels, height, width) return hidden_states class Attention(nn.Module): def __init__(self, hidden_size: int, context_dim: int, num_heads: int, config: MaskGiTUViT_v2Config): super().__init__() self.config = config self.hidden_size = hidden_size self.num_heads = num_heads self.head_dim = self.hidden_size // num_heads if self.hidden_size % self.num_heads != 0: raise ValueError(f'self.hidden_size: {self.hidden_size} must be divisible by self.num_heads: {self.num_heads}') self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype()) self.query = nn.Linear(self.hidden_size, self.hidden_size, bias=self.config.use_bias) self.key = nn.Linear(context_dim, self.hidden_size, bias=self.config.use_bias) self.value = nn.Linear(context_dim, self.hidden_size, bias=self.config.use_bias) self.out = nn.Linear(self.hidden_size, self.hidden_size, bias=self.config.use_bias) self.dropout = nn.Dropout(self.config.attention_dropout) self.use_memory_efficient_attention_xformers = False self.xformers_attention_op = None def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable]=None): if use_memory_efficient_attention_xformers and (not is_xformers_available): raise ImportError('Please install xformers to use memory efficient attention') self.use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers self.xformers_attention_op = attention_op def forward(self, hidden_states, context): (batch, q_seq_len, _) = hidden_states.shape kv_seq_len = context.shape[1] query = self.query(hidden_states) key = self.key(context) value = self.value(context) query = query.view(batch, q_seq_len, self.num_heads, self.head_dim) key = key.view(batch, kv_seq_len, self.num_heads, self.head_dim) value = value.view(batch, kv_seq_len, self.num_heads, self.head_dim) if self.use_memory_efficient_attention_xformers: attn_output = xops.memory_efficient_attention(query, key, value, op=self.xformers_attention_op, p=self.config.attention_dropout if self.training else 0.0) attn_output = attn_output.view(batch, q_seq_len, 
self.hidden_size) else: attn_output = self.attention(query, key, value) attn_output = self.out(attn_output) return attn_output def attention(self, query, key, value, attention_mask=None): (batch, seq_len) = query.shape[:2] kv_seq_len = key.shape[1] (query, key, value) = map(lambda t: t.transpose(1, 2).contiguous(), (query, key, value)) attn_weights = torch.baddbmm(input=torch.zeros(batch * self.num_heads, seq_len, kv_seq_len, dtype=query.dtype, device=query.device), batch1=query.view(batch * self.num_heads, seq_len, self.head_dim), batch2=key.view(batch * self.num_heads, kv_seq_len, self.head_dim).transpose(1, 2), alpha=1 / self.scale_attn) attn_weights = attn_weights.view(batch, self.num_heads, seq_len, kv_seq_len) if attention_mask is not None: attn_weights = torch.masked_fill(attn_weights, attention_mask, torch.finfo(query.dtype).min) attn_weights = F.softmax(attn_weights, dim=-1) attn_weights = self.dropout(attn_weights) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous().view(batch, seq_len, self.hidden_size) return attn_output def FeedForward(config: MaskGiTUViT_v2Config): if config.use_fused_mlp: return FusedGeLUFeedForward(config) else: return GLUFeedForward(config) class GLUFeedForward(nn.Module): def __init__(self, config: MaskGiTUViT_v2Config): super().__init__() self.pre_mlp_layer_norm = LayerNorm(config.hidden_size, config) self.adaLN_modulation = AdaLNModulation(config.hidden_size, config) self.wi_0 = nn.Linear(config.hidden_size, config.intermediate_size, bias=config.use_bias) self.wi_1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=config.use_bias) self.dropout = nn.Dropout(config.hidden_dropout) self.wo = nn.Linear(config.intermediate_size, config.hidden_size, bias=config.use_bias) def forward(self, hidden_states, cond_embeds, residual=None): (hidden_states, residual) = self.pre_mlp_layer_norm(hidden_states, residual=residual) hidden_states = self.adaLN_modulation(hidden_states, cond_embeds) hidden_gelu = F.gelu(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return (hidden_states, residual) class FusedGeLUFeedForward(nn.Module): def __init__(self, config: MaskGiTUViT_v2Config): super().__init__() self.pre_mlp_layer_norm = LayerNorm(config.hidden_size, config) self.adaLN_modulation = AdaLNModulation(config.hidden_size, config) self.wi_0 = nn.Linear(config.hidden_size, config.intermediate_size, bias=config.use_bias) self.dropout = nn.Dropout(config.hidden_dropout) self.wo = nn.Linear(config.intermediate_size, config.hidden_size, bias=config.use_bias) def forward(self, hidden_states, cond_embeds, residual=None): if fused_mlp_func is None: raise ImportError('Please install flash_attn to use fused mlp') (hidden_states, residual) = self.pre_mlp_layer_norm(hidden_states, residual=residual) hidden_states = self.adaLN_modulation(hidden_states, cond_embeds) dtype = hidden_states.dtype if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype() cuda_ver = tuple(map(int, torch.version.cuda.split('.'))) if torch.cuda.get_device_capability('cuda') == (9, 0): heuristic = -1 elif cuda_ver >= (11, 8): heuristic = 0 elif dtype == torch.float16: heuristic = 1 else: heuristic = -1 hidden_states = fused_mlp_func(hidden_states, self.wi_0.weight, self.wo.weight, self.wi_0.bias, self.wo.bias, activation='gelu_approx', save_pre_act=self.training, return_residual=False, 
checkpoint_lvl=0, heuristic=heuristic) return (hidden_states, residual) class ConvMlmLayer(nn.Module): def __init__(self, config: MaskGiTUViT_v2Config): super().__init__() self.config = config self.conv1 = nn.Conv2d(self.config.block_out_channels[0], self.config.in_channels, kernel_size=1, bias=self.config.use_bias) self.layer_norm = Norm2D(self.config.in_channels, config) self.conv2 = nn.Conv2d(self.config.in_channels, self.config.codebook_size, kernel_size=1, bias=self.config.use_bias) def forward(self, hidden_states): (batch_size, seq_length, hidden_size) = hidden_states.shape resolution = int(seq_length ** 0.5) hidden_states = hidden_states.view(batch_size, resolution, resolution, hidden_size).permute(0, 3, 1, 2) hidden_states = self.conv1(hidden_states) hidden_states = self.layer_norm(hidden_states) logits = self.conv2(hidden_states) logits = logits.permute(0, 2, 3, 1).view(batch_size, -1, self.config.codebook_size) return logits class AdaLNModulation(nn.Module): def __init__(self, hidden_size: int, config: MaskGiTUViT_v2Config): super().__init__() self.mapper = nn.Linear(config.hidden_size, hidden_size * 2, bias=config.use_bias) def forward(self, hidden_states, cond_embeds): cond_embeds = F.silu(cond_embeds) (scale, shift) = self.mapper(cond_embeds).chunk(2, dim=1) if hidden_states.dim() > 3: (scale, shift) = (scale[:, :, None, None], shift[:, :, None, None]) else: (scale, shift) = (scale[:, None], shift[:, None]) return hidden_states * (1 + scale) + shift # File: open-muse-main/muse/modeling_utils.py import functools import inspect import json import os from collections import OrderedDict from functools import partial from pathlib import PosixPath from typing import Any, Callable, Dict, List, Optional, Tuple, Union import accelerate import numpy as np import torch from accelerate.utils import set_module_tensor_to_device from huggingface_hub import hf_hub_download from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError from requests import HTTPError from torch import Tensor, device from . 
import __version__, logging logger = logging.get_logger(__name__) hf_cache_home = os.path.expanduser(os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))) default_cache_path = os.path.join(hf_cache_home, 'muse') CONFIG_NAME = 'config.json' WEIGHTS_NAME = 'pytorch_model.bin' SAFETENSORS_WEIGHTS_NAME = 'pytorch_model.safetensors' HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co' MUSE_CACHE = default_cache_path MUSE_DYNAMIC_MODULE_NAME = 'muse_modules' HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules')) _LOW_CPU_MEM_USAGE_DEFAULT = True def get_parameter_device(parameter: torch.nn.Module): try: return next(parameter.parameters()).device except StopIteration: def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for (k, v) in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].device def get_parameter_dtype(parameter: torch.nn.Module): try: return next(parameter.parameters()).dtype except StopIteration: def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for (k, v) in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].dtype def load_state_dict(checkpoint_file: Union[str, os.PathLike]): try: if os.path.basename(checkpoint_file) == WEIGHTS_NAME: return torch.load(checkpoint_file, map_location='cpu') except Exception as e: try: with open(checkpoint_file) as f: if f.read().startswith('version'): raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.') else: raise ValueError(f'Unable to locate the file {checkpoint_file} which is necessary to load this pretrained model. Make sure you have saved the model properly.') from e except (UnicodeDecodeError, ValueError): raise OSError(f"Unable to load weights from checkpoint file for '{checkpoint_file}' at '{checkpoint_file}'.
If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True.") def _load_state_dict_into_model(model_to_load, state_dict): state_dict = state_dict.copy() error_msgs = [] def load(module: torch.nn.Module, prefix=''): args = (state_dict, prefix, {}, True, [], [], error_msgs) module._load_from_state_dict(*args) for (name, child) in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(model_to_load) return error_msgs def _get_model_file(pretrained_model_name_or_path, *, weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision): pretrained_model_name_or_path = str(pretrained_model_name_or_path) if os.path.isfile(pretrained_model_name_or_path): return pretrained_model_name_or_path elif os.path.isdir(pretrained_model_name_or_path): if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)): model_file = os.path.join(pretrained_model_name_or_path, weights_name) return model_file elif subfolder is not None and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, weights_name)): model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name) return model_file else: raise EnvironmentError(f'Error: no file named {weights_name} found in directory {pretrained_model_name_or_path}.') else: try: model_file = hf_hub_download(pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision) return model_file except RepositoryNotFoundError: raise EnvironmentError(f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo with `use_auth_token` or log in with `huggingface-cli login`.") except RevisionNotFoundError: raise EnvironmentError(f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this model name. Check the model page at 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.") except EntryNotFoundError: raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.') except HTTPError as err: raise EnvironmentError(f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}') except ValueError: raise EnvironmentError(f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a directory containing a file named {weights_name}.\nCheck your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.") except EnvironmentError: raise EnvironmentError(f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name.
Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named {weights_name}") class ModelMixin(torch.nn.Module): config_name = CONFIG_NAME _automatically_saved_args = ['_version', '_class_name', '_name_or_path'] _supports_gradient_checkpointing = False def __init__(self): super().__init__() @property def is_gradient_checkpointing(self) -> bool: return any((hasattr(m, 'gradient_checkpointing') and m.gradient_checkpointing for m in self.modules())) def enable_gradient_checkpointing(self): if not self._supports_gradient_checkpointing: raise ValueError(f'{self.__class__.__name__} does not support gradient checkpointing.') self.apply(partial(self._set_gradient_checkpointing, value=True)) def disable_gradient_checkpointing(self): if self._supports_gradient_checkpointing: self.apply(partial(self._set_gradient_checkpointing, value=False)) def set_use_memory_efficient_attention_xformers(self, valid: bool, attention_op: Optional[Callable]=None) -> None: def fn_recursive_set_mem_eff(module: torch.nn.Module): if hasattr(module, 'set_use_memory_efficient_attention_xformers'): module.set_use_memory_efficient_attention_xformers(valid, attention_op) for child in module.children(): fn_recursive_set_mem_eff(child) for module in self.children(): if isinstance(module, torch.nn.Module): fn_recursive_set_mem_eff(module) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None): self.set_use_memory_efficient_attention_xformers(True, attention_op) def disable_xformers_memory_efficient_attention(self): self.set_use_memory_efficient_attention_xformers(False) def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool=True, save_function: Callable=None, state_dict: Optional[Dict[str, torch.Tensor]]=None): if os.path.isfile(save_directory): logger.error(f'Provided path ({save_directory}) should be a directory, not a file') return if save_function is None: save_function = torch.save os.makedirs(save_directory, exist_ok=True) model_to_save = self if is_main_process: model_to_save.save_config(save_directory) if state_dict is None: state_dict = model_to_save.state_dict() weights_name = WEIGHTS_NAME save_function(state_dict, os.path.join(save_directory, weights_name)) logger.info(f'Model weights saved in {os.path.join(save_directory, weights_name)}') @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): cache_dir = kwargs.pop('cache_dir', MUSE_CACHE) ignore_mismatched_sizes = kwargs.pop('ignore_mismatched_sizes', False) force_download = kwargs.pop('force_download', False) resume_download = kwargs.pop('resume_download', False) proxies = kwargs.pop('proxies', None) output_loading_info = kwargs.pop('output_loading_info', False) local_files_only = kwargs.pop('local_files_only', False) use_auth_token = kwargs.pop('use_auth_token', None) revision = kwargs.pop('revision', None) torch_dtype = kwargs.pop('torch_dtype', None) subfolder = kwargs.pop('subfolder', None) device_map = kwargs.pop('device_map', None) low_cpu_mem_usage = kwargs.pop('low_cpu_mem_usage', _LOW_CPU_MEM_USAGE_DEFAULT) if low_cpu_mem_usage is False and device_map is not None: raise ValueError(f'You cannot set `low_cpu_mem_usage` to `False` while using device_map={device_map} for loading and dispatching. 
Please make sure to set `low_cpu_mem_usage=True`.') user_agent = {'diffusers': __version__, 'file_type': 'model', 'framework': 'pytorch'} config_path = pretrained_model_name_or_path model_file = None if model_file is None: model_file = _get_model_file(pretrained_model_name_or_path, weights_name=WEIGHTS_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, subfolder=subfolder, user_agent=user_agent) if low_cpu_mem_usage: with accelerate.init_empty_weights(): (config, unused_kwargs) = cls.load_config(config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, subfolder=subfolder, device_map=device_map, **kwargs) model = cls.from_config(config, **unused_kwargs) if device_map is None: param_device = 'cpu' state_dict = load_state_dict(model_file) missing_keys = set(model.state_dict().keys()) - set(state_dict.keys()) if len(missing_keys) > 0: raise ValueError(f"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are missing: \n {', '.join(missing_keys)}. \n Please make sure to pass `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize those weights or else make sure your checkpoint file is correct.") for (param_name, param) in state_dict.items(): accepts_dtype = 'dtype' in set(inspect.signature(set_module_tensor_to_device).parameters.keys()) if accepts_dtype: set_module_tensor_to_device(model, param_name, param_device, value=param, dtype=torch_dtype) else: set_module_tensor_to_device(model, param_name, param_device, value=param) else: accelerate.load_checkpoint_and_dispatch(model, model_file, device_map, dtype=torch_dtype) loading_info = {'missing_keys': [], 'unexpected_keys': [], 'mismatched_keys': [], 'error_msgs': []} else: (config, unused_kwargs) = cls.load_config(config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, subfolder=subfolder, device_map=device_map, **kwargs) model = cls.from_config(config, **unused_kwargs) state_dict = load_state_dict(model_file) (model, missing_keys, unexpected_keys, mismatched_keys, error_msgs) = cls._load_pretrained_model(model, state_dict, model_file, pretrained_model_name_or_path, ignore_mismatched_sizes=ignore_mismatched_sizes) loading_info = {'missing_keys': missing_keys, 'unexpected_keys': unexpected_keys, 'mismatched_keys': mismatched_keys, 'error_msgs': error_msgs} if torch_dtype is not None and (not isinstance(torch_dtype, torch.dtype)): raise ValueError(f'{torch_dtype} needs to be of type `torch.dtype`, e.g.
`torch.float16`, but is {type(torch_dtype)}.') elif torch_dtype is not None: model = model.to(torch_dtype) model.register_to_config(_name_or_path=pretrained_model_name_or_path) model.eval() if output_loading_info: return (model, loading_info) return model @classmethod def _load_pretrained_model(cls, model, state_dict, resolved_archive_file, pretrained_model_name_or_path, ignore_mismatched_sizes=False): model_state_dict = model.state_dict() loaded_keys = [k for k in state_dict.keys()] expected_keys = list(model_state_dict.keys()) original_loaded_keys = loaded_keys missing_keys = list(set(expected_keys) - set(loaded_keys)) unexpected_keys = list(set(loaded_keys) - set(expected_keys)) model_to_load = model def _find_mismatched_keys(state_dict, model_state_dict, loaded_keys, ignore_mismatched_sizes): mismatched_keys = [] if ignore_mismatched_sizes: for checkpoint_key in loaded_keys: model_key = checkpoint_key if model_key in model_state_dict and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape: mismatched_keys.append((checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)) del state_dict[checkpoint_key] return mismatched_keys if state_dict is not None: mismatched_keys = _find_mismatched_keys(state_dict, model_state_dict, original_loaded_keys, ignore_mismatched_sizes) error_msgs = _load_state_dict_into_model(model_to_load, state_dict) if len(error_msgs) > 0: error_msg = '\n\t'.join(error_msgs) if 'size mismatch' in error_msg: error_msg += '\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method.' raise RuntimeError(f'Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}') if len(unexpected_keys) > 0: logger.warning(f'Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).') else: logger.info(f'All model checkpoint weights were used when initializing {model.__class__.__name__}.\n') if len(missing_keys) > 0: logger.warning(f'Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') elif len(mismatched_keys) == 0: logger.info(f'All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use {model.__class__.__name__} for predictions without further training.') if len(mismatched_keys) > 0: mismatched_warning = '\n'.join([f'- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated' for (key, shape1, shape2) in mismatched_keys]) logger.warning(f'Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized because the shapes did not match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') return (model, missing_keys, unexpected_keys, mismatched_keys, error_msgs) @property def device(self) -> device: return get_parameter_device(self) @property def dtype(self) -> torch.dtype: return get_parameter_dtype(self) def num_parameters(self, only_trainable: bool=False, exclude_embeddings: bool=False) -> int: if exclude_embeddings: embedding_param_names = [f'{name}.weight' for (name, module_type) in self.named_modules() if isinstance(module_type, torch.nn.Embedding)] non_embedding_parameters = [parameter for (name, parameter) in self.named_parameters() if name not in embedding_param_names] return sum((p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)) else: return sum((p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)) '' class FrozenDict(OrderedDict): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) for (key, value) in self.items(): setattr(self, key, value) self.__frozen = True def __delitem__(self, *args, **kwargs): raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.') def setdefault(self, *args, **kwargs): raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.') def pop(self, *args, **kwargs): raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.') def update(self, *args, **kwargs): raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.') def __setattr__(self, name, value): if hasattr(self, '__frozen') and self.__frozen: raise Exception(f'You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.') super().__setattr__(name, value) def __setitem__(self, name, value): if hasattr(self, '__frozen') and self.__frozen: raise Exception(f'You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.') super().__setitem__(name, value) class 
ConfigMixin: config_name = None ignore_for_config = [] has_compatibles = False _deprecated_kwargs = [] def register_to_config(self, **kwargs): if self.config_name is None: raise NotImplementedError(f'Make sure that {self.__class__} has defined a class name `config_name`') kwargs.pop('kwargs', None) for (key, value) in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error(f"Can't set {key} with value {value} for {self}") raise err if not hasattr(self, '_internal_dict'): internal_dict = kwargs else: previous_dict = dict(self._internal_dict) internal_dict = {**self._internal_dict, **kwargs} logger.debug(f'Updating config from {previous_dict} to {internal_dict}') self._internal_dict = FrozenDict(internal_dict) def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs): if os.path.isfile(save_directory): raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file') os.makedirs(save_directory, exist_ok=True) output_config_file = os.path.join(save_directory, self.config_name) self.to_json_file(output_config_file) logger.info(f'Configuration saved in {output_config_file}') @classmethod def from_config(cls, config: Union[FrozenDict, Dict[str, Any]]=None, **kwargs): if 'pretrained_model_name_or_path' in kwargs: config = kwargs.pop('pretrained_model_name_or_path') if config is None: raise ValueError('Please make sure to provide a config as the first positional argument.') model = cls(**config) return model @classmethod def load_config(cls, pretrained_model_name_or_path: Union[str, os.PathLike], return_unused_kwargs=False, **kwargs) -> Tuple[Dict[str, Any], Dict[str, Any]]: cache_dir = kwargs.pop('cache_dir', MUSE_CACHE) force_download = kwargs.pop('force_download', False) resume_download = kwargs.pop('resume_download', False) proxies = kwargs.pop('proxies', None) use_auth_token = kwargs.pop('use_auth_token', None) local_files_only = kwargs.pop('local_files_only', False) revision = kwargs.pop('revision', None) _ = kwargs.pop('mirror', None) subfolder = kwargs.pop('subfolder', None) user_agent = {'file_type': 'config'} pretrained_model_name_or_path = str(pretrained_model_name_or_path) if cls.config_name is None: raise ValueError('`self.config_name` is not defined. Note that one should not load a config from `ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`') if os.path.isfile(pretrained_model_name_or_path): config_file = pretrained_model_name_or_path elif os.path.isdir(pretrained_model_name_or_path): if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)): config_file = os.path.join(pretrained_model_name_or_path, cls.config_name) elif subfolder is not None and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)): config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name) else: raise EnvironmentError(f'Error: no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.') else: try: config_file = hf_hub_download(pretrained_model_name_or_path, filename=cls.config_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision) except RepositoryNotFoundError: raise EnvironmentError(f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo with `use_auth_token` or log in with `huggingface-cli login`.") except RevisionNotFoundError: raise EnvironmentError(f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this model name. Check the model page at 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.") except EntryNotFoundError: raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.') except HTTPError as err: raise EnvironmentError(f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}') except ValueError: raise EnvironmentError(f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a directory containing a {cls.config_name} file.\nCheck your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.") except EnvironmentError: raise EnvironmentError(f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name.
Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a {cls.config_name} file") try: config_dict = cls._dict_from_json_file(config_file) except (json.JSONDecodeError, UnicodeDecodeError): raise EnvironmentError(f"It looks like the config file at '{config_file}' is not a valid JSON file.") if return_unused_kwargs: return (config_dict, kwargs) return config_dict @staticmethod def _get_init_keys(cls): return set(dict(inspect.signature(cls.__init__).parameters).keys()) @classmethod def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]): with open(json_file, 'r', encoding='utf-8') as reader: text = reader.read() return json.loads(text) def __repr__(self): return f'{self.__class__.__name__} {self.to_json_string()}' @property def config(self) -> Dict[str, Any]: return self._internal_dict def to_json_string(self) -> str: config_dict = self._internal_dict if hasattr(self, '_internal_dict') else {} config_dict['_class_name'] = self.__class__.__name__ config_dict['_version'] = __version__ def to_json_saveable(value): if isinstance(value, np.ndarray): value = value.tolist() elif isinstance(value, PosixPath): value = str(value) return value config_dict = {k: to_json_saveable(v) for (k, v) in config_dict.items()} return json.dumps(config_dict, indent=2, sort_keys=True) + '\n' def to_json_file(self, json_file_path: Union[str, os.PathLike]): with open(json_file_path, 'w', encoding='utf-8') as writer: writer.write(self.to_json_string()) def register_to_config(init): @functools.wraps(init) def inner_init(self, *args, **kwargs): init_kwargs = {k: v for (k, v) in kwargs.items() if not k.startswith('_')} config_init_kwargs = {k: v for (k, v) in kwargs.items() if k.startswith('_')} if not isinstance(self, ConfigMixin): raise RuntimeError(f'`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does not inherit from `ConfigMixin`.') ignore = getattr(self, 'ignore_for_config', []) new_kwargs = {} signature = inspect.signature(init) parameters = {name: p.default for (i, (name, p)) in enumerate(signature.parameters.items()) if i > 0 and name not in ignore} for (arg, name) in zip(args, parameters.keys()): new_kwargs[name] = arg new_kwargs.update({k: init_kwargs.get(k, default) for (k, default) in parameters.items() if k not in ignore and k not in new_kwargs}) new_kwargs = {**config_init_kwargs, **new_kwargs} getattr(self, 'register_to_config')(**new_kwargs) init(self, *args, **init_kwargs) return inner_init # File: open-muse-main/muse/pipeline_muse.py import os from typing import List, Optional, Union, Tuple import numpy as np import torch from PIL import Image from transformers import AutoTokenizer, CLIPConfig, CLIPTextModel, CLIPTextModelWithProjection, PreTrainedTokenizer, T5EncoderModel from .modeling_maskgit_vqgan import MaskGitVQGAN from .modeling_movq import MOVQ from .modeling_paella_vq import PaellaVQModel from .modeling_taming_vqgan import VQGANModel from .modeling_transformer import MaskGitTransformer, MaskGiTUViT from .sampling import get_mask_chedule class PipelineMuse: def __init__(self, vae: Union[VQGANModel, MOVQ, MaskGitVQGAN], transformer: Union[MaskGitTransformer, MaskGiTUViT], is_class_conditioned: bool=False, text_encoder: Optional[Union[T5EncoderModel, CLIPTextModel]]=None, tokenizer: Optional[PreTrainedTokenizer]=None) -> None: self.text_encoder = text_encoder self.tokenizer = tokenizer self.vae = vae self.transformer = transformer self.is_class_conditioned = is_class_conditioned self.device = 'cpu' 
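# Minimal usage sketch (in comments; the hub id below is illustrative rather
# than a guaranteed checkpoint name):
#
#     pipe = PipelineMuse.from_pretrained('openMUSE/muse-512').to('cuda', dtype=torch.float16)
#     images = pipe('A fantasy landscape', timesteps=16, guidance_scale=10.0)
#     images[0].save('out.png')
#
# For v2 transformers with add_micro_cond_embeds, orig_size (2 values),
# crop_coords (2) and aesthetic_score (1) are packed into five scalars and
# each is sinusoidally encoded with micro_cond_encode_dim = 256 dimensions,
# which is where the default 1280-dim micro_cond_embed_dim comes from
# (5 * 256 = 1280).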
def to(self, device='cpu', dtype=torch.float32) -> None: self.device = device self.dtype = dtype if not self.is_class_conditioned: self.text_encoder.to(device, dtype=dtype) self.transformer.to(device, dtype=dtype) self.vae.to(device, dtype=torch.float32) return self @torch.no_grad() def __call__(self, text: Optional[Union[str, List[str]]]=None, negative_text: Optional[Union[str, List[str]]]='', prompt_embeds: Optional[torch.Tensor]=None, pooled_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_embeds: Optional[torch.Tensor]=None, class_ids: Optional[Union[int, List[int]]]=None, timesteps: int=16, noise_schedule: str='cosine', guidance_scale: float=10.0, guidance_schedule=None, temperature: Union[float, Tuple[float]]=(2, 0), topk_filter_thres: float=0.9, num_images_per_prompt: int=1, use_maskgit_generate: bool=True, generator: Optional[torch.Generator]=None, use_fp16: bool=False, noise_type='mask', predict_all_tokens=False, orig_size=(512, 512), crop_coords=(0, 0), aesthetic_score=6.0, return_intermediate: bool=False, use_tqdm=True, transformer_seq_len=None, clip_skip: int=None): if text is None and class_ids is None: raise ValueError('Either text or class_ids must be provided.') if text is not None and class_ids is not None: raise ValueError('Only one of text or class_ids may be provided.') if class_ids is not None: if isinstance(class_ids, int): class_ids = [class_ids] class_ids = torch.tensor(class_ids, device=self.device, dtype=torch.long) class_ids = class_ids.repeat_interleave(num_images_per_prompt, dim=0) model_inputs = {'class_ids': class_ids} else: if isinstance(text, str): text = [text] if prompt_embeds is None: input_ids = self.tokenizer(text, return_tensors='pt', padding='max_length', truncation=True, max_length=self.tokenizer.model_max_length).input_ids input_ids = input_ids.to(self.device) if self.transformer.config.add_cond_embeds: if prompt_embeds is not None and pooled_embeds is not None: (pooled_embeds, encoder_hidden_states) = (pooled_embeds, prompt_embeds) pooled_embeds = pooled_embeds.to(self.device, dtype=self.text_encoder.dtype) encoder_hidden_states = encoder_hidden_states.to(self.device, dtype=self.text_encoder.dtype) else: clip_layer_idx = -(clip_skip + 1) if clip_skip is not None else -2 outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) (pooled_embeds, encoder_hidden_states) = (outputs.text_embeds, outputs.hidden_states[clip_layer_idx]) else: encoder_hidden_states = self.text_encoder(input_ids).last_hidden_state pooled_embeds = None if negative_text is not None: if isinstance(negative_text, str): negative_text = [negative_text] * len(text) negative_input_ids = self.tokenizer(negative_text, return_tensors='pt', padding='max_length', truncation=True, max_length=self.tokenizer.model_max_length).input_ids negative_input_ids = negative_input_ids.to(self.device) if self.transformer.config.add_cond_embeds: outputs = self.text_encoder(negative_input_ids, return_dict=True, output_hidden_states=True) negative_pooled_embeds = outputs.text_embeds negative_encoder_hidden_states = outputs.hidden_states[-2] else: negative_encoder_hidden_states = self.text_encoder(negative_input_ids).last_hidden_state negative_pooled_embeds = None elif negative_prompt_embeds is not None: negative_encoder_hidden_states = negative_prompt_embeds.to(self.device, dtype=self.text_encoder.dtype) negative_pooled_embeds = negative_pooled_embeds.to(self.device, dtype=self.text_encoder.dtype) else: 
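# Neither `negative_text` nor precomputed negative embeddings were given, so
# both are left unset; the empty-prompt embeddings computed a few lines below
# then stand in as the unconditional branch for classifier-free guidance.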
negative_encoder_hidden_states = None negative_pooled_embeds = None (bs_embed, seq_len, _) = encoder_hidden_states.shape encoder_hidden_states = encoder_hidden_states.repeat(1, num_images_per_prompt, 1) encoder_hidden_states = encoder_hidden_states.view(bs_embed * num_images_per_prompt, seq_len, -1) if pooled_embeds is not None: (bs_embed, _) = pooled_embeds.shape pooled_embeds = pooled_embeds.repeat(1, num_images_per_prompt) pooled_embeds = pooled_embeds.view(bs_embed * num_images_per_prompt, -1) if negative_pooled_embeds is not None: (bs_embed, _) = negative_pooled_embeds.shape negative_pooled_embeds = negative_pooled_embeds.repeat(1, num_images_per_prompt) negative_pooled_embeds = negative_pooled_embeds.view(bs_embed * num_images_per_prompt, -1) if negative_encoder_hidden_states is not None: (bs_embed, seq_len, _) = negative_encoder_hidden_states.shape negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) negative_encoder_hidden_states = negative_encoder_hidden_states.view(bs_embed * num_images_per_prompt, seq_len, -1) if negative_encoder_hidden_states is None: empty_input = self.tokenizer('', padding='max_length', return_tensors='pt').input_ids.to(self.text_encoder.device) outputs = self.text_encoder(empty_input, output_hidden_states=True) empty_embeds = outputs.hidden_states[-2] empty_cond_embeds = outputs[0] else: (empty_embeds, empty_cond_embeds) = (None, None) model_inputs = {'encoder_hidden_states': encoder_hidden_states, 'negative_embeds': negative_encoder_hidden_states, 'cond_embeds': pooled_embeds, 'negative_cond_embeds': negative_pooled_embeds, 'empty_embeds': empty_embeds, 'empty_cond_embeds': empty_cond_embeds} if self.transformer.config.add_micro_cond_embeds: micro_conds = list(orig_size) + list(crop_coords) + [aesthetic_score] micro_conds = torch.tensor(micro_conds, device=self.device, dtype=encoder_hidden_states.dtype) micro_conds = micro_conds.unsqueeze(0) model_inputs['micro_conds'] = micro_conds generate = self.transformer.generate if use_maskgit_generate: generate = self.transformer.generate2 with torch.autocast('cuda', enabled=use_fp16): outputs = generate(**model_inputs, timesteps=timesteps, guidance_scale=guidance_scale, guidance_schedule=guidance_schedule, temperature=temperature, topk_filter_thres=topk_filter_thres, generator=generator, noise_type=noise_type, noise_schedule=get_mask_chedule(noise_schedule), predict_all_tokens=predict_all_tokens, return_intermediate=return_intermediate, use_tqdm=use_tqdm, seq_len=transformer_seq_len) if return_intermediate: (generated_tokens, intermediate) = outputs else: generated_tokens = outputs images = self.vae.decode_code(generated_tokens) if return_intermediate: intermediate_images = [self.vae.decode_code(tokens) for tokens in intermediate] images = [self.to_pil_image(image) for image in images] if return_intermediate: intermediate_images = [[self.to_pil_image(image) for image in images] for images in intermediate_images] return (images, intermediate_images) return images def to_pil_image(self, image: torch.Tensor): image = image.permute(1, 2, 0).cpu().numpy() image = 2.0 * image - 1.0 image = np.clip(image, -1.0, 1.0) image = (image + 1.0) / 2.0 image = (255 * image).astype(np.uint8) image = Image.fromarray(image).convert('RGB') return image @classmethod def from_pretrained(cls, model_name_or_path: str=None, text_encoder_path: Optional[str]=None, vae_path: Optional[str]=None, transformer_path: Optional[str]=None, vae=None, text_encoder=None, transformer=None, 
is_class_conditioned: bool=False) -> None: if model_name_or_path is None: if text_encoder_path is None or vae_path is None or transformer_path is None: raise ValueError('If model_name_or_path is None, then text_encoder_path, vae_path, and transformer_path must be provided.') text_encoder_args = None tokenizer_args = None if not is_class_conditioned: text_encoder_args = {'pretrained_model_name_or_path': text_encoder_path} tokenizer_args = {'pretrained_model_name_or_path': text_encoder_path} vae_args = {'pretrained_model_name_or_path': vae_path} transformer_args = {'pretrained_model_name_or_path': transformer_path} else: text_encoder_args = None tokenizer_args = None if not is_class_conditioned: text_encoder_args = {'pretrained_model_name_or_path': model_name_or_path, 'subfolder': 'text_encoder'} tokenizer_args = {'pretrained_model_name_or_path': model_name_or_path, 'subfolder': 'text_encoder'} vae_args = {'pretrained_model_name_or_path': model_name_or_path, 'subfolder': 'vae'} transformer_args = {'pretrained_model_name_or_path': model_name_or_path, 'subfolder': 'transformer'} if not is_class_conditioned: if text_encoder is None: text_encoder = CLIPTextModelWithProjection.from_pretrained(**text_encoder_args) tokenizer = AutoTokenizer.from_pretrained(**tokenizer_args) transformer_config = MaskGitTransformer.load_config(**transformer_args) if transformer is not None: ... elif transformer_config['_class_name'] == 'MaskGitTransformer': transformer = MaskGitTransformer.from_pretrained(**transformer_args) elif transformer_config['_class_name'] == 'MaskGiTUViT' or transformer_config['_class_name'] == 'MaskGiTUViT_v2': transformer = MaskGiTUViT.from_pretrained(**transformer_args) else: raise ValueError(f"Unknown Transformer class: {transformer_config['_class_name']}") vae_config = MaskGitVQGAN.load_config(**vae_args) if vae is not None: ... 
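# A caller-supplied `vae` short-circuits the dispatch; otherwise the
# '_class_name' field that ConfigMixin.to_json_string() records in every saved
# config selects the concrete VQ model class below, mirroring the transformer
# dispatch above.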
elif vae_config['_class_name'] == 'VQGANModel': vae = VQGANModel.from_pretrained(**vae_args) elif vae_config['_class_name'] == 'MaskGitVQGAN': vae = MaskGitVQGAN.from_pretrained(**vae_args) elif vae_config['_class_name'] == 'MOVQ': vae = MOVQ.from_pretrained(**vae_args) elif vae_config['_class_name'] == 'PaellaVQModel': vae = PaellaVQModel.from_pretrained(**vae_args) else: raise ValueError(f"Unknown VAE class: {vae_config['_class_name']}") if is_class_conditioned: return cls(vae=vae, transformer=transformer, is_class_conditioned=is_class_conditioned) return cls(vae=vae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, is_class_conditioned=is_class_conditioned) def save_pretrained(self, save_directory: Union[str, os.PathLike]) -> None: if not self.is_class_conditioned: self.text_encoder.save_pretrained(os.path.join(save_directory, 'text_encoder')) self.tokenizer.save_pretrained(os.path.join(save_directory, 'text_encoder')) self.vae.save_pretrained(os.path.join(save_directory, 'vae')) self.transformer.save_pretrained(os.path.join(save_directory, 'transformer')) class PipelineMuseInpainting(PipelineMuse): @torch.no_grad() def __call__(self, image: Image, mask: torch.BoolTensor, text: Optional[Union[str, List[str]]]=None, negative_text: Optional[Union[str, List[str]]]=None, class_ids: torch.LongTensor=None, timesteps: int=8, guidance_scale: float=8.0, guidance_schedule=None, temperature: float=1.0, topk_filter_thres: float=0.9, num_images_per_prompt: int=1, use_maskgit_generate: bool=True, generator: Optional[torch.Generator]=None, use_fp16: bool=False, image_size: int=256, orig_size=(256, 256), crop_coords=(0, 0), aesthetic_score=6.0): from torchvision import transforms assert use_maskgit_generate if text is None and class_ids is None: raise ValueError('Either text or class_ids must be provided.') if text is not None and class_ids is not None: raise ValueError('Only one of text or class_ids may be provided.') encode_transform = transforms.Compose([transforms.Resize(image_size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(image_size), transforms.ToTensor()]) pixel_values = encode_transform(image).unsqueeze(0).to(self.device) (_, image_tokens) = self.vae.encode(pixel_values) mask_token_id = self.transformer.config.mask_token_id image_tokens[mask[None]] = mask_token_id image_tokens = image_tokens.repeat(num_images_per_prompt, 1) if class_ids is not None: if isinstance(class_ids, int): class_ids = [class_ids] class_ids = torch.tensor(class_ids, device=self.device, dtype=torch.long) class_ids = class_ids.repeat_interleave(num_images_per_prompt, dim=0) model_inputs = {'class_ids': class_ids} else: if isinstance(text, str): text = [text] input_ids = self.tokenizer(text, return_tensors='pt', padding='max_length', truncation=True, max_length=self.tokenizer.model_max_length).input_ids input_ids = input_ids.to(self.device) if self.transformer.config.add_cond_embeds: outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) (pooled_embeds, encoder_hidden_states) = (outputs.text_embeds, outputs.hidden_states[-2]) else: encoder_hidden_states = self.text_encoder(input_ids).last_hidden_state pooled_embeds = None if negative_text is not None: if isinstance(negative_text, str): negative_text = [negative_text] negative_input_ids = self.tokenizer(negative_text, return_tensors='pt', padding='max_length', truncation=True, max_length=self.tokenizer.model_max_length).input_ids negative_input_ids = negative_input_ids.to(self.device) 
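# How the two branches are combined is an assumption here (the actual logic
# lives in the transformer's generate2, which is not shown in this file), but
# the standard classifier-free guidance update is:
#     logits = negative_logits + guidance_scale * (positive_logits - negative_logits)
# with the negative prompt (or the empty prompt) supplying the unconditional
# branch that the guidance pushes away from.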
negative_encoder_hidden_states = self.text_encoder(negative_input_ids).last_hidden_state else: negative_encoder_hidden_states = None (bs_embed, seq_len, _) = encoder_hidden_states.shape encoder_hidden_states = encoder_hidden_states.repeat(1, num_images_per_prompt, 1) encoder_hidden_states = encoder_hidden_states.view(bs_embed * num_images_per_prompt, seq_len, -1) if negative_encoder_hidden_states is not None: (bs_embed, seq_len, _) = negative_encoder_hidden_states.shape negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) negative_encoder_hidden_states = negative_encoder_hidden_states.view(bs_embed * num_images_per_prompt, seq_len, -1) empty_input = self.tokenizer('', padding='max_length', return_tensors='pt').input_ids.to(self.text_encoder.device) outputs = self.text_encoder(empty_input, output_hidden_states=True) empty_embeds = outputs.hidden_states[-2] empty_cond_embeds = outputs[0] model_inputs = {'encoder_hidden_states': encoder_hidden_states, 'negative_embeds': negative_encoder_hidden_states, 'empty_embeds': empty_embeds, 'empty_cond_embeds': empty_cond_embeds, 'cond_embeds': pooled_embeds} if self.transformer.config.add_micro_cond_embeds: micro_conds = list(orig_size) + list(crop_coords) + [aesthetic_score] micro_conds = torch.tensor(micro_conds, device=self.device, dtype=encoder_hidden_states.dtype) micro_conds = micro_conds.unsqueeze(0) model_inputs['micro_conds'] = micro_conds generate = self.transformer.generate2 with torch.autocast('cuda', enabled=use_fp16): generated_tokens = generate(input_ids=image_tokens, **model_inputs, timesteps=timesteps, guidance_scale=guidance_scale, guidance_schedule=guidance_schedule, temperature=temperature, topk_filter_thres=topk_filter_thres, generator=generator) images = self.vae.decode_code(generated_tokens) images = [self.to_pil_image(image) for image in images] return images # File: open-muse-main/muse/sampling.py import math from functools import partial import torch def log(t, eps=1e-20): return torch.log(t.clamp(min=eps)) def gumbel_noise(t, generator=None): noise = torch.zeros_like(t).uniform_(0, 1, generator=generator) return -log(-log(noise)) def gumbel_sample(t, temperature=1.0, dim=-1, generator=None): return (t / max(temperature, 1e-10) + gumbel_noise(t, generator=generator)).argmax(dim=dim) def top_k(logits, thres=0.9): k = math.ceil((1 - thres) * logits.shape[-1]) (val, ind) = logits.topk(k, dim=-1) probs = torch.full_like(logits, float('-inf')) probs.scatter_(2, ind, val) return probs def mask_by_random_topk(mask_len, probs, temperature=1.0, generator=None): confidence = log(probs) + temperature * gumbel_noise(probs, generator=generator) sorted_confidence = torch.sort(confidence, dim=-1).values cut_off = torch.gather(sorted_confidence, 1, mask_len.long()) masking = confidence < cut_off return masking def cosine_schedule(t): return torch.cos(t * math.pi * 0.5) def linear_schedule(t): mask_ratio = 1 - t mask_ratio = mask_ratio.clamp(min=1e-06, max=1.0) return mask_ratio def pow(t, method): exponent = float(method.replace('pow', '')) mask_ratio = 1.0 - t ** exponent mask_ratio = mask_ratio.clamp(min=1e-06, max=1.0) return mask_ratio def sigmoid_schedule(t, start=-3, end=3, tau=1.0, clip_min=1e-06): for item in [t, start, end, tau]: item = torch.tensor(item) if not torch.is_tensor(item) else item v_start = torch.sigmoid(torch.tensor(start / tau)) v_end = torch.sigmoid(torch.tensor(end / tau)) output = torch.sigmoid((t * (end - start) + start) / tau) output = (v_end - output) / (v_end - 
v_start) return torch.clip(output, clip_min, 1.0) def get_mask_chedule(method, **schedule_kwargs): if method == 'cosine': return cosine_schedule elif method == 'linear': return linear_schedule elif 'pow' in method: return partial(pow, method=method) elif method == 'sigmoid': return partial(sigmoid_schedule, **schedule_kwargs) else: raise ValueError('Unknown schedule method: {}'.format(method)) # File: open-muse-main/muse/training_utils.py import copy import os import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import pandas as pd import torch import torch.nn.functional as F def enable_full_determinism(seed: int): set_seed(seed) os.environ['CUDA_LAUNCH_BLOCKING'] = '1' os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':16:8' torch.use_deterministic_algorithms(True) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def set_seed(seed: int): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) class EMA: def __init__(self, parameters: Iterable[torch.nn.Parameter], decay: float=0.9999, min_decay: float=0.0, update_after_step: int=0, use_ema_warmup: bool=False, inv_gamma: Union[float, int]=1.0, power: Union[float, int]=2 / 3, model_cls: Optional[Any]=None, model_config: Dict[str, Any]=None, **kwargs): parameters = list(parameters) self.shadow_params = [p.clone().detach() for p in parameters] self.temp_stored_params = None self.decay = decay self.min_decay = min_decay self.update_after_step = update_after_step self.use_ema_warmup = use_ema_warmup self.inv_gamma = inv_gamma self.power = power self.optimization_step = 0 self.cur_decay_value = None self.model_cls = model_cls self.model_config = model_config @classmethod def from_pretrained(cls, path, model_cls) -> 'EMA': (_, ema_kwargs) = model_cls.load_config(path, return_unused_kwargs=True) model = model_cls.from_pretrained(path) ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config) ema_model.load_state_dict(ema_kwargs) return ema_model def save_pretrained(self, path): if self.model_cls is None: raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.') if self.model_config is None: raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.') model = self.model_cls.from_config(self.model_config) state_dict = self.state_dict() state_dict.pop('shadow_params', None) model.register_to_config(**state_dict) self.copy_to(model.parameters()) model.save_pretrained(path) def get_decay(self, optimization_step: int) -> float: step = max(0, optimization_step - self.update_after_step - 1) if step <= 0: return 0.0 if self.use_ema_warmup: cur_decay_value = 1 - (1 + step / self.inv_gamma) ** (-self.power) else: cur_decay_value = (1 + step) / (10 + step) cur_decay_value = min(cur_decay_value, self.decay) cur_decay_value = max(cur_decay_value, self.min_decay) return cur_decay_value @torch.no_grad() def step(self, parameters: Iterable[torch.nn.Parameter]): parameters = list(parameters) self.optimization_step += 1 decay = self.get_decay(self.optimization_step) self.cur_decay_value = decay one_minus_decay = 1 - decay for (s_param, param) in zip(self.shadow_params, parameters): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param)) else: s_param.copy_(param) torch.cuda.empty_cache() def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None: parameters = list(parameters) for (s_param, param) in zip(self.shadow_params, parameters): 
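# Copy each EMA shadow weight into the corresponding live parameter in place,
# moving the shadow tensor to the parameter's device first.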
param.data.copy_(s_param.to(param.device).data) def to(self, device=None, dtype=None) -> None: self.shadow_params = [p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device) for p in self.shadow_params] def state_dict(self) -> dict: return {'decay': self.decay, 'min_decay': self.min_decay, 'optimization_step': self.optimization_step, 'update_after_step': self.update_after_step, 'use_ema_warmup': self.use_ema_warmup, 'inv_gamma': self.inv_gamma, 'power': self.power, 'shadow_params': self.shadow_params} def store(self, parameters: Iterable[torch.nn.Parameter]) -> None: self.temp_stored_params = [param.detach().cpu().clone() for param in parameters] def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None: if self.temp_stored_params is None: raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights to `restore()`') for (c_param, param) in zip(self.temp_stored_params, parameters): param.data.copy_(c_param.data) self.temp_stored_params = None def load_state_dict(self, state_dict: dict) -> None: state_dict = copy.deepcopy(state_dict) self.decay = state_dict.get('decay', self.decay) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('Decay must be between 0 and 1') self.min_decay = state_dict.get('min_decay', self.min_decay) if not isinstance(self.min_decay, float): raise ValueError('Invalid min_decay') self.optimization_step = state_dict.get('optimization_step', self.optimization_step) if not isinstance(self.optimization_step, int): raise ValueError('Invalid optimization_step') self.update_after_step = state_dict.get('update_after_step', self.update_after_step) if not isinstance(self.update_after_step, int): raise ValueError('Invalid update_after_step') self.use_ema_warmup = state_dict.get('use_ema_warmup', self.use_ema_warmup) if not isinstance(self.use_ema_warmup, bool): raise ValueError('Invalid use_ema_warmup') self.inv_gamma = state_dict.get('inv_gamma', self.inv_gamma) if not isinstance(self.inv_gamma, (float, int)): raise ValueError('Invalid inv_gamma') self.power = state_dict.get('power', self.power) if not isinstance(self.power, (float, int)): raise ValueError('Invalid power') shadow_params = state_dict.get('shadow_params', None) if shadow_params is not None: self.shadow_params = shadow_params if not isinstance(self.shadow_params, list): raise ValueError('shadow_params must be a list') if not all((isinstance(p, torch.Tensor) for p in self.shadow_params)): raise ValueError('shadow_params must all be Tensors') def pixel_entropy_per_percent_masked_bucket(logits, input_ids, mask_id): masked_tokens = input_ids == mask_id num_masked_pixels = masked_tokens.sum(-1) probs = F.softmax(logits, dim=-1) log_probs = F.log_softmax(logits, dim=-1) entropy_per_pixel = -(probs * log_probs).sum(-1) entropy_per_pixel[~masked_tokens] = 0 entropy_per_image_numerator = entropy_per_pixel.sum(-1) entropy_per_image = entropy_per_image_numerator / num_masked_pixels total_buckets = 10 masked_buckets = input_ids_to_masked_buckets(input_ids, mask_id, total_buckets) entropy_by_masked_bucket = average_by_buckets(entropy_per_image, masked_buckets, total_buckets) return entropy_by_masked_bucket def image_entropy_per_percent_masked_bucket(logits, input_ids, mask_id): masked_tokens = input_ids == mask_id num_masked_pixels = masked_tokens.sum(-1, keepdim=True) pixel_probs = F.softmax(logits, dim=-1) pixel_probs[~masked_tokens] = 0 image_probs_numerator = pixel_probs.sum(-2) image_probs = image_probs_numerator / num_masked_pixels image_log_probs = 
image_probs.log() entropy_per_image = -(image_probs * image_log_probs).sum(-1) total_buckets = 10 masked_buckets = input_ids_to_masked_buckets(input_ids, mask_id, total_buckets) entropy_by_masked_bucket = average_by_buckets(entropy_per_image, masked_buckets, total_buckets) return entropy_by_masked_bucket def cross_entropy_per_percent_masked_bucket(logits, labels, input_ids, mask_id, output_size, label_smoothing): cross_entropy_per_image = F.cross_entropy(logits.view(-1, output_size), labels.view(-1), ignore_index=-100, label_smoothing=label_smoothing, reduction='none') total_buckets = 10 masked_buckets = input_ids_to_masked_buckets(input_ids, mask_id, total_buckets) cross_entropy_by_percent_masked_bucket = average_by_buckets(cross_entropy_per_image, masked_buckets, total_buckets) return cross_entropy_by_percent_masked_bucket def token_probability_distributions_per_percent_masked_bucket(logits, input_ids, mask_id): probs = F.softmax(logits, dim=-1) total_buckets = 10 masked_buckets = input_ids_to_masked_buckets(input_ids, mask_id, total_buckets) data = [] for bucket_idx in range(total_buckets): indices_for_bucket = masked_buckets[masked_buckets == bucket_idx] if indices_for_bucket.shape[0] == 0: continue index_for_bucket = indices_for_bucket[0] image_probs = probs[index_for_bucket] input_ids_for_image = input_ids[index_for_bucket] masked_pixels_probs = image_probs[input_ids_for_image == mask_id] masked_pixel_probs = masked_pixels_probs[0] masked_pixel_probs = masked_pixel_probs.cpu().numpy() for masked_pixel_prob in masked_pixel_probs: data.append({'bucket': bucket_idx, 'masked_pixel_prob': masked_pixel_prob}) df = pd.DataFrame(data) return df def average_by_buckets(values, masked_buckets, total_buckets): (unique_buckets, bucket_counts) = masked_buckets.unique(dim=0, return_counts=True) numerator = torch.zeros(total_buckets, device=values.device) numerator.scatter_add_(0, masked_buckets, values) denominator = torch.ones(total_buckets, device=values.device, dtype=torch.long) denominator[unique_buckets] = bucket_counts averaged_by_buckets = numerator / denominator return averaged_by_buckets def input_ids_to_masked_buckets(input_ids, mask_id, total_buckets=10): assert total_buckets == 10 masked_percent = (input_ids == mask_id).sum(-1) / input_ids.shape[-1] masked_buckets = ((0 < masked_percent) & (masked_percent <= 0.1)) * 0 + ((0.1 < masked_percent) & (masked_percent <= 0.2)) * 1 + ((0.2 < masked_percent) & (masked_percent <= 0.3)) * 2 + ((0.3 < masked_percent) & (masked_percent <= 0.4)) * 3 + ((0.4 < masked_percent) & (masked_percent <= 0.5)) * 4 + ((0.5 < masked_percent) & (masked_percent <= 0.6)) * 5 + ((0.6 < masked_percent) & (masked_percent <= 0.7)) * 6 + ((0.7 < masked_percent) & (masked_percent <= 0.8)) * 7 + ((0.8 < masked_percent) & (masked_percent <= 0.9)) * 8 + ((0.9 < masked_percent) & (masked_percent <= 1.0)) * 9 return masked_buckets # File: open-muse-main/training/data.py import os import io import itertools import json import math import random import re from functools import partial from typing import List, Optional, Union import PIL import webdataset as wds import yaml from braceexpand import braceexpand from torch.utils.data import default_collate from torchvision import transforms from transformers import PreTrainedTokenizer from webdataset.tariterators import base_plus_ext, tar_file_expander, url_opener, valid_sample person_token = ['a person', 'someone', 'somebody'] def replace_person_token(t): t = re.sub('<person>([,\\s]*(and)*[,\\s]*<person>)+', ' people ', t) while '<person>' in t: t =
t.replace('<person>', f' {random.choice(person_token)} ', 1) return t def filter_keys(key_set): def _f(dictionary): return {k: v for (k, v) in dictionary.items() if k in key_set} return _f def group_by_keys_nothrow(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None): current_sample = None for filesample in data: assert isinstance(filesample, dict) (fname, value) = (filesample['fname'], filesample['data']) (prefix, suffix) = keys(fname) if prefix is None: continue if lcase: suffix = suffix.lower() if current_sample is None or prefix != current_sample['__key__'] or suffix in current_sample: if valid_sample(current_sample): yield current_sample current_sample = dict(__key__=prefix, __url__=filesample['__url__']) if suffixes is None or suffix in suffixes: current_sample[suffix] = value if valid_sample(current_sample): yield current_sample def tarfile_to_samples_nothrow(src, handler=wds.warn_and_continue): streams = url_opener(src, handler=handler) files = tar_file_expander(streams, handler=handler) samples = group_by_keys_nothrow(files, handler=handler) return samples def get_orig_size(json): return (int(json.get('original_width', 0.0)), int(json.get('original_height', 0.0))) def get_aesthetic_score(json): if 'aesthetic' in json: a = json['aesthetic'] elif 'AESTHETIC_SCORE' in json: a = json['AESTHETIC_SCORE'] elif 'aesthetic_score_laion_v2' in json: a = json['aesthetic_score_laion_v2'] elif 'stability_metadata' in json and 'aes_scorelv2' in json['stability_metadata']: a = json['stability_metadata']['aes_scorelv2'] else: a = 0.0 a = float(a) return a class ImageNetTransform: def __init__(self, resolution, center_crop=True, random_flip=False): self.train_transform = transforms.Compose([transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(resolution) if center_crop else transforms.RandomCrop(resolution), transforms.RandomHorizontalFlip() if random_flip else transforms.Lambda(lambda x: x), transforms.ToTensor()]) self.eval_transform = transforms.Compose([transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(resolution), transforms.ToTensor()]) def image_transform(example, resolution=256): image = example['image'] image = transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BILINEAR)(image) (c_top, c_left, _, _) = transforms.RandomCrop.get_params(image, output_size=(resolution, resolution)) image = transforms.functional.crop(image, c_top, c_left, resolution, resolution) image = transforms.ToTensor()(image) example['image'] = image example['crop_coords'] = (c_top, c_left) return example class ClassificationDataset: def __init__(self, train_shards_path_or_url: Union[str, List[str]], eval_shards_path_or_url: Union[str, List[str]], num_train_examples: int, per_gpu_batch_size: int, global_batch_size: int, num_workers: int, resolution: int=256, return_text: bool=False, tokenizer: PreTrainedTokenizer=None, max_seq_length: int=16, center_crop: bool=True, random_flip: bool=False, imagenet_class_mapping_path=None, shuffle_buffer_size: int=1000, pin_memory: bool=False, persistent_workers: bool=False, **kwargs): transform = ImageNetTransform(resolution, center_crop, random_flip) if return_text: if imagenet_class_mapping_path is None: raise ValueError('imagenet_class_mapping_path must be provided when return_text is True') with open(imagenet_class_mapping_path, 'r') as f: self.class_mapping = json.load(f) def tokenize(imagenet_class_id): text =
self.class_mapping[str(imagenet_class_id)] input_ids = tokenizer(text, max_length=max_seq_length, padding='max_length', truncation=True, return_tensors='pt').input_ids return input_ids[0] processing_pipeline = [wds.rename(image='jpg;png;jpeg;webp', input_ids='cls', text_raw='cls', class_id='cls', handler=wds.warn_and_continue), wds.map(filter_keys(set(['image', 'input_ids', 'text_raw', 'class_idx']))), wds.map_dict(image=transform.train_transform, input_ids=tokenize, text_raw=lambda class_idx: self.class_mapping[str(class_idx)]), wds.to_tuple('image', 'input_ids')] else: processing_pipeline = [wds.rename(image='jpg;png;jpeg;webp', class_id='cls', handler=wds.warn_and_continue), wds.map(filter_keys(set(['image', 'class_id']))), wds.map_dict(image=transform.train_transform, class_id=lambda x: int(x)), wds.to_tuple('image', 'class_id')] pipeline = [wds.ResampledShards(train_shards_path_or_url), wds.tarfile_to_samples(handler=wds.ignore_and_continue), wds.shuffle(shuffle_buffer_size), wds.decode('pil', handler=wds.ignore_and_continue), *processing_pipeline, wds.batched(per_gpu_batch_size, partial=False, collation_fn=default_collate)] num_batches = math.ceil(num_train_examples / global_batch_size) num_worker_batches = math.ceil(num_train_examples / (global_batch_size * num_workers)) num_batches = num_worker_batches * num_workers num_samples = num_batches * global_batch_size self._train_dataset = wds.DataPipeline(*pipeline).with_epoch(num_worker_batches) self._train_dataloader = wds.WebLoader(self._train_dataset, batch_size=None, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, persistent_workers=persistent_workers) self._train_dataloader.num_batches = num_batches self._train_dataloader.num_samples = num_samples pipeline = [wds.SimpleShardList(eval_shards_path_or_url), wds.split_by_worker, wds.tarfile_to_samples(handler=wds.ignore_and_continue), wds.decode('pil', handler=wds.ignore_and_continue), *processing_pipeline, wds.batched(per_gpu_batch_size, partial=True, collation_fn=default_collate)] self._eval_dataset = wds.DataPipeline(*pipeline) self._eval_dataloader = wds.WebLoader(self._eval_dataset, batch_size=None, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, persistent_workers=persistent_workers) @property def train_dataset(self): return self._train_dataset @property def train_dataloader(self): return self._train_dataloader @property def eval_dataset(self): return self._eval_dataset @property def eval_dataloader(self): return self._eval_dataloader class WebdatasetSelect: def __init__(self, min_size=256, max_pwatermark=0.5, min_aesthetic_score=4.9, require_marked_as_ok_by_spawning=False, require_marked_as_not_getty=False, max_pnsfw=None): self.min_size = min_size self.max_pwatermark = max_pwatermark self.min_aesthetic_score = min_aesthetic_score self.require_marked_as_ok_by_spawning = require_marked_as_ok_by_spawning self.require_marked_as_not_getty = require_marked_as_not_getty self.max_pnsfw = max_pnsfw def __call__(self, x): if 'json' not in x: return False try: x_json = json.loads(x['json']) except: return False if 'original_width' not in x_json or 'original_height' not in x_json: return False original_width = x_json['original_width'] original_height = x_json['original_height'] is_less_than_min_size = original_width < self.min_size or original_height < self.min_size if is_less_than_min_size: return False if ('pwatermark' not in x_json or x_json['pwatermark'] is None) and 'watermark_score' not in x_json and ('stability_metadata' not in x_json or 'p_watermarkdf' 
not in x_json['stability_metadata']): return False if 'pwatermark' in x_json and x_json['pwatermark'] is not None: is_watermarked = x_json['pwatermark'] > self.max_pwatermark if is_watermarked: return False if 'watermark_score' in x_json: is_watermarked_coyo = x_json['watermark_score'] > self.max_pwatermark if is_watermarked_coyo: return False if 'stability_metadata' in x_json and 'p_watermarkdf' in x_json['stability_metadata']: is_watermarked_stability_metadata = x_json['stability_metadata']['p_watermarkdf'] > self.max_pwatermark if is_watermarked_stability_metadata: return False if 'aesthetic' not in x_json and 'AESTHETIC_SCORE' not in x_json and ('aesthetic_score_laion_v2' not in x_json) and ('stability_metadata' not in x_json or 'aes_scorelv2' not in x_json['stability_metadata']): return False if 'aesthetic' in x_json: is_under_min_aesthetic_threshold = x_json['aesthetic'] < self.min_aesthetic_score if is_under_min_aesthetic_threshold: return False if 'AESTHETIC_SCORE' in x_json: is_under_min_aesthetic_threshold_b = x_json['AESTHETIC_SCORE'] < self.min_aesthetic_score if is_under_min_aesthetic_threshold_b: return False if 'aesthetic_score_laion_v2' in x_json: is_under_min_aesthetic_threshold_coyo = x_json['aesthetic_score_laion_v2'] < self.min_aesthetic_score if is_under_min_aesthetic_threshold_coyo: return False if 'stability_metadata' in x_json and 'aes_scorelv2' in x_json['stability_metadata']: is_under_min_aesthetic_threshold_stability_metadata = x_json['stability_metadata']['aes_scorelv2'] < self.min_aesthetic_score if is_under_min_aesthetic_threshold_stability_metadata: return False if self.require_marked_as_ok_by_spawning: if 'stability_metadata' not in x_json or 'is_spawning' not in x_json['stability_metadata']: return False is_marked_as_not_ok_by_spawning = x_json['stability_metadata']['is_spawning'] if is_marked_as_not_ok_by_spawning: return False if self.require_marked_as_not_getty: if 'stability_metadata' not in x_json or 'is_getty' not in x_json['stability_metadata']: return False is_marked_as_getty = x_json['stability_metadata']['is_getty'] if is_marked_as_getty: return False if self.max_pnsfw is not None: if 'stability_metadata' not in x_json or 'p_nsfwdf' not in x_json['stability_metadata']: return False is_above_max_nsfw = x_json['stability_metadata']['p_nsfwdf'] > self.max_pnsfw if is_above_max_nsfw: return False return True def sdxl_synthetic_dataset_map(sample): clip_scores = sample['clip_scores.txt'].decode('utf-8') clip_scores = clip_scores.split(',') clip_scores = [float(x) for x in clip_scores] index_of_max = 0 for i in range(1, len(clip_scores)): if clip_scores[i] > clip_scores[index_of_max]: index_of_max = i key_of_best_clip_score_image = f'{index_of_max}.png' if key_of_best_clip_score_image not in sample: raise ValueError(f'{key_of_best_clip_score_image} was not found in sample. 
The dataset should have files <clip score index>.png where <clip score index> corresponds to an index of the clip scores in clip_scores.txt') return {'__key__': sample['__key__'], '__url__': sample['__url__'], 'txt': sample['txt'], 'png': sample[key_of_best_clip_score_image], 'json': json.dumps({'aesthetic': 5, 'original_width': 1024, 'original_height': 1024}).encode()} def ds_clean_upscaled_map(sample): with io.BytesIO(sample['png']) as stream: image = PIL.Image.open(stream) image.load() return {'__key__': sample['__key__'], '__url__': sample['__url__'], 'txt': sample['txt'], 'png': sample['png'], 'json': json.dumps({'aesthetic': 5, 'original_width': image.width, 'original_height': image.height}).encode()} def ds_clean_map(sample): with io.BytesIO(sample['png']) as stream: image = PIL.Image.open(stream) image.load() height = image.height // 2 width = image.width // 2 image = image.crop((0, 0, width, height)) image_bytes = io.BytesIO() image.save(image_bytes, format='PNG') image = image_bytes.getvalue() return {'__key__': sample['__key__'], '__url__': sample['__url__'], 'txt': sample['txt'], 'png': image, 'json': json.dumps({'aesthetic': 5, 'original_width': width, 'original_height': height}).encode()} class Text2ImageDataset: def __init__(self, train_shards_path_or_url: Union[str, List[str]], eval_shards_path_or_url: Union[str, List[str]], tokenizer: PreTrainedTokenizer, max_seq_length: int, num_train_examples: int, per_gpu_batch_size: int, global_batch_size: int, num_workers: int, resolution: int=256, center_crop: bool=True, random_flip: bool=False, shuffle_buffer_size: int=1000, pin_memory: bool=False, persistent_workers: bool=False, is_pre_encoded: bool=False, vae_checkpoint: Optional[str]=None, text_encoder_checkpoint: Optional[str]=None, use_filtered_dataset: bool=False, require_marked_as_ok_by_spawning: bool=False, require_marked_as_not_getty: bool=False, max_pnsfw: Optional[float]=None, max_pwatermark: Optional[float]=0.5, min_aesthetic_score: Optional[float]=4.75, min_size: Optional[int]=256, is_sdxl_synthetic_dataset: bool=False, is_ds_clean_upscaled: bool=False, is_ds_clean: bool=False): if f'{train_shards_path_or_url}.yaml' in os.listdir('./configs'): with open(f'./configs/{train_shards_path_or_url}.yaml') as f: train_shards_path_or_url = yaml.safe_load(f) transform = ImageNetTransform(resolution, center_crop, random_flip) def tokenize(text): text = replace_person_token(text) input_ids = tokenizer(text, max_length=max_seq_length, padding='max_length', truncation=True, return_tensors='pt').input_ids return input_ids[0] if not isinstance(train_shards_path_or_url, str): train_shards_path_or_url = [list(braceexpand(urls)) for urls in train_shards_path_or_url] train_shards_path_or_url = list(itertools.chain.from_iterable(train_shards_path_or_url)) if not isinstance(eval_shards_path_or_url, str): eval_shards_path_or_url = [list(braceexpand(urls)) for urls in eval_shards_path_or_url] eval_shards_path_or_url = list(itertools.chain.from_iterable(eval_shards_path_or_url)) if not is_pre_encoded: processing_pipeline = [wds.decode('pil', handler=wds.ignore_and_continue), wds.rename(image='jpg;png;jpeg;webp', input_ids='text;txt;caption', orig_size='json', aesthetic_score='json', handler=wds.warn_and_continue), wds.map(filter_keys(set(['image', 'input_ids', 'orig_size', 'aesthetic_score']))), wds.map(partial(image_transform, resolution=resolution), handler=wds.warn_and_continue), wds.map_dict(input_ids=tokenize, orig_size=get_orig_size, aesthetic_score=get_aesthetic_score, handler=wds.warn_and_continue)] else: vae_checkpoint =
vae_checkpoint.lower().replace('/', '.') text_encoder_checkpoint = text_encoder_checkpoint.lower().replace('/', '.') processing_pipeline = [wds.decode(wds.handle_extension('pth', wds.autodecode.torch_loads), handler=wds.ignore_and_continue), wds.rename(image_input_ids=f'{vae_checkpoint}.pth', encoder_hidden_states=f'{text_encoder_checkpoint}.pth', handler=wds.warn_and_continue), wds.map(filter_keys(set(['image_input_ids', 'encoder_hidden_states'])))] if is_sdxl_synthetic_dataset: select = wds.select(lambda sample: 'clip_scores.txt' in sample) elif use_filtered_dataset: select = wds.select(WebdatasetSelect(require_marked_as_ok_by_spawning=require_marked_as_ok_by_spawning, require_marked_as_not_getty=require_marked_as_not_getty, max_pnsfw=max_pnsfw, max_pwatermark=max_pwatermark, min_aesthetic_score=min_aesthetic_score, min_size=min_size)) else: select = None if is_sdxl_synthetic_dataset: map = wds.map(sdxl_synthetic_dataset_map, handler=wds.ignore_and_continue) elif is_ds_clean_upscaled: map = wds.map(ds_clean_upscaled_map) elif is_ds_clean: map = wds.map(ds_clean_map) else: map = None pipeline = [wds.ResampledShards(train_shards_path_or_url), tarfile_to_samples_nothrow, *([select] if select is not None else []), *([map] if map is not None else []), wds.shuffle(shuffle_buffer_size), *processing_pipeline, wds.batched(per_gpu_batch_size, partial=False, collation_fn=default_collate)] num_batches = math.ceil(num_train_examples / global_batch_size) num_worker_batches = math.ceil(num_train_examples / (global_batch_size * num_workers)) num_batches = num_worker_batches * num_workers num_samples = num_batches * global_batch_size self._train_dataset = wds.DataPipeline(*pipeline).with_epoch(num_worker_batches) self._train_dataloader = wds.WebLoader(self._train_dataset, batch_size=None, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, persistent_workers=persistent_workers) self._train_dataloader.num_batches = num_batches self._train_dataloader.num_samples = num_samples pipeline = [wds.SimpleShardList(eval_shards_path_or_url), wds.split_by_worker, wds.tarfile_to_samples(handler=wds.ignore_and_continue), *processing_pipeline, wds.batched(per_gpu_batch_size, partial=False, collation_fn=default_collate)] self._eval_dataset = wds.DataPipeline(*pipeline) self._eval_dataloader = wds.WebLoader(self._eval_dataset, batch_size=None, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, persistent_workers=persistent_workers) @property def train_dataset(self): return self._train_dataset @property def train_dataloader(self): return self._train_dataloader @property def eval_dataset(self): return self._eval_dataset @property def eval_dataloader(self): return self._eval_dataloader # File: open-muse-main/training/optimizer.py """""" import torch from torch.optim.optimizer import Optimizer class Lion(Optimizer): def __init__(self, params, lr=0.0001, betas=(0.9, 0.99), weight_decay=0.0, **kwargs): if not 0.0 <= lr: raise ValueError('Invalid learning rate: {}'.format(lr)) if not 0.0 <= betas[0] < 1.0: raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1])) defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay) super().__init__(params, defaults) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue p.data.mul_(1 - 
group['lr'] * group['weight_decay']) grad = p.grad state = self.state[p] if len(state) == 0: state['exp_avg'] = torch.zeros_like(p) exp_avg = state['exp_avg'] (beta1, beta2) = group['betas'] update = exp_avg * beta1 + grad * (1 - beta1) p.add_(torch.sign(update), alpha=-group['lr']) exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2) return loss # File: open-muse-main/training/train_maskgit_imagenet.py import json import logging import math import os import time from pathlib import Path from typing import Any, List, Tuple import numpy as np import torch import torch.nn.functional as F import wandb from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import DistributedType, set_seed from data import ClassificationDataset from omegaconf import DictConfig, ListConfig, OmegaConf from optimizer import Lion from PIL import Image from torch.optim import AdamW import muse from muse import MOVQ, MaskGitTransformer, MaskGitVQGAN from muse.lr_schedulers import get_scheduler from muse.sampling import cosine_schedule try: import apex is_apex_available = True except ImportError: is_apex_available = False logger = get_logger(__name__, log_level='INFO') def get_config(): cli_conf = OmegaConf.from_cli() yaml_conf = OmegaConf.load(cli_conf.config) conf = OmegaConf.merge(yaml_conf, cli_conf) return conf def flatten_omega_conf(cfg: Any, resolve: bool=False) -> List[Tuple[str, Any]]: ret = [] def handle_dict(key: Any, value: Any, resolve: bool) -> List[Tuple[str, Any]]: return [(f'{key}.{k1}', v1) for (k1, v1) in flatten_omega_conf(value, resolve=resolve)] def handle_list(key: Any, value: Any, resolve: bool) -> List[Tuple[str, Any]]: return [(f'{key}.{idx}', v1) for (idx, v1) in flatten_omega_conf(value, resolve=resolve)] if isinstance(cfg, DictConfig): for (k, v) in cfg.items_ex(resolve=resolve): if isinstance(v, DictConfig): ret.extend(handle_dict(k, v, resolve=resolve)) elif isinstance(v, ListConfig): ret.extend(handle_list(k, v, resolve=resolve)) else: ret.append((str(k), v)) elif isinstance(cfg, ListConfig): for (idx, v) in enumerate(cfg._iter_ex(resolve=resolve)): if isinstance(v, DictConfig): ret.extend(handle_dict(idx, v, resolve=resolve)) elif isinstance(v, ListConfig): ret.extend(handle_list(idx, v, resolve=resolve)) else: ret.append((str(idx), v)) else: assert False return ret def get_vq_model_class(model_type): if model_type == 'movq': return MOVQ elif model_type == 'maskgit_vqgan': return MaskGitVQGAN else: raise ValueError(f'model_type {model_type} not supported for VQGAN') def soft_target_cross_entropy(logits, targets, soft_targets): logits = logits[:, 1:] targets = targets[:, 1:] logits = logits[..., :soft_targets.shape[-1]] log_probs = F.log_softmax(logits, dim=-1) padding_mask = targets.eq(-100) loss = torch.sum(-soft_targets * log_probs, dim=-1) loss.masked_fill_(padding_mask, 0.0) num_active_elements = padding_mask.numel() - padding_mask.long().sum() loss = loss.sum() / num_active_elements return loss class AverageMeter(object): def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def main(): config = get_config() if config.training.enable_tf32: torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False config.experiment.logging_dir = str(Path(config.experiment.output_dir) / 'logs') accelerator = 
Accelerator(gradient_accumulation_steps=config.training.gradient_accumulation_steps, mixed_precision=config.training.mixed_precision, log_with='wandb', logging_dir=config.experiment.logging_dir, split_batches=True) if accelerator.distributed_type == DistributedType.DEEPSPEED: accelerator.state.deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = config.training.batch_size logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: muse.logging.set_verbosity_info() else: muse.logging.set_verbosity_error() if accelerator.is_main_process: resume_wandb_run = config.experiment.resume_from_checkpoint run_id = config.wandb.get('run_id', None) if run_id is None: resume_wandb_run = False run_id = wandb.util.generate_id() config.wandb.run_id = run_id wandb_init_kwargs = dict(name=config.experiment.name, id=run_id, resume=resume_wandb_run, entity=config.wandb.get('entity', None), config_exclude_keys=[]) wandb_config = {k: v for (k, v) in flatten_omega_conf(config, resolve=True)} wandb_config.pop('experiment.resume_from_checkpoint') accelerator.init_trackers(config.experiment.project, config=wandb_config, init_kwargs={'wandb': wandb_init_kwargs}) if accelerator.is_main_process: os.makedirs(config.experiment.output_dir, exist_ok=True) config_path = Path(config.experiment.output_dir) / 'config.yaml' logging.info(f'Saving config to {config_path}') OmegaConf.save(config, config_path) if config.training.seed is not None: set_seed(config.training.seed) logger.info('Loading models and optimizer') vq_class = get_vq_model_class(config.model.vq_model.type) vq_model = vq_class.from_pretrained(config.model.vq_model.pretrained) model = MaskGitTransformer(**config.model.transformer) mask_id = model.config.mask_token_id vq_model.requires_grad_(False) if config.model.enable_xformers_memory_efficient_attention: model.enable_xformers_memory_efficient_attention() optimizer_config = config.optimizer.params learning_rate = optimizer_config.learning_rate if optimizer_config.scale_lr: learning_rate = learning_rate * config.training.batch_size * accelerator.num_processes * config.training.gradient_accumulation_steps optimizer_type = config.optimizer.name if optimizer_type == 'adamw': optimizer_cls = AdamW elif optimizer_type == 'fused_adamw': if is_apex_available: optimizer_cls = apex.optimizers.FusedAdam else: raise ImportError('Please install apex to use fused_adam') elif optimizer_type == 'lion': optimizer_cls = Lion else: raise ValueError(f'Optimizer {optimizer_type} not supported') optimizer = optimizer_cls(model.parameters(), lr=optimizer_config.learning_rate, betas=(optimizer_config.beta1, optimizer_config.beta2), weight_decay=optimizer_config.weight_decay, eps=optimizer_config.epsilon) logger.info('Creating dataloaders and lr_scheduler') total_batch_size_without_accum = config.training.batch_size * accelerator.num_processes total_batch_size = config.training.batch_size * accelerator.num_processes * config.training.gradient_accumulation_steps preproc_config = config.dataset.preprocessing dataset_config = config.dataset.params dataset = ClassificationDataset(train_shards_path_or_url=dataset_config.train_shards_path_or_url, eval_shards_path_or_url=dataset_config.eval_shards_path_or_url, num_train_examples=config.experiment.max_train_examples, per_gpu_batch_size=config.training.batch_size, global_batch_size=total_batch_size_without_accum, 
num_workers=dataset_config.num_workers, resolution=preproc_config.resolution, center_crop=preproc_config.center_crop, random_flip=preproc_config.random_flip, shuffle_buffer_size=dataset_config.shuffle_buffer_size, pin_memory=dataset_config.pin_memory, persistent_workers=dataset_config.persistent_workers) (train_dataloader, eval_dataloader) = (dataset.train_dataloader, dataset.eval_dataloader) lr_scheduler = get_scheduler(config.lr_scheduler.scheduler, optimizer=optimizer, num_training_steps=config.training.max_train_steps, num_warmup_steps=config.lr_scheduler.params.warmup_steps) logger.info('Preparing model, optimizer and dataloaders') (model, optimizer, lr_scheduler) = accelerator.prepare(model, optimizer, lr_scheduler) vq_model.to(accelerator.device) if config.training.overfit_one_batch: train_dataloader = [next(iter(train_dataloader))] num_update_steps_per_epoch = math.ceil(train_dataloader.num_batches / config.training.gradient_accumulation_steps) num_train_epochs = math.ceil(config.training.max_train_steps / num_update_steps_per_epoch) logger.info('***** Running training *****') logger.info(f' Num training steps = {config.training.max_train_steps}') logger.info(f' Gradient Accumulation steps = {config.training.gradient_accumulation_steps}') logger.info(f' Instantaneous batch size per device = {config.training.batch_size}') logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}') global_step = 0 first_epoch = 0 resume_from_checkpoint = config.experiment.resume_from_checkpoint if resume_from_checkpoint: if resume_from_checkpoint != 'latest': path = resume_from_checkpoint else: dirs = os.listdir(config.experiment.output_dir) dirs = [d for d in dirs if d.startswith('checkpoint')] dirs = sorted(dirs, key=lambda x: int(x.split('-')[1])) path = dirs[-1] if len(dirs) > 0 else None path = os.path.join(config.experiment.output_dir, path) if path is not None else None if path is None: accelerator.print(f"Checkpoint '{resume_from_checkpoint}' does not exist.
Starting a new training run.") resume_from_checkpoint = None else: accelerator.print(f'Resuming from checkpoint {path}') resume_lr_scheduler = config.experiment.get('resume_lr_scheduler', True) if not resume_lr_scheduler: logger.info('Not resuming the lr scheduler.') accelerator._schedulers = [] accelerator.load_state(path) accelerator.wait_for_everyone() if not resume_lr_scheduler: accelerator._schedulers = [lr_scheduler] global_step = int(os.path.basename(path).split('-')[1]) first_epoch = global_step // num_update_steps_per_epoch @torch.no_grad() def prepare_inputs_and_labels(pixel_values: torch.FloatTensor, class_ids: torch.LongTensor, min_masking_rate: float=0.0, is_train: bool=True): if config.training.use_soft_code_target and is_train: (soft_targets, image_tokens) = vq_model.get_soft_code(pixel_values, temp=config.training.soft_code_temp, stochastic=config.training.use_stochastic_code) else: image_tokens = vq_model.encode(pixel_values)[1] soft_targets = None (batch_size, seq_len) = image_tokens.shape timesteps = torch.rand(batch_size, device=image_tokens.device) mask_prob = cosine_schedule(timesteps) mask_prob = mask_prob.clip(min_masking_rate) num_token_masked = (seq_len * mask_prob).round().clamp(min=1) batch_randperm = torch.rand(batch_size, seq_len, device=image_tokens.device).argsort(dim=-1) mask = batch_randperm < num_token_masked.unsqueeze(-1) input_ids = torch.where(mask, mask_id, image_tokens) labels = torch.where(mask, image_tokens, -100) class_ids = class_ids + vq_model.num_embeddings input_ids = torch.cat([class_ids.unsqueeze(-1), input_ids], dim=-1) labels_mask = torch.ones_like(class_ids, device=image_tokens.device).unsqueeze(-1).fill_(-100) labels = torch.cat([labels_mask, labels], dim=-1) return (input_ids, labels, soft_targets, mask_prob) batch_time_m = AverageMeter() data_time_m = AverageMeter() end = time.time() for epoch in range(first_epoch, num_train_epochs): model.train() for batch in train_dataloader: (pixel_values, class_ids) = batch pixel_values = pixel_values.to(accelerator.device, non_blocking=True) class_ids = class_ids.to(accelerator.device, non_blocking=True) data_time_m.update(time.time() - end) (input_ids, labels, soft_targets, mask_prob) = prepare_inputs_and_labels(pixel_values, class_ids, config.training.min_masking_rate) if global_step == 0 and epoch == 0: logger.info('Input ids: {}'.format(input_ids)) logger.info('Labels: {}'.format(labels)) with accelerator.accumulate(model): if config.training.use_soft_code_target: logits = model(input_ids=input_ids) loss = soft_target_cross_entropy(logits, labels, soft_targets) else: (_, loss) = model(input_ids=input_ids, labels=labels, label_smoothing=config.training.label_smoothing) avg_loss = accelerator.gather(loss.repeat(config.training.batch_size)).mean() avg_masking_rate = accelerator.gather(mask_prob.repeat(config.training.batch_size)).mean() accelerator.backward(loss) if config.training.max_grad_norm is not None and accelerator.sync_gradients: accelerator.clip_grad_norm_(model.parameters(), config.training.max_grad_norm) optimizer.step() lr_scheduler.step() if accelerator.sync_gradients and (global_step + 1) % config.experiment.log_grad_norm_every == 0 and accelerator.is_main_process: log_grad_norm(model, accelerator, global_step + 1) if optimizer_type == 'fused_adamw': optimizer.zero_grad() else: optimizer.zero_grad(set_to_none=True) if accelerator.sync_gradients: batch_time_m.update(time.time() - end) end = time.time() if (global_step + 1) % config.experiment.log_every == 0: 
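# Periodic logging: derive per-GPU throughput from the measured step time and
# push loss / lr / masking-rate statistics to both the tracker and the console.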
samples_per_second_per_gpu = config.training.gradient_accumulation_steps * config.training.batch_size / batch_time_m.val logs = {'step_loss': avg_loss.item(), 'lr': lr_scheduler.get_last_lr()[0], 'avg_masking_rate': avg_masking_rate.item(), 'samples/sec/gpu': samples_per_second_per_gpu, 'data_time': data_time_m.val, 'batch_time': batch_time_m.val} accelerator.log(logs, step=global_step + 1) logger.info(f'Step: {global_step + 1} Loss: {avg_loss.item():0.4f} Data (t): {data_time_m.val:0.4f}, {samples_per_second_per_gpu:0.2f}/s/gpu Batch (t): {batch_time_m.val:0.4f} LR: {lr_scheduler.get_last_lr()[0]:0.6f}') batch_time_m.reset() data_time_m.reset() if (global_step + 1) % config.experiment.eval_every == 0 and accelerator.is_main_process: validate_model(model, eval_dataloader, accelerator, global_step + 1, prepare_inputs_and_labels) if (global_step + 1) % config.experiment.save_every == 0: save_checkpoint(model, config, accelerator, global_step + 1) if (global_step + 1) % config.experiment.generate_every == 0 and accelerator.is_main_process: generate_images(model, vq_model, accelerator, global_step + 1) global_step += 1 if global_step >= config.training.max_train_steps: break accelerator.wait_for_everyone() if accelerator.is_main_process: validate_model(model, eval_dataloader, accelerator, global_step, prepare_inputs_and_labels) save_checkpoint(model, config, accelerator, global_step) if accelerator.is_main_process: model = accelerator.unwrap_model(model) model.save_pretrained(config.experiment.output_dir) accelerator.end_training() @torch.no_grad() def validate_model(model, eval_dataloader, accelerator, global_step, prepare_inputs_and_labels): logger.info('Evaluating...') model.eval() eval_loss = 0 now = time.time() for (i, batch) in enumerate(eval_dataloader): (pixel_values, class_ids) = batch pixel_values = pixel_values.to(accelerator.device, non_blocking=True) class_ids = class_ids.to(accelerator.device, non_blocking=True) (input_ids, labels, _, _) = prepare_inputs_and_labels(pixel_values, class_ids, is_train=False) (_, loss) = model(input_ids=input_ids, labels=labels) eval_loss += loss.mean() eval_loss = eval_loss / (i + 1) eval_time = time.time() - now logger.info(f'Step: {global_step} Eval Loss: {eval_loss.item():0.4f} Eval time: {eval_time:0.2f} s') accelerator.log({'eval_loss': eval_loss.item()}, step=global_step) model.train() @torch.no_grad() def generate_images(model, vq_model, accelerator, global_step): logger.info('Generating images...') imagenet_class_names = ['Jay', 'Castle', 'coffee mug', 'desk', 'Husky', 'Valley', 'Red wine', 'Coral reef', 'Mixing bowl', 'Cleaver', 'Vine Snake', 'Bloodhound', 'Barbershop', 'Ski', 'Otter', 'Snowmobile'] imagenet_class_ids = torch.tensor([17, 483, 504, 526, 248, 979, 966, 973, 659, 499, 59, 163, 424, 795, 360, 802], device=accelerator.device, dtype=torch.long) model.eval() dtype = torch.float32 if accelerator.mixed_precision == 'fp16': dtype = torch.float16 elif accelerator.mixed_precision == 'bf16': dtype = torch.bfloat16 with torch.autocast('cuda', dtype=dtype, enabled=accelerator.mixed_precision != 'no'): gen_token_ids = accelerator.unwrap_model(model).generate2(imagenet_class_ids, timesteps=8) gen_token_ids = torch.clamp(gen_token_ids, max=accelerator.unwrap_model(model).config.codebook_size - 1) images = vq_model.decode_code(gen_token_ids) model.train() images = 2.0 * images - 1.0 images = torch.clamp(images, -1.0, 1.0) images = (images + 1.0) / 2.0 images *= 255.0 images = images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8) 
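# The clamp sequence above is a round trip through [-1, 1]: x -> 2x - 1, clamp,
# then (x + 1) / 2 back into [0, 1] before the 255 scaling. E.g. a decoder
# output of 0.8 maps 0.8 -> 0.6 -> 0.8 (unchanged), while an out-of-range 1.3
# maps 1.3 -> 1.6 -> 1.0 -> 1.0 and saturates to 255.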
pil_images = [Image.fromarray(image) for image in images] wandb_images = [wandb.Image(image, caption=imagenet_class_names[i]) for (i, image) in enumerate(pil_images)] wandb.log({'generated_images': wandb_images}, step=global_step) def save_checkpoint(model, config, accelerator, global_step): save_path = Path(config.experiment.output_dir) / f'checkpoint-{global_step}' state_dict = accelerator.get_state_dict(model) if accelerator.is_main_process: unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(save_path / 'unwrapped_model', save_function=accelerator.save, state_dict=state_dict) json.dump({'global_step': global_step}, (save_path / 'metadata.json').open('w+')) logger.info(f'Saved state to {save_path}') accelerator.save_state(save_path) def log_grad_norm(model, accelerator, global_step): for (name, param) in model.named_parameters(): if param.grad is not None: grads = param.grad.detach().data grad_norm = (grads.norm(p=2) / grads.numel()).item() accelerator.log({'grad_norm/' + name: grad_norm}, step=global_step) if __name__ == '__main__': main()
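# ---------------------------------------------------------------------------
# Editor's note: the sketch below is illustrative and not part of the
# repository. It isolates the masking step that both training scripts in this
# dump implement: draw a timestep per image, map it to a masking rate with the
# cosine schedule (assumed here to be cos(t * pi / 2), as in the MUSE paper),
# mask that fraction of VQ tokens, and build labels that are -100 wherever the
# loss should be ignored. All sizes and ids are toy values.
import math

import torch


def cosine_schedule_demo(t: torch.Tensor) -> torch.Tensor:
    # masking rate is ~1 near t=0 (almost everything hidden) and ~0 near t=1
    return torch.cos(t * math.pi * 0.5)


def demo_mask_tokens(image_tokens: torch.Tensor, mask_id: int, min_masking_rate: float = 0.0):
    batch_size, seq_len = image_tokens.shape
    timesteps = torch.rand(batch_size)
    mask_prob = cosine_schedule_demo(timesteps).clip(min=min_masking_rate)
    # always mask at least one token per image
    num_token_masked = (seq_len * mask_prob).round().clamp(min=1)
    # rank a random permutation per row; the smallest ranks get masked
    batch_randperm = torch.rand(batch_size, seq_len).argsort(dim=-1)
    mask = batch_randperm < num_token_masked.unsqueeze(-1)
    input_ids = torch.where(mask, torch.full_like(image_tokens, mask_id), image_tokens)
    # cross entropy skips label -100, so only masked positions contribute
    labels = torch.where(mask, image_tokens, torch.full_like(image_tokens, -100))
    return input_ids, labels, mask_prob


if __name__ == '__main__':
    toy_tokens = torch.randint(0, 8, (2, 16))  # 2 images, 16 VQ tokens, codebook of 8
    input_ids, labels, mask_prob = demo_mask_tokens(toy_tokens, mask_id=8)
    print('mask rates:', [round(p, 3) for p in mask_prob.tolist()])
    print('masked per image:', (input_ids == 8).sum(dim=-1).tolist())
# ---------------------------------------------------------------------------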
# File: open-muse-main/training/train_muse.py
import json import logging import math import os import random import shutil import time from functools import partial from pathlib import Path from typing import Any, List, Tuple, Union import numpy as np import plotly.express as px import torch import torch.nn.functional as F import torchvision.transforms.functional as TF import wandb from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import DistributedType, set_seed from data import ClassificationDataset, Text2ImageDataset from omegaconf import DictConfig, ListConfig, OmegaConf from optimizer import Lion from PIL import Image from torch.optim import AdamW from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel, T5Tokenizer import muse import muse.training_utils from muse import MOVQ, EMAModel, MaskGitTransformer, MaskGiTUViT, MaskGitVQGAN, PaellaVQModel, VQGANModel, get_mask_chedule from muse.lr_schedulers import get_scheduler try: import apex is_apex_available = True except ImportError: is_apex_available = False logger = get_logger(__name__, log_level='INFO') def get_config(): cli_conf = OmegaConf.from_cli() yaml_conf = OmegaConf.load(cli_conf.config) conf = OmegaConf.merge(yaml_conf, cli_conf) return conf def flatten_omega_conf(cfg: Any, resolve: bool=False) -> List[Tuple[str, Any]]: ret = [] def handle_dict(key: Any, value: Any, resolve: bool) -> List[Tuple[str, Any]]: return [(f'{key}.{k1}', v1) for (k1, v1) in flatten_omega_conf(value, resolve=resolve)] def handle_list(key: Any, value: Any, resolve: bool) -> List[Tuple[str, Any]]: return [(f'{key}.{idx}', v1) for (idx, v1) in flatten_omega_conf(value, resolve=resolve)] if isinstance(cfg, DictConfig): for (k, v) in cfg.items_ex(resolve=resolve): if isinstance(v, DictConfig): ret.extend(handle_dict(k, v, resolve=resolve)) elif isinstance(v, ListConfig): ret.extend(handle_list(k, v, resolve=resolve)) else: ret.append((str(k), v)) elif isinstance(cfg, ListConfig): for (idx, v) in enumerate(cfg._iter_ex(resolve=resolve)): if isinstance(v, DictConfig): ret.extend(handle_dict(idx, v, resolve=resolve)) elif isinstance(v, ListConfig): ret.extend(handle_list(idx, v, resolve=resolve)) else: ret.append((str(idx), v)) else: assert False return ret def get_vq_model_class(model_type): if model_type == 'vqgan': return VQGANModel elif model_type == 'movq': return MOVQ elif model_type == 'maskgit_vqgan': return MaskGitVQGAN elif model_type == 'paella_vq': return PaellaVQModel else: raise ValueError(f'model_type {model_type} not supported for VQGAN') def soft_target_cross_entropy(logits, targets, soft_targets): logits = logits[:, 1:] targets = targets[:, 1:] logits = logits[..., :soft_targets.shape[-1]] log_probs = F.log_softmax(logits, dim=-1) padding_mask = targets.eq(-100) loss = torch.sum(-soft_targets * log_probs, dim=-1) loss.masked_fill_(padding_mask, 0.0) num_active_elements = padding_mask.numel() - padding_mask.long().sum() loss = loss.sum() / num_active_elements return loss def get_loss_weight(t, mask, min_val=0.3): return 1 - (1 - mask) * ((1 - t) * (1 - min_val))[:, None] def mask_or_random_replace_tokens(image_tokens, mask_id, config, mask_schedule, is_train=True): (batch_size, seq_len) = image_tokens.shape if not is_train and config.training.get('eval_mask_ratios', None): mask_prob = random.choices(config.training.eval_mask_ratios, k=batch_size) mask_prob = torch.tensor(mask_prob, device=image_tokens.device) else: timesteps = torch.rand(batch_size, device=image_tokens.device) mask_prob = mask_schedule(timesteps) mask_prob = mask_prob.clip(config.training.min_masking_rate) num_token_masked = (seq_len * mask_prob).round().clamp(min=1) mask_contiguous_region_prob = config.training.get('mask_contiguous_region_prob', None) if mask_contiguous_region_prob is None: mask_contiguous_region = False else: mask_contiguous_region = random.random() < mask_contiguous_region_prob if not mask_contiguous_region: batch_randperm = torch.rand(batch_size, seq_len, device=image_tokens.device).argsort(dim=-1) mask = batch_randperm < num_token_masked.unsqueeze(-1) else: resolution = int(seq_len ** 0.5) mask = torch.zeros((batch_size, resolution, resolution), device=image_tokens.device) for (batch_idx, num_token_masked_) in enumerate(num_token_masked): num_token_masked_ = int(num_token_masked_.item()) num_token_masked_height = random.randint(math.ceil(num_token_masked_ / resolution), min(resolution, num_token_masked_)) num_token_masked_height = min(num_token_masked_height, resolution) num_token_masked_width = math.ceil(num_token_masked_ / num_token_masked_height) num_token_masked_width = min(num_token_masked_width, resolution) start_idx_height = random.randint(0, resolution - num_token_masked_height) start_idx_width = random.randint(0, resolution - num_token_masked_width) mask[batch_idx, start_idx_height:start_idx_height + num_token_masked_height, start_idx_width:start_idx_width + num_token_masked_width] = 1 mask = mask.reshape(batch_size, seq_len) mask = mask.to(torch.bool) noise_type = config.training.get('noise_type', 'mask') if noise_type == 'mask': input_ids = torch.where(mask, mask_id, image_tokens) elif noise_type == 'random_replace': random_tokens = torch.randint_like(image_tokens, low=0, high=config.model.codebook_size, device=image_tokens.device) input_ids = torch.where(mask, random_tokens, image_tokens) else: raise ValueError(f'noise_type {noise_type} not supported') if config.training.get('predict_all_tokens', False) or noise_type == 'random_replace': labels = image_tokens loss_weight = get_loss_weight(mask_prob, mask.long()) else: labels = torch.where(mask, image_tokens, -100) loss_weight = None return (input_ids, labels, loss_weight, mask_prob) class AverageMeter(object): def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n
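# NOTE (editor): n is the number of samples `val` summarizes, so avg stays a true per-sample mean.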
self.count += n self.avg = self.sum / self.count def main(): config = get_config() if config.training.enable_tf32: torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False config.experiment.logging_dir = str(Path(config.experiment.output_dir) / 'logs') accelerator = Accelerator(gradient_accumulation_steps=config.training.gradient_accumulation_steps, mixed_precision=config.training.mixed_precision, log_with='wandb', project_dir=config.experiment.logging_dir, split_batches=True) if accelerator.distributed_type == DistributedType.DEEPSPEED: accelerator.state.deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = config.training.batch_size logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: muse.logging.set_verbosity_info() else: muse.logging.set_verbosity_error() if accelerator.is_main_process: resume_wandb_run = config.experiment.resume_from_checkpoint run_id = config.wandb.get('run_id', None) if run_id is None: resume_wandb_run = False run_id = wandb.util.generate_id() config.wandb.run_id = run_id wandb_init_kwargs = dict(name=config.experiment.name, id=run_id, resume=resume_wandb_run, entity=config.wandb.get('entity', None), config_exclude_keys=[]) wandb_config = {k: v for (k, v) in flatten_omega_conf(config, resolve=True)} wandb_config.pop('experiment.resume_from_checkpoint') accelerator.init_trackers(config.experiment.project, config=wandb_config, init_kwargs={'wandb': wandb_init_kwargs}) if accelerator.is_main_process: os.makedirs(config.experiment.output_dir, exist_ok=True) config_path = Path(config.experiment.output_dir) / 'config.yaml' logging.info(f'Saving config to {config_path}') OmegaConf.save(config, config_path) if config.training.seed is not None: set_seed(config.training.seed) logger.info('Loading models and optimizer') is_pre_encode = config.training.get('pre_encode', False) if not is_pre_encode: if config.model.text_encoder.type == 'clip': text_encoder_cls = CLIPTextModelWithProjection if config.model.transformer.get('add_cond_embeds', False) else CLIPTextModel text_encoder = text_encoder_cls.from_pretrained(config.model.text_encoder.pretrained, projection_dim=768) tokenizer = CLIPTokenizer.from_pretrained(config.model.text_encoder.pretrained) if config.model.text_encoder.get('pad_token_id', None): tokenizer.pad_token_id = config.model.text_encoder.pad_token_id elif config.model.text_encoder.type == 't5': text_encoder = T5EncoderModel.from_pretrained(config.model.text_encoder.pretrained) tokenizer = T5Tokenizer.from_pretrained(config.model.text_encoder.pretrained) else: raise ValueError(f'Unknown text model type: {config.model.text_encoder.type}') vq_class = get_vq_model_class(config.model.vq_model.type) vq_model = vq_class.from_pretrained(config.model.vq_model.pretrained) text_encoder.requires_grad_(False) vq_model.requires_grad_(False) else: text_encoder = None tokenizer = None vq_model = None model_cls = MaskGitTransformer if config.model.get('architecture', 'transformer') == 'transformer' else MaskGiTUViT if config.model.get('pretrained_model_path', None) is not None: model = model_cls.from_pretrained(config.model.pretrained_model_path) else: model = model_cls(**config.model.transformer) mask_id = model.config.mask_token_id output_size = model.output_size if config.training.get('use_ema', False): ema = 
EMAModel(model.parameters(), decay=config.training.ema_decay, update_after_step=config.training.ema_update_after_step, update_every=config.training.ema_update_every, model_cls=model_cls, model_config=model.config) def load_model_hook(models, input_dir): load_model = EMAModel.from_pretrained(os.path.join(input_dir, 'ema_model'), model_cls=model_cls) ema.load_state_dict(load_model.state_dict()) ema.to(accelerator.device) del load_model def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: ema.save_pretrained(os.path.join(output_dir, 'ema_model')) accelerator.register_load_state_pre_hook(load_model_hook) accelerator.register_save_state_pre_hook(save_model_hook) if config.model.enable_xformers_memory_efficient_attention: model.enable_xformers_memory_efficient_attention() optimizer_config = config.optimizer.params learning_rate = optimizer_config.learning_rate if optimizer_config.scale_lr: learning_rate = learning_rate * config.training.batch_size * accelerator.num_processes * config.training.gradient_accumulation_steps optimizer_type = config.optimizer.name if optimizer_type == 'adamw': optimizer_cls = AdamW elif optimizer_type == 'fused_adamw': if is_apex_available: optimizer_cls = apex.optimizers.FusedAdam else: raise ImportError('Please install apex to use fused_adam') elif optimizer_type == '8bit_adamw': try: import bitsandbytes as bnb except ImportError: raise ImportError('To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`.') optimizer_cls = bnb.optim.AdamW8bit elif optimizer_type == 'lion': optimizer_cls = Lion else: raise ValueError(f'Optimizer {optimizer_type} not supported') no_decay = ['bias', 'layer_norm.weight', 'mlm_ln.weight', 'embeddings.weight'] optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': optimizer_config.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}] optimizer = optimizer_cls(optimizer_grouped_parameters, lr=learning_rate, betas=(optimizer_config.beta1, optimizer_config.beta2), weight_decay=optimizer_config.weight_decay, eps=optimizer_config.epsilon) if config.get('mask_schedule', None) is not None: schedule = config.mask_schedule.schedule args = config.mask_schedule.get('params', {}) mask_schedule = get_mask_chedule(schedule, **args) else: mask_schedule = get_mask_chedule(config.training.get('mask_schedule', 'cosine')) logger.info('Creating dataloaders and lr_scheduler') total_batch_size_without_accum = config.training.batch_size * accelerator.num_processes total_batch_size = config.training.batch_size * accelerator.num_processes * config.training.gradient_accumulation_steps preproc_config = config.dataset.preprocessing dataset_config = config.dataset.params if config.dataset.type == 'classification': dataset_cls = partial(ClassificationDataset, return_text=True, imagenet_class_mapping_path=dataset_config.imagenet_class_mapping_path) else: dataset_cls = Text2ImageDataset dataset = dataset_cls(train_shards_path_or_url=dataset_config.train_shards_path_or_url, eval_shards_path_or_url=dataset_config.eval_shards_path_or_url, tokenizer=tokenizer, max_seq_length=preproc_config.max_seq_length, num_train_examples=config.experiment.max_train_examples, per_gpu_batch_size=config.training.batch_size, global_batch_size=total_batch_size_without_accum, num_workers=dataset_config.num_workers, resolution=preproc_config.resolution,
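# NOTE (editor): the remaining kwargs are LAION-style shard filters (watermark/NSFW probability
# caps, minimum aesthetic score and image size) plus flags for pre-encoded and synthetic SDXL shards.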
center_crop=preproc_config.center_crop, random_flip=preproc_config.random_flip, shuffle_buffer_size=dataset_config.shuffle_buffer_size, pin_memory=dataset_config.pin_memory, persistent_workers=dataset_config.persistent_workers, is_pre_encoded=is_pre_encode, vae_checkpoint=config.model.vq_model.pretrained, text_encoder_checkpoint=config.model.text_encoder.pretrained, use_filtered_dataset=dataset_config.get('use_filtered_dataset', False), require_marked_as_ok_by_spawning=dataset_config.get('require_marked_as_ok_by_spawning', False), require_marked_as_not_getty=dataset_config.get('require_marked_as_not_getty', False), max_pnsfw=dataset_config.get('max_pnsfw', None), max_pwatermark=dataset_config.get('max_pwatermark', 0.5), min_aesthetic_score=dataset_config.get('min_aesthetic_score', 4.75), min_size=dataset_config.get('min_size', 256), is_sdxl_synthetic_dataset=dataset_config.get('is_sdxl_synthetic_dataset', False), is_ds_clean_upscaled=dataset_config.get('is_ds_clean_upscaled', False), is_ds_clean=dataset_config.get('is_ds_clean', False)) (train_dataloader, eval_dataloader) = (dataset.train_dataloader, dataset.eval_dataloader) lr_scheduler = get_scheduler(config.lr_scheduler.scheduler, optimizer=optimizer, num_training_steps=config.training.max_train_steps, num_warmup_steps=config.lr_scheduler.params.warmup_steps) logger.info('Preparing model, optimizer and dataloaders') (model, optimizer, lr_scheduler) = accelerator.prepare(model, optimizer, lr_scheduler) weight_dtype = torch.float32 if accelerator.mixed_precision == 'fp16': weight_dtype = torch.float16 elif accelerator.mixed_precision == 'bf16': weight_dtype = torch.bfloat16 if not is_pre_encode: text_encoder.to(device=accelerator.device, dtype=weight_dtype) vq_model.to(device=accelerator.device) if config.training.get('use_ema', False): ema.to(accelerator.device) if not is_pre_encode and config.model.transformer.get('use_empty_embeds_for_uncond', False): empty_input = tokenizer('', padding='max_length', return_tensors='pt').input_ids.to(accelerator.device) outputs = text_encoder(empty_input, output_hidden_states=True) if config.model.transformer.get('add_cond_embeds', False): empty_embeds = outputs.hidden_states[-2] empty_clip_embeds = outputs[0] else: empty_embeds = outputs.last_hidden_state empty_clip_embeds = None else: empty_embeds = None empty_clip_embeds = None if config.training.overfit_one_batch: train_dataloader = [next(iter(train_dataloader))] num_update_steps_per_epoch = math.ceil(train_dataloader.num_batches / config.training.gradient_accumulation_steps) num_train_epochs = math.ceil(config.training.max_train_steps / num_update_steps_per_epoch) logger.info('***** Running training *****') logger.info(f' Num training steps = {config.training.max_train_steps}') logger.info(f' Instantaneous batch size per device = {config.training.batch_size}') logger.info(f' Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}') logger.info(f' Gradient Accumulation steps = {config.training.gradient_accumulation_steps}') global_step = 0 first_epoch = 0 resume_from_checkpoint = config.experiment.resume_from_checkpoint if resume_from_checkpoint: if resume_from_checkpoint != 'latest': path = resume_from_checkpoint else: dirs = os.listdir(config.experiment.output_dir) dirs = [d for d in dirs if d.startswith('checkpoint')] dirs = sorted(dirs, key=lambda x: int(x.split('-')[1])) path = dirs[-1] if len(dirs) > 0 else None if path is not None: path = os.path.join(config.experiment.output_dir, path) if path is None: accelerator.print(f"Checkpoint '{resume_from_checkpoint}' does not exist. Starting a new training run.") resume_from_checkpoint = None else: accelerator.print(f'Resuming from checkpoint {path}') resume_lr_scheduler = config.experiment.get('resume_lr_scheduler', True) dont_resume_optimizer = config.experiment.get('dont_resume_optimizer', False) if not resume_lr_scheduler: logger.info('Not resuming the lr scheduler.') accelerator._schedulers = [] if dont_resume_optimizer: logger.info('Not resuming the optimizer.') accelerator._optimizers = [] grad_scaler = accelerator.scaler accelerator.scaler = None accelerator.load_state(path) if not resume_lr_scheduler: accelerator._schedulers = [lr_scheduler] if dont_resume_optimizer: accelerator._optimizers = [optimizer] accelerator.scaler = grad_scaler global_step = int(os.path.basename(path).split('-')[1]) first_epoch = global_step // num_update_steps_per_epoch @torch.no_grad() def prepare_inputs_and_labels(pixel_values_or_image_ids: Union[torch.FloatTensor, torch.LongTensor], text_input_ids_or_embeds: Union[torch.LongTensor, torch.LongTensor], min_masking_rate: float=0.0, batch: Any=None, is_train: bool=True): if is_pre_encode: image_tokens = pixel_values_or_image_ids soft_targets = None elif config.training.use_soft_code_target and is_train: (soft_targets, image_tokens) = vq_model.get_soft_code(pixel_values_or_image_ids, temp=config.training.soft_code_temp, stochastic=config.training.use_stochastic_code) else: soft_targets = None if config.training.get('split_vae_encode', False): split_batch_size = config.training.split_vae_encode batch_size = pixel_values_or_image_ids.shape[0] num_splits = math.ceil(batch_size / split_batch_size) image_tokens = [] for i in range(num_splits): start_idx = i * split_batch_size end_idx = min((i + 1) * split_batch_size, batch_size) image_tokens.append(vq_model.get_code(pixel_values_or_image_ids[start_idx:end_idx])) image_tokens = torch.cat(image_tokens, dim=0) else: image_tokens = vq_model.get_code(pixel_values_or_image_ids) if not is_pre_encode: if config.model.transformer.get('add_cond_embeds', False): outputs = text_encoder(text_input_ids_or_embeds, return_dict=True, output_hidden_states=True) encoder_hidden_states = outputs.hidden_states[-2] clip_embeds = outputs[0] else: encoder_hidden_states = text_encoder(text_input_ids_or_embeds)[0] clip_embeds = None if config.model.transformer.get('add_micro_cond_embeds', False): original_sizes = list(map(list, zip(*batch['orig_size']))) crop_coords = list(map(list, zip(*batch['crop_coords']))) aesthetic_scores = batch['aesthetic_score'] micro_conds = torch.cat([torch.tensor(original_sizes), torch.tensor(crop_coords), aesthetic_scores.unsqueeze(-1)], dim=-1) micro_conds = micro_conds.to(encoder_hidden_states.device, dtype=encoder_hidden_states.dtype, non_blocking=True) else: micro_conds = None else: encoder_hidden_states = 
text_input_ids_or_embeds clip_embeds = None (input_ids, labels, loss_weight, mask_prob) = mask_or_random_replace_tokens(image_tokens, mask_id, config, mask_schedule=mask_schedule, is_train=is_train) return (input_ids, encoder_hidden_states, labels, soft_targets, mask_prob, loss_weight, clip_embeds, micro_conds) batch_time_m = AverageMeter() data_time_m = AverageMeter() end = time.time() for epoch in range(first_epoch, num_train_epochs): model.train() for batch in train_dataloader: if is_pre_encode: (pixel_values, input_ids) = (batch['image_input_ids'], batch['encoder_hidden_states']) else: (pixel_values, input_ids) = (batch['image'], batch['input_ids']) pixel_values = pixel_values.to(accelerator.device, non_blocking=True) input_ids = input_ids.to(accelerator.device, non_blocking=True) data_time_m.update(time.time() - end) (input_ids, encoder_hidden_states, labels, soft_targets, mask_prob, loss_weight, clip_embeds, micro_conds) = prepare_inputs_and_labels(pixel_values, input_ids, config.training.min_masking_rate, batch=batch) if global_step == 0 and epoch == 0: logger.info('Input ids: {}'.format(input_ids)) logger.info('Labels: {}'.format(labels)) if config.training.cond_dropout_prob > 0.0: assert encoder_hidden_states is not None batch_size = encoder_hidden_states.shape[0] mask = torch.zeros((batch_size, 1, 1), device=encoder_hidden_states.device).float().uniform_(0, 1) < config.training.cond_dropout_prob empty_embeds_ = empty_embeds.expand(batch_size, -1, -1) encoder_hidden_states = torch.where((encoder_hidden_states * mask).bool(), encoder_hidden_states, empty_embeds_) empty_clip_embeds_ = empty_clip_embeds.expand(batch_size, -1) cond_embeds = torch.where((clip_embeds * mask.squeeze(-1)).bool(), clip_embeds, empty_clip_embeds_) else: cond_embeds = clip_embeds with accelerator.accumulate(model): if config.training.use_soft_code_target: logits = model(input_ids=input_ids, encoder_hidden_states=encoder_hidden_states) loss = soft_target_cross_entropy(logits, labels, soft_targets) else: (logits, loss) = model(input_ids=input_ids, encoder_hidden_states=encoder_hidden_states, labels=labels, label_smoothing=config.training.label_smoothing, cond_embeds=cond_embeds, loss_weight=loss_weight, micro_conds=micro_conds) avg_loss = accelerator.gather(loss.repeat(config.training.batch_size)).mean() avg_masking_rate = accelerator.gather(mask_prob.repeat(config.training.batch_size)).mean() accelerator.backward(loss) if config.training.max_grad_norm is not None and accelerator.sync_gradients: accelerator.clip_grad_norm_(model.parameters(), config.training.max_grad_norm) optimizer.step() lr_scheduler.step() if accelerator.sync_gradients and (global_step + 1) % config.experiment.log_grad_norm_every == 0 and accelerator.is_main_process: log_grad_norm(model, accelerator, global_step + 1) if optimizer_type == 'fused_adamw': optimizer.zero_grad() else: optimizer.zero_grad(set_to_none=True) if accelerator.sync_gradients: if config.training.get('use_ema', False): ema.step(model.parameters()) batch_time_m.update(time.time() - end) end = time.time() if (global_step + 1) % config.experiment.log_every == 0: samples_per_second_per_gpu = config.training.gradient_accumulation_steps * config.training.batch_size / batch_time_m.val logs = {'step_loss': avg_loss.item(), 'lr': lr_scheduler.get_last_lr()[0], 'avg_masking_rate': avg_masking_rate.item(), 'samples/sec/gpu': samples_per_second_per_gpu, 'data_time': data_time_m.val, 'batch_time': batch_time_m.val} accelerator.log(logs, step=global_step + 1) logger.info(f'Step: {global_step + 1} Loss: 
{avg_loss.item():0.4f} Data (t): {data_time_m.val:0.4f}, {samples_per_second_per_gpu:0.2f}/s/gpu Batch (t): {batch_time_m.val:0.4f} LR: {lr_scheduler.get_last_lr()[0]:0.6f}') batch_time_m.reset() data_time_m.reset() if 'log_pixel_entropy_every' in config.experiment and (global_step + 1) % config.experiment.log_pixel_entropy_every == 0 and accelerator.is_main_process: log_pixel_entropy(logits, input_ids, mask_id, accelerator, global_step + 1) if 'log_image_entropy_every' in config.experiment and (global_step + 1) % config.experiment.log_image_entropy_every == 0 and accelerator.is_main_process: log_image_entropy(logits, input_ids, mask_id, accelerator, global_step + 1) if 'log_cross_entropy_every' in config.experiment and (global_step + 1) % config.experiment.log_cross_entropy_every == 0 and accelerator.is_main_process: log_cross_entropy(logits, labels, input_ids, mask_id, output_size, config.training.label_smoothing, accelerator, global_step + 1) if 'log_token_probability_distributions_every' in config.experiment and (global_step + 1) % config.experiment.log_token_probability_distributions_every == 0 and accelerator.is_main_process: log_token_probability_distributions(logits, input_ids, mask_id, accelerator, global_step + 1) if (global_step + 1) % config.experiment.save_every == 0: save_checkpoint(model, config, accelerator, global_step + 1) if (global_step + 1) % config.experiment.eval_every == 0 and accelerator.is_main_process: if config.training.get('use_ema', False): ema.store(model.parameters()) ema.copy_to(model.parameters()) validate_model(model, eval_dataloader, accelerator, global_step + 1, prepare_inputs_and_labels, config.experiment.get('max_eval_examples', None)) if config.training.get('use_ema', False): ema.restore(model.parameters()) if (global_step + 1) % config.experiment.generate_every == 0 and accelerator.is_main_process: if config.training.get('use_ema', False): ema.store(model.parameters()) ema.copy_to(model.parameters()) generate_images(model, vq_model, text_encoder, tokenizer, accelerator, config, global_step + 1, mask_schedule=mask_schedule, empty_embeds=empty_embeds, empty_clip_embeds=empty_clip_embeds) generate_inpainting_images(model, vq_model, text_encoder, tokenizer, accelerator, config, global_step + 1, mask_schedule=mask_schedule, empty_embeds=empty_embeds, empty_clip_embeds=empty_clip_embeds) if config.training.get('use_ema', False): ema.restore(model.parameters()) global_step += 1 if global_step >= config.training.max_train_steps: break accelerator.wait_for_everyone() if accelerator.is_main_process: validate_model(model, eval_dataloader, accelerator, global_step, prepare_inputs_and_labels, config.experiment.get('max_eval_examples', None)) save_checkpoint(model, config, accelerator, global_step) if accelerator.is_main_process: model = accelerator.unwrap_model(model) if config.training.get('use_ema', False): ema.copy_to(model.parameters()) model.save_pretrained(config.experiment.output_dir) accelerator.end_training() @torch.no_grad() def validate_model(model, eval_dataloader, accelerator, global_step, prepare_inputs_and_labels, max_eval_examples=None): logger.info('Evaluating...') model.eval() eval_loss = 0 now = time.time() samples_taken = 0 for (i, batch) in enumerate(eval_dataloader): (pixel_values, input_ids) = (batch['image'], batch['input_ids']) pixel_values = pixel_values.to(accelerator.device, non_blocking=True) input_ids = input_ids.to(accelerator.device, non_blocking=True) (input_ids, encoder_hidden_states, labels, _, _, loss_weight, clip_embeds, 
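# NOTE (editor): the two discarded fields are the soft targets and the sampled masking rate;
# loss_weight and micro_conds are None unless the corresponding config options are enabled.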
micro_conds) = prepare_inputs_and_labels(pixel_values, input_ids, batch=batch, is_train=False) (_, loss) = model(input_ids=input_ids, encoder_hidden_states=encoder_hidden_states, labels=labels, cond_embeds=clip_embeds, loss_weight=loss_weight, micro_conds=micro_conds) eval_loss += loss.mean() samples_taken += input_ids.shape[0] if max_eval_examples is not None and samples_taken >= max_eval_examples: break eval_loss = eval_loss / (i + 1) eval_time = time.time() - now logger.info(f'Step: {global_step} Eval Loss: {eval_loss.item():0.4f} Eval time: {eval_time:0.2f} s') accelerator.log({'eval_loss': eval_loss.item()}, step=global_step) model.train() @torch.no_grad() def generate_images(model, vq_model, text_encoder, tokenizer, accelerator, config, global_step, mask_schedule, empty_embeds=None, empty_clip_embeds=None): logger.info('Generating images...') model.eval() imagenet_class_names = ['jay', 'castle', 'coffee mug', 'desk', 'Eskimo dog, husky', 'valley, vale', 'red wine', 'coral reef', 'mixing bowl', 'cleaver, meat cleaver, chopper', 'vine snake', 'bloodhound, sleuthhound', 'barbershop', 'ski', 'otter', 'snowmobile'] if config.dataset.params.validation_prompts_file is not None: with open(config.dataset.params.validation_prompts_file, 'r') as f: validation_prompts = f.read().splitlines() else: validation_prompts = imagenet_class_names if config.training.get('pre_encode', False): if config.model.text_encoder.type == 'clip': text_encoder = CLIPTextModel.from_pretrained(config.model.text_encoder.pretrained) tokenizer = CLIPTokenizer.from_pretrained(config.model.text_encoder.pretrained) elif config.model.text_encoder.type == 't5': text_encoder = T5EncoderModel.from_pretrained(config.model.text_encoder.pretrained) tokenizer = T5Tokenizer.from_pretrained(config.model.text_encoder.pretrained) else: raise ValueError(f'Unknown text model type: {config.model.text_encoder.type}') vq_class = get_vq_model_class(config.model.vq_model.type) vq_model = vq_class.from_pretrained(config.model.vq_model.pretrained) if accelerator.mixed_precision == 'fp16': weight_dtype = torch.float16 elif accelerator.mixed_precision == 'bf16': weight_dtype = torch.bfloat16 text_encoder.to(device=accelerator.device, dtype=weight_dtype) vq_model.to(accelerator.device) input_ids = tokenizer(validation_prompts, return_tensors='pt', padding='max_length', truncation=True, max_length=config.dataset.preprocessing.max_seq_length).input_ids if config.model.transformer.get('add_cond_embeds', False): outputs = text_encoder(input_ids.to(accelerator.device), return_dict=True, output_hidden_states=True) encoder_hidden_states = outputs.hidden_states[-2] clip_embeds = outputs[0] else: encoder_hidden_states = text_encoder(input_ids.to(accelerator.device)).last_hidden_state clip_embeds = None if config.model.transformer.get('add_micro_cond_embeds', False): resolution = config.dataset.preprocessing.resolution micro_conds = torch.tensor([resolution, resolution, 0, 0, 6], device=encoder_hidden_states.device, dtype=encoder_hidden_states.dtype) micro_conds = micro_conds.unsqueeze(0).repeat(encoder_hidden_states.shape[0], 1) if config.training.get('pre_encode', False): del text_encoder with torch.autocast('cuda', dtype=encoder_hidden_states.dtype, enabled=accelerator.mixed_precision != 'no'): gen_token_ids = accelerator.unwrap_model(model).generate2(encoder_hidden_states=encoder_hidden_states, cond_embeds=clip_embeds, empty_embeds=empty_embeds, empty_cond_embeds=empty_clip_embeds, micro_conds=micro_conds, guidance_scale=config.training.guidance_scale, 
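# NOTE (editor): generate2 performs MaskGIT-style iterative decoding as in the MUSE paper:
# over `timesteps` rounds it re-predicts the masked tokens, keeps the most confident ones per
# the noise schedule, re-masks the rest, and applies classifier-free guidance against the
# empty-prompt embeddings when guidance_scale > 0.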
temperature=config.training.get('generation_temperature', 1.0), timesteps=config.training.generation_timesteps, noise_schedule=mask_schedule, noise_type=config.training.get('noise_type', 'mask'), predict_all_tokens=config.training.get('predict_all_tokens', False), seq_len=config.model.transformer.num_vq_tokens) gen_token_ids = torch.clamp(gen_token_ids, max=accelerator.unwrap_model(model).config.codebook_size - 1) if config.training.get('split_vae_encode', False): split_batch_size = config.training.split_vae_encode batch_size = gen_token_ids.shape[0] num_splits = math.ceil(batch_size / split_batch_size) images = [] for i in range(num_splits): start_idx = i * split_batch_size end_idx = min((i + 1) * split_batch_size, batch_size) images.append(vq_model.decode_code(gen_token_ids[start_idx:end_idx])) images = torch.cat(images, dim=0) else: images = vq_model.decode_code(gen_token_ids) model.train() if config.training.get('pre_encode', False): del vq_model images = 2.0 * images - 1.0 images = torch.clamp(images, -1.0, 1.0) images = (images + 1.0) / 2.0 images *= 255.0 images = images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8) pil_images = [Image.fromarray(image) for image in images] wandb_images = [wandb.Image(image, caption=validation_prompts[i]) for (i, image) in enumerate(pil_images)] wandb.log({'generated_images': wandb_images}, step=global_step) @torch.no_grad() def generate_inpainting_images(model, vq_model, text_encoder, tokenizer, accelerator, config, global_step, mask_schedule, empty_embeds=None, empty_clip_embeds=None): assert not config.training.get('pre_encode', False) model.eval() mask_token_id = config.model.transformer.vocab_size - 1 (validation_prompts, validation_images, validation_masks) = inpainting_validation_data() validation_masks = validation_masks_to_latent_tensors(validation_masks).to(accelerator.device) validation_images = torch.stack([TF.to_tensor(x) for x in validation_images]) validation_images = validation_images.to(accelerator.device) (_, validation_images) = vq_model.encode(validation_images) validation_images[validation_masks] = mask_token_id token_input_ids = tokenizer(validation_prompts, return_tensors='pt', padding='max_length', truncation=True, max_length=config.dataset.preprocessing.max_seq_length).input_ids if config.model.transformer.get('add_cond_embeds', False): outputs = text_encoder(token_input_ids.to(accelerator.device), return_dict=True, output_hidden_states=True) encoder_hidden_states = outputs.hidden_states[-2] clip_embeds = outputs[0] else: encoder_hidden_states = text_encoder(token_input_ids.to(accelerator.device)).last_hidden_state clip_embeds = None if config.model.transformer.get('add_micro_cond_embeds', False): resolution = config.dataset.preprocessing.resolution micro_conds = torch.tensor([resolution, resolution, 0, 0, 6], device=encoder_hidden_states.device, dtype=encoder_hidden_states.dtype) micro_conds = micro_conds.unsqueeze(0).repeat(encoder_hidden_states.shape[0], 1) with torch.autocast('cuda', dtype=encoder_hidden_states.dtype, enabled=accelerator.mixed_precision != 'no'): gen_token_ids = accelerator.unwrap_model(model).generate2(input_ids=validation_images, encoder_hidden_states=encoder_hidden_states, cond_embeds=clip_embeds, empty_embeds=empty_embeds, empty_cond_embeds=empty_clip_embeds, micro_conds=micro_conds, guidance_scale=config.training.guidance_scale, temperature=config.training.get('generation_temperature', 1.0), timesteps=config.training.generation_timesteps, noise_schedule=mask_schedule, 
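# NOTE (editor): for inpainting, input_ids arrive with the masked region pre-filled with
# mask_token_id (see validation_images[validation_masks] above), so only those positions are re-sampled.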
noise_type=config.training.get('noise_type', 'mask'), predict_all_tokens=config.training.get('predict_all_tokens', False)) gen_token_ids = torch.clamp(gen_token_ids, max=accelerator.unwrap_model(model).config.codebook_size - 1) if config.training.get('split_vae_encode', False): split_batch_size = config.training.split_vae_encode batch_size = gen_token_ids.shape[0] num_splits = math.ceil(batch_size / split_batch_size) images = [] for i in range(num_splits): start_idx = i * split_batch_size end_idx = min((i + 1) * split_batch_size, batch_size) images.append(vq_model.decode_code(gen_token_ids[start_idx:end_idx])) images = torch.cat(images, dim=0) else: images = vq_model.decode_code(gen_token_ids) images = 2.0 * images - 1.0 images = torch.clamp(images, -1.0, 1.0) images = (images + 1.0) / 2.0 images *= 255.0 images = images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8) pil_images = [Image.fromarray(image) for image in images] wandb_images = [wandb.Image(image, caption=validation_prompts[i]) for (i, image) in enumerate(pil_images)] wandb.log({'generated_inpainting_images': wandb_images}, step=global_step) model.train() def inpainting_validation_data(): validation_prompts = [] validation_images = [] validation_masks = [] for folder_name in os.listdir('./inpainting_validation'): validation_prompts.append(folder_name) image = None mask = None for file_name in os.listdir(f'./inpainting_validation/{folder_name}'): if file_name.startswith('image'): image = Image.open(f'./inpainting_validation/{folder_name}/{file_name}') if file_name.startswith('mask'): mask = Image.open(f'./inpainting_validation/{folder_name}/{file_name}').convert('L') assert image is not None, f'could not find inpainting validation image under {folder_name}' assert mask is not None, f'could not find inpainting validation mask under {folder_name}' validation_images.append(image) validation_masks.append(mask) return (validation_prompts, validation_images, validation_masks) def validation_masks_to_latent_tensors(validation_masks): validation_masks_ = [] for mask in validation_masks: mask = mask.resize((mask.width // 16, mask.height // 16)) mask = np.array(mask) mask = mask / 255 mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = mask.reshape(-1) mask = mask.astype(bool) validation_masks_.append(mask) validation_masks_ = np.stack(validation_masks_) return torch.from_numpy(validation_masks_) def save_checkpoint(model, config, accelerator, global_step): output_dir = config.experiment.output_dir checkpoints_total_limit = config.experiment.get('checkpoints_total_limit', None) if accelerator.is_main_process and checkpoints_total_limit is not None: checkpoints = os.listdir(output_dir) checkpoints = [d for d in checkpoints if d.startswith('checkpoint')] checkpoints = sorted(checkpoints, key=lambda x: int(x.split('-')[1])) if len(checkpoints) >= checkpoints_total_limit: num_to_remove = len(checkpoints) - checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info(f'{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints') logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = Path(output_dir) / f'checkpoint-{global_step}' state_dict = accelerator.get_state_dict(model) if accelerator.is_main_process: unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(save_path / 
'unwrapped_model', save_function=accelerator.save, state_dict=state_dict) json.dump({'global_step': global_step}, (save_path / 'metadata.json').open('w+')) accelerator.save_state(save_path) logger.info(f'Saved state to {save_path}') def log_grad_norm(model, accelerator, global_step): for (name, param) in model.named_parameters(): if param.grad is not None: grads = param.grad.detach().data grad_norm = (grads.norm(p=2) / grads.numel()).item() accelerator.log({'grad_norm/' + name: grad_norm}, step=global_step) @torch.no_grad() def log_pixel_entropy(logits, input_ids, mask_id, accelerator, global_step): pixel_entropy_per_percent_masked_bucket = muse.training_utils.pixel_entropy_per_percent_masked_bucket(logits, input_ids, mask_id) entropy_log = {} for (bucket, bucket_entropy) in enumerate(pixel_entropy_per_percent_masked_bucket): bucket_entropy = bucket_entropy.item() if bucket_entropy != 0: entropy_log[f'bucket {bucket}'] = bucket_entropy accelerator.log({'pixel_entropy/stats': entropy_log}, step=global_step) @torch.no_grad() def log_image_entropy(logits, input_ids, mask_id, accelerator, global_step): image_entropy_per_percent_masked_bucket = muse.training_utils.image_entropy_per_percent_masked_bucket(logits, input_ids, mask_id) entropy_log = {} for (bucket, bucket_entropy) in enumerate(image_entropy_per_percent_masked_bucket): bucket_entropy = bucket_entropy.item() if bucket_entropy != 0: entropy_log[f'bucket {bucket}'] = bucket_entropy accelerator.log({'image_entropy/stats': entropy_log}, step=global_step) @torch.no_grad() def log_cross_entropy(logits, labels, input_ids, mask_id, output_size, label_smoothing, accelerator, global_step): cross_entropy_per_percent_masked_bucket = muse.training_utils.cross_entropy_per_percent_masked_bucket(logits, labels, input_ids, mask_id, output_size, label_smoothing) cross_entropy_log = {} for (bucket, bucket_cross_entropy) in enumerate(cross_entropy_per_percent_masked_bucket): bucket_cross_entropy = bucket_cross_entropy.item() if bucket_cross_entropy != 0: cross_entropy_log[f'bucket {bucket}'] = bucket_cross_entropy accelerator.log({'cross_entropy/stats': cross_entropy_log}, step=global_step) @torch.no_grad() def log_token_probability_distributions(logits, input_ids, mask_id, accelerator, global_step): token_probability_distributions = muse.training_utils.token_probability_distributions_per_percent_masked_bucket(logits, input_ids, mask_id) token_probability_distributions_fig = px.histogram(token_probability_distributions, x='masked_pixel_prob', color='bucket', color_discrete_sequence=px.colors.qualitative.Plotly, marginal='rug') accelerator.log({'token_probability_distributions/stats': token_probability_distributions_fig}, step=global_step) if __name__ == '__main__': main()
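# ---------------------------------------------------------------------------
# Editor's note: illustrative sanity check, not part of the repository. With
# one-hot soft targets, soft_target_cross_entropy (defined above) should agree
# with F.cross_entropy using ignore_index=-100, since both strip the prefix
# token at position 0 and average over the active (non -100) positions.
import torch
import torch.nn.functional as F


def _soft_target_cross_entropy_check():
    batch, seq_len, vocab = 2, 5, 7
    logits = torch.randn(batch, seq_len + 1, vocab)  # +1 for the stripped prefix token
    labels = torch.randint(0, vocab, (batch, seq_len + 1))
    labels[:, 0] = -100  # the prefix/condition token is never a target
    labels[0, 3] = -100  # an arbitrary unmasked position, also ignored
    # build one-hot soft targets aligned with labels[:, 1:]
    soft_targets = F.one_hot(labels.clamp(min=0), vocab).float()[:, 1:]
    expected = F.cross_entropy(
        logits[:, 1:].reshape(-1, vocab), labels[:, 1:].reshape(-1), ignore_index=-100
    )
    actual = soft_target_cross_entropy(logits, labels, soft_targets)
    assert torch.allclose(actual, expected, atol=1e-05)
# ---------------------------------------------------------------------------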