# File: pytorch-image-models-main/avg_checkpoints.py
import argparse
import glob
import hashlib
import os

import torch

from timm.models import load_state_dict

try:
    import safetensors.torch
    _has_safetensors = True
except ImportError:
    _has_safetensors = False

DEFAULT_OUTPUT = './averaged.pth'
DEFAULT_SAFE_OUTPUT = './averaged.safetensors'

parser = argparse.ArgumentParser(description='PyTorch Checkpoint Averager')
parser.add_argument('--input', default='', type=str, metavar='PATH', help='path to base input folder containing checkpoints')
parser.add_argument('--filter', default='*.pth.tar', type=str, metavar='WILDCARD', help='checkpoint filter (path wildcard)')
parser.add_argument('--output', default=DEFAULT_OUTPUT, type=str, metavar='PATH', help=f'Output filename. Defaults to {DEFAULT_SAFE_OUTPUT} when passing --safetensors.')
parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true', help='Force not using ema version of weights (if present)')
parser.add_argument('--no-sort', dest='no_sort', action='store_true', help='Do not sort and select by checkpoint metric, also makes "n" argument irrelevant')
parser.add_argument('-n', type=int, default=10, metavar='N', help='Number of checkpoints to average')
parser.add_argument('--safetensors', action='store_true', help='Save weights using safetensors instead of the default torch way (pickle).')


def checkpoint_metric(checkpoint_path):
    if not checkpoint_path or not os.path.isfile(checkpoint_path):
        return {}
    print("=> Extracting metric from checkpoint '{}'".format(checkpoint_path))
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    metric = None
    if 'metric' in checkpoint:
        metric = checkpoint['metric']
    elif 'metrics' in checkpoint and 'metric_name' in checkpoint:
        metrics = checkpoint['metrics']
        print(metrics)
        metric = metrics[checkpoint['metric_name']]
    return metric


def main():
    args = parser.parse_args()
    args.use_ema = not args.no_use_ema
    args.sort = not args.no_sort

    if args.safetensors and args.output == DEFAULT_OUTPUT:
        # Default output filename changes if safetensors requested.
        args.output = DEFAULT_SAFE_OUTPUT

    output, output_ext = os.path.splitext(args.output)
    if not output_ext:
        output_ext = '.safetensors' if args.safetensors else '.pth'
    output = output + output_ext

    if args.safetensors and not output_ext == '.safetensors':
        print(f"Warning: saving weights as safetensors but output file extension is not set to '.safetensors': {args.output}")

    if os.path.exists(output):
        print('Error: Output filename ({}) already exists.'.format(output))
        exit(1)

    pattern = args.input
    if not args.input.endswith(os.path.sep) and not args.filter.startswith(os.path.sep):
        pattern += os.path.sep
    pattern += args.filter
    checkpoints = glob.glob(pattern, recursive=True)

    if args.sort:
        checkpoint_metrics = []
        for c in checkpoints:
            metric = checkpoint_metric(c)
            if metric is not None:
                checkpoint_metrics.append((metric, c))
        checkpoint_metrics = list(sorted(checkpoint_metrics))
        checkpoint_metrics = checkpoint_metrics[-args.n:]
        if checkpoint_metrics:
            print('Selected checkpoints:')
            [print(m, c) for m, c in checkpoint_metrics]
        avg_checkpoints = [c for m, c in checkpoint_metrics]
    else:
        avg_checkpoints = checkpoints
        if avg_checkpoints:
            print('Selected checkpoints:')
            [print(c) for c in checkpoints]

    if not avg_checkpoints:
        print('Error: No checkpoints found to average.')
        exit(1)

    avg_state_dict = {}
    avg_counts = {}
    for c in avg_checkpoints:
        new_state_dict = load_state_dict(c, args.use_ema)
        if not new_state_dict:
            print(f"Error: Checkpoint ({c}) doesn't exist")
            continue
        for k, v in new_state_dict.items():
            if k not in avg_state_dict:
                avg_state_dict[k] = v.clone().to(dtype=torch.float64)
                avg_counts[k] = 1
            else:
                avg_state_dict[k] += v.to(dtype=torch.float64)
                avg_counts[k] += 1

    for k, v in avg_state_dict.items():
        v.div_(avg_counts[k])

    # Re-cast averaged weights back to float32, clamping to the representable range.
    float32_info = torch.finfo(torch.float32)
    final_state_dict = {}
    for k, v in avg_state_dict.items():
        v = v.clamp(float32_info.min, float32_info.max)
        final_state_dict[k] = v.to(dtype=torch.float32)

    if args.safetensors:
        assert _has_safetensors, '`pip install safetensors` to use .safetensors'
        safetensors.torch.save_file(final_state_dict, output)
    else:
        torch.save(final_state_dict, output)

    with open(output, 'rb') as f:
        sha_hash = hashlib.sha256(f.read()).hexdigest()
    print(f"=> Saved state_dict to '{output}', SHA256: {sha_hash}")


if __name__ == '__main__':
    main()
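# The averaging loop above accumulates weights in float64 and clamps back to
# float32 at the end. A minimal, self-contained sketch of just that arithmetic
# (toy tensors, not real checkpoints):
import torch as _torch_demo


def average_tensors(tensors):
    # Accumulate in float64 to limit rounding error across many checkpoints.
    acc = tensors[0].clone().to(dtype=_torch_demo.float64)
    for t in tensors[1:]:
        acc += t.to(dtype=_torch_demo.float64)
    acc.div_(len(tensors))
    # Clamp to the float32 representable range before downcasting, as above.
    f32 = _torch_demo.finfo(_torch_demo.float32)
    return acc.clamp(f32.min, f32.max).to(dtype=_torch_demo.float32)


# Averaging three toy "weights" yields their elementwise mean.
print(average_tensors([_torch_demo.ones(2), _torch_demo.full((2,), 2.0), _torch_demo.full((2,), 3.0)]))  # tensor([2., 2.])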
# File: pytorch-image-models-main/benchmark.py
import argparse
import csv
import json
import logging
import time
from collections import OrderedDict
from contextlib import suppress
from functools import partial

import torch
import torch.nn as nn
import torch.nn.parallel

from timm.data import resolve_data_config
from timm.layers import set_fast_norm
from timm.models import create_model, is_model, list_models
from timm.optim import create_optimizer_v2
from timm.utils import setup_default_logging, set_jit_fuser, decay_batch_step, check_batch_size_retry, ParseKwargs, reparameterize_model

has_apex = False
try:
    from apex import amp
    has_apex = True
except ImportError:
    pass

has_native_amp = False
try:
    if getattr(torch.cuda.amp, 'autocast') is not None:
        has_native_amp = True
except AttributeError:
    pass

try:
    from deepspeed.profiling.flops_profiler import get_model_profile
    has_deepspeed_profiling = True
except ImportError as e:
    has_deepspeed_profiling = False

try:
    from fvcore.nn import FlopCountAnalysis, flop_count_str, ActivationCountAnalysis
    has_fvcore_profiling = True
except ImportError as e:
    FlopCountAnalysis = None
    has_fvcore_profiling = False

try:
    from functorch.compile import memory_efficient_fusion
    has_functorch = True
except ImportError as e:
    has_functorch = False

has_compile = hasattr(torch, 'compile')

if torch.cuda.is_available():
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.benchmark = True

_logger = logging.getLogger('validate')

parser = argparse.ArgumentParser(description='PyTorch Benchmark')

parser.add_argument('--model-list', metavar='NAME', default='', help='txt file based list of model names to benchmark')
parser.add_argument('--bench', default='both', type=str, help="Benchmark mode. One of 'inference', 'train', 'both'. Defaults to 'both'")
parser.add_argument('--detail', action='store_true', default=False, help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False')
parser.add_argument('--no-retry', action='store_true', default=False, help='Do not decay batch size and retry on error.')
parser.add_argument('--results-file', default='', type=str, help='Output csv file for validation results (summary)')
parser.add_argument('--results-format', default='csv', type=str, help='Format for results file one of (csv, json) (default: csv).')
parser.add_argument('--num-warm-iter', default=10, type=int, help='Number of warmup iterations (default: 10)')
parser.add_argument('--num-bench-iter', default=40, type=int, help='Number of benchmark iterations (default: 40)')
parser.add_argument('--device', default='cuda', type=str, help='device to run benchmark on')
parser.add_argument('--model', '-m', metavar='NAME', default='resnet50', help='model architecture (default: resnet50)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False, help='Run inference at train size, not test-input-size if it exists.')
parser.add_argument('--num-classes', type=int, default=None, help='Number of classes in dataset')
parser.add_argument('--gp', default=None, type=str, metavar='POOL', help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--channels-last', action='store_true', default=False, help='Use channels_last memory layout')
parser.add_argument('--grad-checkpointing', action='store_true', default=False, help='Enable gradient checkpointing through model blocks/stages')
parser.add_argument('--amp', action='store_true', default=False, help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.')
parser.add_argument('--amp-dtype', default='float16', type=str, help='lower precision AMP dtype (default: float16). Overrides --precision arg if args.amp True.')
parser.add_argument('--precision', default='float32', type=str, help='Numeric precision. One of (amp, float32, float16, bfloat16, tf32)')
parser.add_argument('--fuser', default='', type=str, help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
One of ('', 'te', 'old', 'nvfuser')") parser.add_argument('--fast-norm', default=False, action='store_true', help='enable experimental fast-norm') parser.add_argument('--reparam', default=False, action='store_true', help='Reparameterize model') parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs) scripting_group = parser.add_mutually_exclusive_group() scripting_group.add_argument('--torchscript', dest='torchscript', action='store_true', help='convert model torchscript for inference') scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor', help='Enable compilation w/ specified backend (default: inductor).') scripting_group.add_argument('--aot-autograd', default=False, action='store_true', help='Enable AOT Autograd optimization.') parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', help='Optimizer (default: "sgd"') parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: None, use opt default)') parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='Optimizer momentum (default: 0.9)') parser.add_argument('--weight-decay', type=float, default=0.0001, help='weight decay (default: 0.0001)') parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--clip-mode', type=str, default='norm', help='Gradient clipping mode. One of ("norm", "value", "agc")') parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--drop-path', type=float, default=None, metavar='PCT', help='Drop path rate (default: None)') parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', help='Drop block rate (default: None)') def timestamp(sync=False): return time.perf_counter() def cuda_timestamp(sync=False, device=None): if sync: torch.cuda.synchronize(device=device) return time.perf_counter() def count_params(model: nn.Module): return sum([m.numel() for m in model.parameters()]) def resolve_precision(precision: str): assert precision in ('amp', 'amp_bfloat16', 'float16', 'bfloat16', 'float32') amp_dtype = None model_dtype = torch.float32 data_dtype = torch.float32 if precision == 'amp': amp_dtype = torch.float16 elif precision == 'amp_bfloat16': amp_dtype = torch.bfloat16 elif precision == 'float16': model_dtype = torch.float16 data_dtype = torch.float16 elif precision == 'bfloat16': model_dtype = torch.bfloat16 data_dtype = torch.bfloat16 return (amp_dtype, model_dtype, data_dtype) def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False): (_, macs, _) = get_model_profile(model=model, input_shape=(batch_size,) + input_size, print_profile=detailed, detailed=detailed, warm_up=10, as_string=False, output_file=None, ignore_modules=None) return (macs, 0) def profile_fvcore(model, input_size=(3, 224, 224), batch_size=1, detailed=False, force_cpu=False): if force_cpu: model = model.to('cpu') (device, dtype) = (next(model.parameters()).device, next(model.parameters()).dtype) example_input = torch.ones((batch_size,) + input_size, device=device, dtype=dtype) fca = FlopCountAnalysis(model, example_input) aca = 
class BenchmarkRunner:
    def __init__(
            self,
            model_name,
            detail=False,
            device='cuda',
            torchscript=False,
            torchcompile=None,
            aot_autograd=False,
            reparam=False,
            precision='float32',
            fuser='',
            num_warm_iter=10,
            num_bench_iter=50,
            use_train_size=False,
            **kwargs,
    ):
        self.model_name = model_name
        self.detail = detail
        self.device = device
        self.amp_dtype, self.model_dtype, self.data_dtype = resolve_precision(precision)
        self.channels_last = kwargs.pop('channels_last', False)
        if self.amp_dtype is not None:
            self.amp_autocast = partial(torch.cuda.amp.autocast, dtype=self.amp_dtype)
        else:
            self.amp_autocast = suppress

        if fuser:
            set_jit_fuser(fuser)
        self.model = create_model(
            model_name,
            num_classes=kwargs.pop('num_classes', None),
            in_chans=3,
            global_pool=kwargs.pop('gp', 'fast'),
            scriptable=torchscript,
            drop_rate=kwargs.pop('drop', 0.0),
            drop_path_rate=kwargs.pop('drop_path', None),
            drop_block_rate=kwargs.pop('drop_block', None),
            **kwargs.pop('model_kwargs', {}),
        )
        if reparam:
            self.model = reparameterize_model(self.model)
        self.model.to(device=self.device, dtype=self.model_dtype, memory_format=torch.channels_last if self.channels_last else None)
        self.num_classes = self.model.num_classes
        self.param_count = count_params(self.model)
        _logger.info('Model %s created, param count: %d' % (model_name, self.param_count))

        data_config = resolve_data_config(kwargs, model=self.model, use_test_size=not use_train_size)
        self.input_size = data_config['input_size']
        self.batch_size = kwargs.pop('batch_size', 256)

        self.compiled = False
        if torchscript:
            self.model = torch.jit.script(self.model)
            self.compiled = True
        elif torchcompile:
            assert has_compile, 'A version of torch w/ torch.compile() is required, possibly a nightly.'
            torch._dynamo.reset()
            self.model = torch.compile(self.model, backend=torchcompile)
            self.compiled = True
        elif aot_autograd:
            assert has_functorch, 'functorch is needed for --aot-autograd'
            self.model = memory_efficient_fusion(self.model)
            self.compiled = True

        self.example_inputs = None
        self.num_warm_iter = num_warm_iter
        self.num_bench_iter = num_bench_iter
        self.log_freq = num_bench_iter // 5
        if 'cuda' in self.device:
            self.time_fn = partial(cuda_timestamp, device=self.device)
        else:
            self.time_fn = timestamp

    def _init_input(self):
        self.example_inputs = torch.randn((self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype)
        if self.channels_last:
            self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last)


class InferenceBenchmarkRunner(BenchmarkRunner):
    def __init__(self, model_name, device='cuda', torchscript=False, **kwargs):
        super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
        self.model.eval()

    def run(self):
        def _step():
            t_step_start = self.time_fn()
            with self.amp_autocast():
                output = self.model(self.example_inputs)
            t_step_end = self.time_fn(True)
            return t_step_end - t_step_start

        _logger.info(f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ input size {self.input_size} and batch size {self.batch_size}.')

        with torch.no_grad():
            self._init_input()

            for _ in range(self.num_warm_iter):
                _step()

            total_step = 0.0
            num_samples = 0
            t_run_start = self.time_fn()
            for i in range(self.num_bench_iter):
                delta_fwd = _step()
                total_step += delta_fwd
                num_samples += self.batch_size
                num_steps = i + 1
                if num_steps % self.log_freq == 0:
                    _logger.info(f'Infer [{num_steps}/{self.num_bench_iter}]. {num_samples / total_step:0.2f} samples/sec. {1000 * total_step / num_steps:0.3f} ms/step.')
            t_run_end = self.time_fn(True)
            t_run_elapsed = t_run_end - t_run_start

        results = dict(
            samples_per_sec=round(num_samples / t_run_elapsed, 2),
            step_time=round(1000 * total_step / self.num_bench_iter, 3),
            batch_size=self.batch_size,
            img_size=self.input_size[-1],
            param_count=round(self.param_count / 1000000.0, 2),
        )

        retries = 0 if self.compiled else 2
        while retries:
            retries -= 1
            try:
                if has_deepspeed_profiling:
                    macs, _ = profile_deepspeed(self.model, self.input_size)
                    results['gmacs'] = round(macs / 1000000000.0, 2)
                elif has_fvcore_profiling:
                    macs, activations = profile_fvcore(self.model, self.input_size, force_cpu=not retries)
                    results['gmacs'] = round(macs / 1000000000.0, 2)
                    results['macts'] = round(activations / 1000000.0, 2)
            except RuntimeError as e:
                pass

        _logger.info(f"Inference benchmark of {self.model_name} done. {results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step")

        return results
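# The run() loop above follows the standard GPU timing recipe: warm up first,
# synchronize before reading the clock, then average over many steps. A minimal
# sketch of the same pattern on a bare module (a sketch, assuming a CUDA model
# and input; the runner classes above additionally handle AMP and logging):
def time_inference(model, example, warm=10, iters=50):
    model.eval()
    with torch.no_grad():
        for _ in range(warm):  # warmup: cudnn autotune, lazy init, cache effects
            model(example)
        torch.cuda.synchronize()
        start = time.perf_counter()
        for _ in range(iters):
            model(example)
        torch.cuda.synchronize()  # drain queued kernels before stopping the clock
    return (time.perf_counter() - start) / iters  # mean seconds per step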
class TrainBenchmarkRunner(BenchmarkRunner):
    def __init__(self, model_name, device='cuda', torchscript=False, **kwargs):
        super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
        self.model.train()

        self.loss = nn.CrossEntropyLoss().to(self.device)
        self.target_shape = tuple()

        self.optimizer = create_optimizer_v2(self.model, opt=kwargs.pop('opt', 'sgd'), lr=kwargs.pop('lr', 0.0001))

        if kwargs.pop('grad_checkpointing', False):
            self.model.set_grad_checkpointing()

    def _gen_target(self, batch_size):
        return torch.empty((batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes)

    def run(self):
        def _step(detail=False):
            self.optimizer.zero_grad()
            t_start = self.time_fn()
            t_fwd_end = t_start
            t_bwd_end = t_start
            with self.amp_autocast():
                output = self.model(self.example_inputs)
                if isinstance(output, tuple):
                    output = output[0]
                if detail:
                    t_fwd_end = self.time_fn(True)
                target = self._gen_target(output.shape[0])
                self.loss(output, target).backward()
                if detail:
                    t_bwd_end = self.time_fn(True)
            self.optimizer.step()
            t_end = self.time_fn(True)
            if detail:
                delta_fwd = t_fwd_end - t_start
                delta_bwd = t_bwd_end - t_fwd_end
                delta_opt = t_end - t_bwd_end
                return delta_fwd, delta_bwd, delta_opt
            else:
                delta_step = t_end - t_start
                return delta_step

        _logger.info(f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ input size {self.input_size} and batch size {self.batch_size}.')

        self._init_input()

        for _ in range(self.num_warm_iter):
            _step()

        t_run_start = self.time_fn()
        if self.detail:
            total_fwd = 0.0
            total_bwd = 0.0
            total_opt = 0.0
            num_samples = 0
            for i in range(self.num_bench_iter):
                delta_fwd, delta_bwd, delta_opt = _step(True)
                num_samples += self.batch_size
                total_fwd += delta_fwd
                total_bwd += delta_bwd
                total_opt += delta_opt
                num_steps = i + 1
                if num_steps % self.log_freq == 0:
                    total_step = total_fwd + total_bwd + total_opt
                    _logger.info(f'Train [{num_steps}/{self.num_bench_iter}]. {num_samples / total_step:0.2f} samples/sec. {1000 * total_fwd / num_steps:0.3f} ms/step fwd, {1000 * total_bwd / num_steps:0.3f} ms/step bwd, {1000 * total_opt / num_steps:0.3f} ms/step opt.')
            total_step = total_fwd + total_bwd + total_opt
            t_run_elapsed = self.time_fn() - t_run_start
            results = dict(
                samples_per_sec=round(num_samples / t_run_elapsed, 2),
                step_time=round(1000 * total_step / self.num_bench_iter, 3),
                fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3),
                bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3),
                opt_time=round(1000 * total_opt / self.num_bench_iter, 3),
                batch_size=self.batch_size,
                img_size=self.input_size[-1],
                param_count=round(self.param_count / 1000000.0, 2),
            )
        else:
            total_step = 0.0
            num_samples = 0
            for i in range(self.num_bench_iter):
                delta_step = _step(False)
                num_samples += self.batch_size
                total_step += delta_step
                num_steps = i + 1
                if num_steps % self.log_freq == 0:
                    _logger.info(f'Train [{num_steps}/{self.num_bench_iter}]. {num_samples / total_step:0.2f} samples/sec. {1000 * total_step / num_steps:0.3f} ms/step.')
            t_run_elapsed = self.time_fn() - t_run_start
            results = dict(
                samples_per_sec=round(num_samples / t_run_elapsed, 2),
                step_time=round(1000 * total_step / self.num_bench_iter, 3),
                batch_size=self.batch_size,
                img_size=self.input_size[-1],
                param_count=round(self.param_count / 1000000.0, 2),
            )

        _logger.info(f"Train benchmark of {self.model_name} done. {results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step")

        return results
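# _gen_target() above draws integer class targets with Tensor.random_(), which
# fills in-place with uniform samples from [0, num_classes). A toy equivalent,
# paired with the same CrossEntropyLoss used by the train step:
logits = torch.randn(4, 10, requires_grad=True)
targets = torch.empty(4, dtype=torch.long).random_(10)  # 4 labels in [0, 10)
nn.CrossEntropyLoss()(logits, targets).backward()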
class ProfileRunner(BenchmarkRunner):
    def __init__(self, model_name, device='cuda', profiler='', **kwargs):
        super().__init__(model_name=model_name, device=device, **kwargs)
        if not profiler:
            if has_deepspeed_profiling:
                profiler = 'deepspeed'
            elif has_fvcore_profiling:
                profiler = 'fvcore'
        assert profiler, 'One of deepspeed or fvcore needs to be installed for profiling to work.'
        self.profiler = profiler
        self.model.eval()

    def run(self):
        _logger.info(f'Running profiler on {self.model_name} w/ input size {self.input_size} and batch size {self.batch_size}.')

        macs = 0
        activations = 0
        if self.profiler == 'deepspeed':
            macs, _ = profile_deepspeed(self.model, self.input_size, batch_size=self.batch_size, detailed=True)
        elif self.profiler == 'fvcore':
            macs, activations = profile_fvcore(self.model, self.input_size, batch_size=self.batch_size, detailed=True)

        results = dict(
            gmacs=round(macs / 1000000000.0, 2),
            macts=round(activations / 1000000.0, 2),
            batch_size=self.batch_size,
            img_size=self.input_size[-1],
            param_count=round(self.param_count / 1000000.0, 2),
        )

        _logger.info(f"Profile of {self.model_name} done. {results['gmacs']:.2f} GMACs, {results['param_count']:.2f} M params.")

        return results
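# profile_fvcore() above reduces to two fvcore analysis objects evaluated on one
# example input. Minimal standalone use, guarded on fvcore being installed
# (model choice is illustrative; fvcore counts multiply-adds for most ops):
if has_fvcore_profiling:
    _m = create_model('resnet18').eval()
    _x = torch.ones(1, 3, 224, 224)
    print(FlopCountAnalysis(_m, _x).total())        # total op count (MACs)
    print(ActivationCountAnalysis(_m, _x).total())  # total activation count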
def _try_run(model_name, bench_fn, bench_kwargs, initial_batch_size, no_batch_size_retry=False):
    batch_size = initial_batch_size
    results = dict()
    error_str = 'Unknown'
    while batch_size:
        try:
            torch.cuda.empty_cache()
            bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs)
            results = bench.run()
            return results
        except RuntimeError as e:
            error_str = str(e)
            _logger.error(f'"{error_str}" while running benchmark.')
            if not check_batch_size_retry(error_str):
                _logger.error(f'Unrecoverable error encountered while benchmarking {model_name}, skipping.')
                break
            if no_batch_size_retry:
                break
            batch_size = decay_batch_step(batch_size)
            _logger.warning(f'Reducing batch size to {batch_size} for retry.')
    results['error'] = error_str
    return results


def benchmark(args):
    if args.amp:
        _logger.warning("Overriding precision to 'amp' since --amp flag set.")
        args.precision = 'amp' if args.amp_dtype == 'float16' else '_'.join(['amp', args.amp_dtype])
    _logger.info(f"Benchmarking in {args.precision} precision. {'NHWC' if args.channels_last else 'NCHW'} layout. torchscript {'enabled' if args.torchscript else 'disabled'}")

    bench_kwargs = vars(args).copy()
    bench_kwargs.pop('amp')
    model = bench_kwargs.pop('model')
    batch_size = bench_kwargs.pop('batch_size')

    bench_fns = (InferenceBenchmarkRunner,)
    prefixes = ('infer',)
    if args.bench == 'both':
        bench_fns = (InferenceBenchmarkRunner, TrainBenchmarkRunner)
        prefixes = ('infer', 'train')
    elif args.bench == 'train':
        bench_fns = (TrainBenchmarkRunner,)
        prefixes = ('train',)
    elif args.bench.startswith('profile'):
        if 'deepspeed' in args.bench:
            assert has_deepspeed_profiling, 'deepspeed must be installed to use deepspeed flop counter'
            bench_kwargs['profiler'] = 'deepspeed'
        elif 'fvcore' in args.bench:
            assert has_fvcore_profiling, 'fvcore must be installed to use fvcore flop counter'
            bench_kwargs['profiler'] = 'fvcore'
        bench_fns = (ProfileRunner,)
        batch_size = 1

    model_results = OrderedDict(model=model)
    for prefix, bench_fn in zip(prefixes, bench_fns):
        run_results = _try_run(model, bench_fn, bench_kwargs=bench_kwargs, initial_batch_size=batch_size, no_batch_size_retry=args.no_retry)
        if prefix and 'error' not in run_results:
            run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()}
        model_results.update(run_results)
        if 'error' in run_results:
            break
    if 'error' not in model_results:
        param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0))
        model_results.setdefault('param_count', param_count)
        model_results.pop('train_param_count', 0)
    return model_results
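# _try_run() above retries recoverable (typically OOM) failures at progressively
# smaller batch sizes via decay_batch_step(). The control flow, reduced to its
# essentials (a sketch; simple halving stands in for the real decay schedule):
def try_with_smaller_batches(run_fn, batch_size):
    while batch_size:
        try:
            return run_fn(batch_size)
        except RuntimeError:
            torch.cuda.empty_cache()
            batch_size //= 2  # assumption: real code uses decay_batch_step()
    return {}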
def main():
    setup_default_logging()
    args = parser.parse_args()
    model_cfgs = []
    model_names = []

    if args.fast_norm:
        set_fast_norm()

    if args.model_list:
        args.model = ''
        with open(args.model_list) as f:
            model_names = [line.rstrip() for line in f]
        model_cfgs = [(n, None) for n in model_names]
    elif args.model == 'all':
        args.pretrained = True
        model_names = list_models(pretrained=True, exclude_filters=['*in21k'])
        model_cfgs = [(n, None) for n in model_names]
    elif not is_model(args.model):
        # model name doesn't exist, treat as wildcard filter
        model_names = list_models(args.model)
        model_cfgs = [(n, None) for n in model_names]

    if len(model_cfgs):
        _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
        results = []
        try:
            for m, _ in model_cfgs:
                if not m:
                    continue
                args.model = m
                r = benchmark(args)
                if r:
                    results.append(r)
                time.sleep(10)
        except KeyboardInterrupt as e:
            pass
        sort_key = 'infer_samples_per_sec'
        if 'train' in args.bench:
            sort_key = 'train_samples_per_sec'
        elif 'profile' in args.bench:
            sort_key = 'infer_gmacs'
        results = filter(lambda x: sort_key in x, results)
        results = sorted(results, key=lambda x: x[sort_key], reverse=True)
    else:
        results = benchmark(args)

    if args.results_file:
        write_results(args.results_file, results, format=args.results_format)

    # output results in JSON to stdout w/ delimiter for runner script
    print(f'--result\n{json.dumps(results, indent=4)}')


def write_results(results_file, results, format='csv'):
    with open(results_file, mode='w') as cf:
        if format == 'json':
            json.dump(results, cf, indent=4)
        else:
            if not isinstance(results, (list, tuple)):
                results = [results]
            if not results:
                return
            dw = csv.DictWriter(cf, fieldnames=results[0].keys())
            dw.writeheader()
            for r in results:
                dw.writerow(r)
            cf.flush()


if __name__ == '__main__':
    main()
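# write_results() above accepts a single result dict or a list of them, and
# format='json' switches from CSV rows to a JSON dump. A one-row smoke test
# (the output path and values here are illustrative only):
write_results('./demo_results.csv', [dict(model='resnet50', infer_samples_per_sec=1234.5)])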
# File: pytorch-image-models-main/bulk_runner.py
import argparse
import csv
import json
import os
import subprocess
import sys
import time
from typing import Callable, List, Tuple, Union

from timm.models import is_model, list_models, get_pretrained_cfg, get_arch_pretrained_cfgs

parser = argparse.ArgumentParser(description='Per-model process launcher')

parser.add_argument('--model-list', metavar='NAME', default='', help='txt file based list of model names to benchmark')
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME', help='Output csv file for validation results (summary)')
parser.add_argument('--sort-key', default='', type=str, metavar='COL', help='Specify sort key for results csv')
parser.add_argument('--pretrained', action='store_true', help='only run models with pretrained weights')
parser.add_argument('--delay', type=float, default=0, help='Interval, in seconds, to delay between model invocations.')
parser.add_argument('--start_method', type=str, default='spawn', choices=['spawn', 'fork', 'forkserver'], help='Multiprocessing start method to use when creating workers.')
parser.add_argument('--no_python', action='store_true', help="Skip prepending the script with 'python' - just execute it directly. Useful when the script is not a Python script.")
parser.add_argument('-m', '--module', action='store_true', help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.")
parser.add_argument('script', type=str, help='Full path to the program/script to be launched for each model config.')
parser.add_argument('script_args', nargs=argparse.REMAINDER)


def cmd_from_args(args) -> Tuple[Union[Callable, str], List[str]]:
    with_python = not args.no_python
    cmd: Union[Callable, str]
    cmd_args = []
    if with_python:
        cmd = os.getenv('PYTHON_EXEC', sys.executable)
        cmd_args.append('-u')
        if args.module:
            cmd_args.append('-m')
        cmd_args.append(args.script)
    else:
        if args.module:
            raise ValueError("Don't use both the '--no_python' flag and the '--module' flag at the same time.")
        cmd = args.script
    cmd_args.extend(args.script_args)
    return cmd, cmd_args


def _get_model_cfgs(model_names, num_classes=None, expand_train_test=False, include_crop=True, expand_arch=False):
    model_cfgs = set()
    for name in model_names:
        if expand_arch:
            pt_cfgs = get_arch_pretrained_cfgs(name).values()
        else:
            pt_cfg = get_pretrained_cfg(name)
            pt_cfgs = [pt_cfg] if pt_cfg is not None else []
        for cfg in pt_cfgs:
            if cfg.input_size is None:
                continue
            if num_classes is not None and getattr(cfg, 'num_classes', 0) != num_classes:
                continue
            # add the config at its train input size
            size = cfg.input_size[-1]
            if include_crop:
                model_cfgs.add((name, size, cfg.crop_pct))
            else:
                model_cfgs.add((name, size))
            # also add the test-size config if one exists
            if expand_train_test and cfg.test_input_size is not None:
                test_size = cfg.test_input_size[-1]
                if include_crop:
                    test_crop = cfg.test_crop_pct or cfg.crop_pct
                    model_cfgs.add((name, test_size, test_crop))
                else:
                    model_cfgs.add((name, test_size))
    if include_crop:
        return [(n, {'img-size': r, 'crop-pct': cp}) for n, r, cp in sorted(model_cfgs)]
    else:
        return [(n, {'img-size': r}) for n, r in sorted(model_cfgs)]
def main():
    args = parser.parse_args()
    cmd, cmd_args = cmd_from_args(args)

    model_cfgs = []
    if args.model_list == 'all':
        model_names = list_models(pretrained=args.pretrained)
        model_cfgs = [(n, None) for n in model_names]
    elif args.model_list == 'all_in1k':
        model_names = list_models(pretrained=True)
        model_cfgs = _get_model_cfgs(model_names, num_classes=1000, expand_train_test=True)
    elif args.model_list == 'all_res':
        model_names = list_models()
        model_cfgs = _get_model_cfgs(model_names, expand_train_test=True, include_crop=False, expand_arch=True)
    elif not is_model(args.model_list):
        # model name doesn't exist, treat as wildcard filter
        model_names = list_models(args.model_list)
        model_cfgs = [(n, None) for n in model_names]

    if not model_cfgs and os.path.exists(args.model_list):
        with open(args.model_list) as f:
            model_names = [line.rstrip() for line in f]
        model_cfgs = _get_model_cfgs(model_names, expand_train_test=True)

    if len(model_cfgs):
        results_file = args.results_file or './results.csv'
        results = []
        errors = []
        model_strings = '\n'.join([f'{x[0]}, {x[1]}' for x in model_cfgs])
        print(f'Running script on these models:\n {model_strings}')
        if not args.sort_key:
            if 'benchmark' in args.script:
                if any(['train' in a for a in args.script_args]):
                    sort_key = 'train_samples_per_sec'
                else:
                    sort_key = 'infer_samples_per_sec'
            else:
                sort_key = 'top1'
        else:
            sort_key = args.sort_key
        print(f'Script: {args.script}, Args: {args.script_args}, Sort key: {sort_key}')

        try:
            for m, ax in model_cfgs:
                if not m:
                    continue
                args_str = (cmd, *[str(e) for e in cmd_args], '--model', m)
                if ax is not None:
                    extra_args = [(f'--{k}', str(v)) for k, v in ax.items()]
                    extra_args = [i for t in extra_args for i in t]
                    args_str += tuple(extra_args)
                try:
                    o = subprocess.check_output(args=args_str).decode('utf-8').split('--result')[-1]
                    r = json.loads(o)
                    results.append(r)
                except Exception as e:
                    errors.append(dict(model=m, error=str(e)))
                if args.delay:
                    time.sleep(args.delay)
        except KeyboardInterrupt as e:
            pass

        errors.extend(list(filter(lambda x: 'error' in x, results)))
        if errors:
            print(f'{len(errors)} models had errors during run.')
            for e in errors:
                if 'model' in e:
                    print(f"\t {e['model']} ({e.get('error', 'Unknown')})")
                else:
                    print(e)

        results = list(filter(lambda x: 'error' not in x, results))
        no_sortkey = list(filter(lambda x: sort_key not in x, results))
        if no_sortkey:
            print(f'{len(no_sortkey)} results missing sort key, skipping sort.')
        else:
            results = sorted(results, key=lambda x: x[sort_key], reverse=True)

        if len(results):
            print(f'{len(results)} models run successfully. Saving results to {results_file}.')
            write_results(results_file, results)


def write_results(results_file, results):
    with open(results_file, mode='w') as cf:
        dw = csv.DictWriter(cf, fieldnames=results[0].keys())
        dw.writeheader()
        for r in results:
            dw.writerow(r)
        cf.flush()


if __name__ == '__main__':
    main()
# File: pytorch-image-models-main/clean_checkpoint.py
import argparse
import hashlib
import os
import shutil
import tempfile

import torch

from timm.models import load_state_dict

try:
    import safetensors.torch
    _has_safetensors = True
except ImportError:
    _has_safetensors = False

parser = argparse.ArgumentParser(description='PyTorch Checkpoint Cleaner')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--output', default='', type=str, metavar='PATH', help='output path')
parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true', help='do not use the ema version of weights (if present)')
parser.add_argument('--no-hash', dest='no_hash', action='store_true', help='no hash in output filename')
parser.add_argument('--clean-aux-bn', dest='clean_aux_bn', action='store_true', help='remove auxiliary batch norm layers (from SplitBN training) from checkpoint')
parser.add_argument('--safetensors', action='store_true', help='Save weights using safetensors instead of the default torch way (pickle).')


def main():
    args = parser.parse_args()

    if os.path.exists(args.output):
        print('Error: Output filename ({}) already exists.'.format(args.output))
        exit(1)

    clean_checkpoint(args.checkpoint, args.output, not args.no_use_ema, args.no_hash, args.clean_aux_bn, safe_serialization=args.safetensors)


def clean_checkpoint(checkpoint, output, use_ema=True, no_hash=False, clean_aux_bn=False, safe_serialization: bool = False):
    if checkpoint and os.path.isfile(checkpoint):
        print("=> Loading checkpoint '{}'".format(checkpoint))
        state_dict = load_state_dict(checkpoint, use_ema=use_ema)
        new_state_dict = {}
        for k, v in state_dict.items():
            if clean_aux_bn and 'aux_bn' in k:
                # skip SplitBN auxiliary norm layers
                continue
            name = k[7:] if k.startswith('module.') else k
            new_state_dict[name] = v
        print("=> Loaded state_dict from '{}'".format(checkpoint))

        ext = ''
        if output:
            checkpoint_root, checkpoint_base = os.path.split(output)
            checkpoint_base, ext = os.path.splitext(checkpoint_base)
        else:
            checkpoint_root = ''
            checkpoint_base = os.path.split(checkpoint)[1]
            checkpoint_base = os.path.splitext(checkpoint_base)[0]

        temp_filename = '__' + checkpoint_base
        if safe_serialization:
            assert _has_safetensors, '`pip install safetensors` to use .safetensors'
            safetensors.torch.save_file(new_state_dict, temp_filename)
        else:
            torch.save(new_state_dict, temp_filename)

        with open(temp_filename, 'rb') as f:
            sha_hash = hashlib.sha256(f.read()).hexdigest()

        if ext:
            final_ext = ext
        else:
            final_ext = '.safetensors' if safe_serialization else '.pth'

        if no_hash:
            final_filename = checkpoint_base + final_ext
        else:
            final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + final_ext

        shutil.move(temp_filename, os.path.join(checkpoint_root, final_filename))
        print("=> Saved state_dict to '{}', SHA256: {}".format(final_filename, sha_hash))
        return final_filename
    else:
        print("Error: Checkpoint ({}) doesn't exist".format(checkpoint))
        return ''


if __name__ == '__main__':
    main()
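# Checkpoints saved under DataParallel/DDP prefix every key with 'module.';
# clean_checkpoint() above strips that prefix. The core transform, in isolation:
sd = {'module.conv1.weight': torch.zeros(1), 'head.bias': torch.zeros(1)}
cleaned = {k[7:] if k.startswith('module.') else k: v for k, v in sd.items()}
print(list(cleaned))  # ['conv1.weight', 'head.bias']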
# File: pytorch-image-models-main/convert/convert_from_mxnet.py
import argparse
import hashlib
import os

import mxnet as mx
import gluoncv
import torch

from timm import create_model

parser = argparse.ArgumentParser(description='Convert from MXNet')
parser.add_argument('--model', default='all', type=str, metavar='MODEL', help='Name of model to train (default: "all")')


def convert(mxnet_name, torch_name):
    # download and load the pre-trained model
    net = gluoncv.model_zoo.get_model(mxnet_name, pretrained=True)

    # create corresponding torch model
    torch_net = create_model(torch_name)

    mxp = [(k, v) for k, v in net.collect_params().items() if 'running' not in k]
    torchp = list(torch_net.named_parameters())
    torch_params = {}

    # convert parameters, checking that names and shapes line up
    for (tn, tv), (mn, mv) in zip(torchp, mxp):
        m_split = mn.split('_')
        t_split = tn.split('.')
        print(t_split, m_split)
        print(tv.shape, mv.shape)
        if m_split[-1] == 'gamma':
            assert t_split[-1] == 'weight'
        if m_split[-1] == 'beta':
            assert t_split[-1] == 'bias'
        assert all(t == m for t, m in zip(tv.shape, mv.shape))
        torch_tensor = torch.from_numpy(mv.data().asnumpy())
        torch_params[tn] = torch_tensor

    # convert batch norm running stats buffers
    mxb = [(k, v) for k, v in net.collect_params().items() if any(x in k for x in ['running_mean', 'running_var'])]
    torchb = [(k, v) for k, v in torch_net.named_buffers() if 'num_batches' not in k]
    for (tn, tv), (mn, mv) in zip(torchb, mxb):
        print(tn, mn)
        print(tv.shape, mv.shape)
        if 'running_var' in tn:
            assert 'running_var' in mn
        if 'running_mean' in tn:
            assert 'running_mean' in mn
        torch_tensor = torch.from_numpy(mv.data().asnumpy())
        torch_params[tn] = torch_tensor

    torch_net.load_state_dict(torch_params)
    torch_filename = './%s.pth' % torch_name
    torch.save(torch_net.state_dict(), torch_filename)
    with open(torch_filename, 'rb') as f:
        sha_hash = hashlib.sha256(f.read()).hexdigest()
    final_filename = os.path.splitext(torch_filename)[0] + '-' + sha_hash[:8] + '.pth'
    os.rename(torch_filename, final_filename)
    print("=> Saved converted model to '{}', SHA256: {}".format(final_filename, sha_hash))


def map_mx_to_torch_model(mx_name):
    torch_name = mx_name.lower()
    if torch_name.startswith('se_'):
        torch_name = torch_name.replace('se_', 'se')
    elif torch_name.startswith('senet_'):
        torch_name = torch_name.replace('senet_', 'senet')
    elif torch_name.startswith('inceptionv3'):
        torch_name = torch_name.replace('inceptionv3', 'inception_v3')
    torch_name = 'gluon_' + torch_name
    return torch_name


ALL = ['resnet18_v1b', 'resnet34_v1b', 'resnet50_v1b', 'resnet101_v1b', 'resnet152_v1b', 'resnet50_v1c', 'resnet101_v1c', 'resnet152_v1c', 'resnet50_v1d', 'resnet101_v1d', 'resnet152_v1d', 'resnet50_v1s', 'resnet101_v1s', 'resnet152_v1s', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d', 'se_resnext50_32x4d', 'se_resnext101_32x4d', 'se_resnext101_64x4d', 'senet_154', 'inceptionv3']


def main():
    args = parser.parse_args()
    if not args.model or args.model == 'all':
        for mx_model in ALL:
            torch_model = map_mx_to_torch_model(mx_model)
            convert(mx_model, torch_model)
    else:
        mx_model = args.model
        torch_model = map_mx_to_torch_model(mx_model)
        convert(mx_model, torch_model)


if __name__ == '__main__':
    main()
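# map_mx_to_torch_model() above maps Gluon zoo names onto timm's 'gluon_' models.
# A quick demonstration of the renaming rules:
for n in ('resnet50_v1b', 'se_resnext50_32x4d', 'inceptionv3'):
    print(n, '->', map_mx_to_torch_model(n))
# resnet50_v1b       -> gluon_resnet50_v1b
# se_resnext50_32x4d -> gluon_seresnext50_32x4d
# inceptionv3        -> gluon_inception_v3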
# File: pytorch-image-models-main/convert/convert_nest_flax.py
import sys

import numpy as np
import torch

from clu import checkpoint

arch_depths = {
    'nest_base': [2, 2, 20],
    'nest_small': [2, 2, 20],
    'nest_tiny': [2, 2, 8],
}


def convert_nest(checkpoint_path, arch):
    assert arch in ['nest_base', 'nest_small', 'nest_tiny'], 'Your `arch` is not supported'

    flax_dict = checkpoint.load_state_dict(checkpoint_path)['optimizer']['target']
    state_dict = {}

    # Patch embedding
    state_dict['patch_embed.proj.weight'] = torch.tensor(flax_dict['PatchEmbedding_0']['Conv_0']['kernel']).permute(3, 2, 0, 1)
    state_dict['patch_embed.proj.bias'] = torch.tensor(flax_dict['PatchEmbedding_0']['Conv_0']['bias'])

    # Positional embeddings
    posemb_keys = [k for k in flax_dict.keys() if k.startswith('PositionEmbedding')]
    for i, k in enumerate(posemb_keys):
        state_dict[f'levels.{i}.pos_embed'] = torch.tensor(flax_dict[k]['pos_embedding'])

    # Transformer encoders
    depths = arch_depths[arch]
    for level in range(len(depths)):
        for layer in range(depths[level]):
            global_layer_ix = sum(depths[:level]) + layer
            # Norms
            for i in range(2):
                state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i + 1}.weight'] = torch.tensor(flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['scale'])
                state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i + 1}.bias'] = torch.tensor(flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['bias'])

            # Attention qkv: fold separate q and kv projections into one qkv weight
            w_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['kernel']
            w_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['kernel']
            w_kv = np.concatenate(np.split(w_kv, 2, -1), 1)
            w_qkv = np.concatenate([w_q, w_kv], 1)
            state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.weight'] = torch.tensor(w_qkv).flatten(1).permute(1, 0)
            b_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['bias']
            b_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['bias']
            b_kv = np.concatenate(np.split(b_kv, 2, -1), 0)
            b_qkv = np.concatenate([b_q, b_kv], 0)
            state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.bias'] = torch.tensor(b_qkv).reshape(-1)

            # Attention projection
            w_proj = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['proj_kernel']
            w_proj = torch.tensor(w_proj).permute(2, 1, 0).flatten(1)
            state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.weight'] = w_proj
            state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.bias'] = torch.tensor(flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['bias'])

            # MLP
            for i in range(2):
                state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i + 1}.weight'] = torch.tensor(flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['kernel']).permute(1, 0)
                state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i + 1}.bias'] = torch.tensor(flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['bias'])

    # Block aggregation (ConvPool) between levels
    for level in range(1, len(depths)):
        state_dict[f'levels.{level}.pool.conv.weight'] = torch.tensor(flax_dict[f'ConvPool_{level - 1}']['Conv_0']['kernel']).permute(3, 2, 0, 1)
        state_dict[f'levels.{level}.pool.conv.bias'] = torch.tensor(flax_dict[f'ConvPool_{level - 1}']['Conv_0']['bias'])
        state_dict[f'levels.{level}.pool.norm.weight'] = torch.tensor(flax_dict[f'ConvPool_{level - 1}']['LayerNorm_0']['scale'])
        state_dict[f'levels.{level}.pool.norm.bias'] = torch.tensor(flax_dict[f'ConvPool_{level - 1}']['LayerNorm_0']['bias'])

    # Final norm and classifier head
    state_dict['norm.weight'] = torch.tensor(flax_dict['LayerNorm_0']['scale'])
    state_dict['norm.bias'] = torch.tensor(flax_dict['LayerNorm_0']['bias'])
    state_dict['head.weight'] = torch.tensor(flax_dict['Dense_0']['kernel']).permute(1, 0)
    state_dict['head.bias'] = torch.tensor(flax_dict['Dense_0']['bias'])

    return state_dict


if __name__ == '__main__':
    variant = sys.argv[1]
    state_dict = convert_nest(f'./nest-{variant[0]}_imagenet', f'nest_{variant}')
    torch.save(state_dict, f'./jx_nest_{variant}.pth')
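# Flax stores conv kernels as (H, W, C_in, C_out); PyTorch expects
# (C_out, C_in, H, W), hence the .permute(3, 2, 0, 1) calls above. Toy check:
k_flax = np.zeros((3, 3, 64, 128), dtype=np.float32)  # H, W, Cin, Cout
k_torch = torch.tensor(k_flax).permute(3, 2, 0, 1)
print(k_torch.shape)  # torch.Size([128, 64, 3, 3])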
# File: pytorch-image-models-main/inference.py
import argparse
import json
import logging
import os
import time
from contextlib import suppress
from functools import partial

import numpy as np
import pandas as pd
import torch

from timm.data import create_dataset, create_loader, resolve_data_config, ImageNetInfo, infer_imagenet_subset
from timm.layers import apply_test_time_pool
from timm.models import create_model
from timm.utils import AverageMeter, setup_default_logging, set_jit_fuser, ParseKwargs

try:
    from apex import amp
    has_apex = True
except ImportError:
    has_apex = False

has_native_amp = False
try:
    if getattr(torch.cuda.amp, 'autocast') is not None:
        has_native_amp = True
except AttributeError:
    pass

try:
    from functorch.compile import memory_efficient_fusion
    has_functorch = True
except ImportError as e:
    has_functorch = False

has_compile = hasattr(torch, 'compile')

_FMT_EXT = {
    'json': '.json',
    'json-records': '.json',
    'json-split': '.json',
    'parquet': '.parquet',
    'csv': '.csv',
}

torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('inference')

parser = argparse.ArgumentParser(description='PyTorch ImageNet Inference')
parser.add_argument('data', nargs='?', metavar='DIR', const=None, help='path to dataset (*deprecated*, use --data-dir)')
parser.add_argument('--data-dir', metavar='DIR', help='path to dataset (root dir)')
parser.add_argument('--dataset', metavar='NAME', default='', help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation', help='dataset split (default: validation)')
parser.add_argument('--model', '-m', metavar='MODEL', default='resnet50', help='model architecture (default: resnet50)')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N', help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--in-chans', type=int, default=None, metavar='N', help='Image input channels (default: None => 3)')
parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False, help='force use of train input size, even when test size is specified in pretrained cfg')
parser.add_argument('--crop-pct', default=None, type=float, metavar='N', help='Input image center crop pct')
parser.add_argument('--crop-mode', default=None, type=str, metavar='N', help='Input image crop mode (squash, border, center). Model default if None.')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME', help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None, help='Number of classes in dataset')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME', help='path to class to idx mapping file (default: "")')
parser.add_argument('--log-freq', default=10, type=int, metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1, help='Number of GPUs to use')
parser.add_argument('--test-pool', dest='test_pool', action='store_true', help='enable test time pool')
parser.add_argument('--channels-last', action='store_true', default=False, help='Use channels_last memory layout')
parser.add_argument('--device', default='cuda', type=str, help='Device (accelerator) to use.')
parser.add_argument('--amp', action='store_true', default=False, help='use Native AMP for mixed precision inference')
parser.add_argument('--amp-dtype', default='float16', type=str, help='lower precision AMP dtype (default: float16)')
parser.add_argument('--fuser', default='', type=str, help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)

scripting_group = parser.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', default=False, action='store_true', help='torch.jit.script the full model')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor', help='Enable compilation w/ specified backend (default: inductor).')
scripting_group.add_argument('--aot-autograd', default=False, action='store_true', help='Enable AOT Autograd support.')

parser.add_argument('--results-dir', type=str, default=None, help='folder for output results')
parser.add_argument('--results-file', type=str, default=None, help='results filename (relative to results-dir)')
parser.add_argument('--results-format', type=str, nargs='+', default=['csv'], help='results format (one of "csv", "json", "json-records", "json-split", "parquet")')
parser.add_argument('--results-separate-col', action='store_true', default=False, help='separate output columns per result index.')
parser.add_argument('--topk', default=1, type=int, metavar='N', help='Top-k to output to CSV')
parser.add_argument('--fullname', action='store_true', default=False, help='use full sample name in output (not just basename).')
parser.add_argument('--filename-col', type=str, default='filename', help='name for filename / sample name column')
parser.add_argument('--index-col', type=str, default='index', help='name for output indices column(s)')
parser.add_argument('--label-col', type=str, default='label', help='name for output label column(s)')
parser.add_argument('--output-col', type=str, default=None, help='name for logit/probs output column(s)')
parser.add_argument('--output-type', type=str, default='prob', help='output type column ("prob" for probabilities, "logit" for raw logits)')
parser.add_argument('--label-type', type=str, default='description', help='type of label to output, one of "none", "name", "description", "detail"')
parser.add_argument('--include-index', action='store_true', default=False, help='include the class index in results')
parser.add_argument('--exclude-output', action='store_true', default=False, help='exclude logits/probs from results, just indices. topk must be set !=0.')


def main():
    setup_default_logging()
    args = parser.parse_args()
    args.pretrained = args.pretrained or not args.checkpoint

    if torch.cuda.is_available():
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.benchmark = True

    device = torch.device(args.device)

    # resolve AMP arguments based on PyTorch availability
    amp_autocast = suppress
    if args.amp:
        assert has_native_amp, 'Please update PyTorch to a version with native AMP (or use APEX).'
        assert args.amp_dtype in ('float16', 'bfloat16')
        amp_dtype = torch.bfloat16 if args.amp_dtype == 'bfloat16' else torch.float16
        amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
        _logger.info('Running inference in mixed precision with native PyTorch AMP.')
    else:
        _logger.info('Running inference in float32. AMP not enabled.')

    if args.fuser:
        set_jit_fuser(args.fuser)

    # create model
    in_chans = 3
    if args.in_chans is not None:
        in_chans = args.in_chans
    elif args.input_size is not None:
        in_chans = args.input_size[0]

    model = create_model(
        args.model,
        num_classes=args.num_classes,
        in_chans=in_chans,
        pretrained=args.pretrained,
        checkpoint_path=args.checkpoint,
        **args.model_kwargs,
    )
    if args.num_classes is None:
        assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
        args.num_classes = model.num_classes

    _logger.info(f'Model {args.model} created, param count: {sum([m.numel() for m in model.parameters()])}')

    data_config = resolve_data_config(vars(args), model=model)
    test_time_pool = False
    if args.test_pool:
        model, test_time_pool = apply_test_time_pool(model, data_config)

    model = model.to(device)
    model.eval()
    if args.channels_last:
        model = model.to(memory_format=torch.channels_last)

    if args.torchscript:
        model = torch.jit.script(model)
    elif args.torchcompile:
        assert has_compile, 'A version of torch w/ torch.compile() is required for --compile, possibly a nightly.'
        torch._dynamo.reset()
        model = torch.compile(model, backend=args.torchcompile)
    elif args.aot_autograd:
        assert has_functorch, 'functorch is needed for --aot-autograd'
        model = memory_efficient_fusion(model)

    if args.num_gpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))

    root_dir = args.data or args.data_dir
    dataset = create_dataset(root=root_dir, name=args.dataset, split=args.split, class_map=args.class_map)

    if test_time_pool:
        data_config['crop_pct'] = 1.0

    workers = 1 if 'tfds' in args.dataset or 'wds' in args.dataset else args.workers
    loader = create_loader(
        dataset,
        batch_size=args.batch_size,
        use_prefetcher=True,
        num_workers=workers,
        device=device,
        **data_config,
    )

    to_label = None
    if args.label_type in ('name', 'description', 'detail'):
        imagenet_subset = infer_imagenet_subset(model)
        if imagenet_subset is not None:
            dataset_info = ImageNetInfo(imagenet_subset)
            if args.label_type == 'name':
                to_label = lambda x: dataset_info.index_to_label_name(x)
            elif args.label_type == 'detail':
                to_label = lambda x: dataset_info.index_to_description(x, detailed=True)
            else:
                to_label = lambda x: dataset_info.index_to_description(x)
            to_label = np.vectorize(to_label)
        else:
            _logger.error('Cannot deduce ImageNet subset from model, no labelling will be performed.')

    top_k = min(args.topk, args.num_classes)

    batch_time = AverageMeter()
    end = time.time()
    all_indices = []
    all_labels = []
    all_outputs = []
    use_probs = args.output_type == 'prob'
    with torch.no_grad():
        for batch_idx, (input, _) in enumerate(loader):
            with amp_autocast():
                output = model(input)

            if use_probs:
                output = output.softmax(-1)

            if top_k:
                output, indices = output.topk(top_k)
                np_indices = indices.cpu().numpy()
                if args.include_index:
                    all_indices.append(np_indices)
                if to_label is not None:
                    np_labels = to_label(np_indices)
                    all_labels.append(np_labels)

            all_outputs.append(output.cpu().numpy())

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if batch_idx % args.log_freq == 0:
                _logger.info('Predict: [{0}/{1}] Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(batch_idx, len(loader), batch_time=batch_time))

    all_indices = np.concatenate(all_indices, axis=0) if all_indices else None
    all_labels = np.concatenate(all_labels, axis=0) if all_labels else None
    all_outputs = np.concatenate(all_outputs, axis=0).astype(np.float32)
    filenames = loader.dataset.filenames(basename=not args.fullname)

    output_col = args.output_col or ('prob' if use_probs else 'logit')
    data_dict = {args.filename_col: filenames}
    if args.results_separate_col and all_outputs.shape[-1] > 1:
        if all_indices is not None:
            for i in range(all_indices.shape[-1]):
                data_dict[f'{args.index_col}_{i}'] = all_indices[:, i]
        if all_labels is not None:
            for i in range(all_labels.shape[-1]):
                data_dict[f'{args.label_col}_{i}'] = all_labels[:, i]
        for i in range(all_outputs.shape[-1]):
            data_dict[f'{output_col}_{i}'] = all_outputs[:, i]
    else:
        if all_indices is not None:
            if all_indices.shape[-1] == 1:
                all_indices = all_indices.squeeze(-1)
            data_dict[args.index_col] = list(all_indices)
        if all_labels is not None:
            if all_labels.shape[-1] == 1:
                all_labels = all_labels.squeeze(-1)
            data_dict[args.label_col] = list(all_labels)
        if all_outputs.shape[-1] == 1:
            all_outputs = all_outputs.squeeze(-1)
        data_dict[output_col] = list(all_outputs)

    df = pd.DataFrame(data=data_dict)

    results_filename = args.results_file
    if results_filename:
        filename_no_ext, ext = os.path.splitext(results_filename)
        if ext and ext in _FMT_EXT.values():
            # if filename provided with an expected ext, strip it (re-appended per format below)
            results_filename = filename_no_ext
    else:
        # base default filename on model name + img-size
        img_size = data_config['input_size'][1]
        results_filename = f'{args.model}-{img_size}'

    if args.results_dir:
        results_filename = os.path.join(args.results_dir, results_filename)

    for fmt in args.results_format:
        save_results(df, results_filename, fmt)

    print('--result')
    print(df.set_index(args.filename_col).to_json(orient='index', indent=4))


def save_results(df, results_filename, results_format='csv', filename_col='filename'):
    results_filename += _FMT_EXT[results_format]
    if results_format == 'parquet':
        df.set_index(filename_col).to_parquet(results_filename)
    elif results_format == 'json':
        df.set_index(filename_col).to_json(results_filename, indent=4, orient='index')
    elif results_format == 'json-records':
        df.to_json(results_filename, lines=True, orient='records')
    elif results_format == 'json-split':
        df.to_json(results_filename, indent=4, orient='split', index=False)
    else:
        df.to_csv(results_filename, index=False)


if __name__ == '__main__':
    main()
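# The prediction loop above optionally softmaxes, then keeps only the top-k
# entries per sample; indices feed the index/label columns and values the
# prob/logit columns. The same shaping in isolation:
logits = torch.randn(2, 1000)
probs = logits.softmax(-1)
top_prob, top_idx = probs.topk(5)   # both (2, 5), sorted descending
print(top_idx.cpu().numpy().shape)  # (2, 5)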
# File: pytorch-image-models-main/onnx_export.py
import argparse

import timm
from timm.utils.model import reparameterize_model
from timm.utils.onnx import onnx_export

parser = argparse.ArgumentParser(description='PyTorch ONNX Export')
parser.add_argument('output', metavar='ONNX_FILE', help='output model filename')
parser.add_argument('--model', '-m', metavar='MODEL', default='mobilenetv3_large_100', help='model architecture (default: mobilenetv3_large_100)')
parser.add_argument('--opset', type=int, default=None, help='ONNX opset to use (default: None, exporter default)')
parser.add_argument('--keep-init', action='store_true', default=False, help='Keep initializers as input. Needed for Caffe2 compatible export in newer PyTorch/ONNX.')
parser.add_argument('--aten-fallback', action='store_true', default=False, help='Fallback to ATEN ops. Helps fix AdaptiveAvgPool issue with Caffe2 in newer PyTorch/ONNX.')
parser.add_argument('--dynamic-size', action='store_true', default=False, help='Export model with dynamic width/height. Not recommended for "tf" models with SAME padding.')
parser.add_argument('--check-forward', action='store_true', default=False, help='Do a full check of torch vs onnx forward after export.')
parser.add_argument('-b', '--batch-size', default=1, type=int, metavar='N', help='mini-batch size (default: 1)')
parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of dataset')
parser.add_argument('--num-classes', type=int, default=1000, help='Number of classes in dataset')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='path to checkpoint (default: none)')
parser.add_argument('--reparam', default=False, action='store_true', help='Reparameterize model')
parser.add_argument('--training', default=False, action='store_true', help='Export in training mode (default is eval)')
parser.add_argument('--verbose', default=False, action='store_true', help='Extra stdout output')
parser.add_argument('--dynamo', default=False, action='store_true', help='Use torch dynamo export.')


def main():
    args = parser.parse_args()

    args.pretrained = True
    if args.checkpoint:
        args.pretrained = False

    print('==> Creating PyTorch {} model'.format(args.model))
    # NOTE exportable=True flag disables scripted/autograd-fn alternatives that can't be exported
    model = timm.create_model(
        args.model,
        num_classes=args.num_classes,
        in_chans=3,
        pretrained=args.pretrained,
        checkpoint_path=args.checkpoint,
        exportable=True,
    )

    if args.reparam:
        model = reparameterize_model(model)

    onnx_export(
        model,
        args.output,
        opset=args.opset,
        dynamic_size=args.dynamic_size,
        aten_fallback=args.aten_fallback,
        keep_initializers=args.keep_init,
        check_forward=args.check_forward,
        training=args.training,
        verbose=args.verbose,
        use_dynamo=args.dynamo,
        input_size=(3, args.img_size, args.img_size),
        batch_size=args.batch_size,
    )


if __name__ == '__main__':
    main()
type=str, metavar='NAME', help='Image resize interpolation type (overrides model)') parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)') def main(): args = parser.parse_args() args.gpu_id = 0 sess_options = onnxruntime.SessionOptions() sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL if args.profile: sess_options.enable_profiling = True if args.onnx_output_opt: sess_options.optimized_model_filepath = args.onnx_output_opt session = onnxruntime.InferenceSession(args.onnx_input, sess_options) data_config = resolve_data_config(vars(args)) loader = create_loader(create_dataset('', args.data), input_size=data_config['input_size'], batch_size=args.batch_size, use_prefetcher=False, interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, crop_pct=data_config['crop_pct']) input_name = session.get_inputs()[0].name batch_time = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() end = time.time() for (i, (input, target)) in enumerate(loader): output = session.run([], {input_name: input.data.numpy()}) output = output[0] (prec1, prec5) = accuracy_np(output, target.numpy()) top1.update(prec1.item(), input.size(0)) top5.update(prec5.item(), input.size(0)) batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0: print(f'Test: [{i}/{len(loader)}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f}, {input.size(0) / batch_time.avg:.3f}/s, {1000 * batch_time.avg / input.size(0):.3f} ms/sample) \tPrec@1 {top1.val:.3f} ({top1.avg:.3f})\tPrec@5 {top5.val:.3f} ({top5.avg:.3f})') print(f' * Prec@1 {top1.avg:.3f} ({100 - top1.avg:.3f}) Prec@5 {top5.avg:.3f} ({100.0 - top5.avg:.3f})') def accuracy_np(output, target): max_indices = np.argsort(output, axis=1)[:, ::-1] top5 = 100 * np.equal(max_indices[:, :5], target[:, np.newaxis]).sum(axis=1).mean() top1 = 100 * np.equal(max_indices[:, 0], target).mean() return (top1, top5) if __name__ == '__main__': main() # File: pytorch-image-models-main/results/generate_csv_results.py import numpy as np import pandas as pd results = {'results-imagenet.csv': ['results-imagenet-real.csv', 'results-imagenetv2-matched-frequency.csv', 'results-sketch.csv'], 'results-imagenet-a-clean.csv': ['results-imagenet-a.csv'], 'results-imagenet-r-clean.csv': ['results-imagenet-r.csv']} def diff(base_df, test_csv): base_df['mi'] = base_df.model + '-' + base_df.img_size.astype('str') base_models = base_df['mi'].values test_df = pd.read_csv(test_csv) test_df['mi'] = test_df.model + '-' + test_df.img_size.astype('str') test_models = test_df['mi'].values rank_diff = np.zeros_like(test_models, dtype='object') top1_diff = np.zeros_like(test_models, dtype='object') top5_diff = np.zeros_like(test_models, dtype='object') for (rank, model) in enumerate(test_models): if model in base_models: base_rank = int(np.where(base_models == model)[0]) top1_d = test_df['top1'][rank] - base_df['top1'][base_rank] top5_d = test_df['top5'][rank] - base_df['top5'][base_rank] if rank == base_rank: rank_diff[rank] = '0' elif rank > base_rank: rank_diff[rank] = f'-{rank - base_rank}' else: rank_diff[rank] = f'+{base_rank - rank}' if top1_d >= 0.0: top1_diff[rank] = f'+{top1_d:.3f}' else: top1_diff[rank] = f'-{abs(top1_d):.3f}' if top5_d >= 0.0: top5_diff[rank] = f'+{top5_d:.3f}' else: top5_diff[rank] = f'-{abs(top5_d):.3f}' else: rank_diff[rank] = '' top1_diff[rank] = '' top5_diff[rank] = '' test_df['top1_diff'] = top1_diff 
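# A standalone worked example of the rank-diff convention used in diff() above
# (a sketch; _format_rank_diff is an illustrative helper, not part of the
# script): a model at test rank 5 that held base rank 2 has moved down three
# places, recorded as '-3'; a move up is recorded with a '+' prefix.
def _format_rank_diff(rank: int, base_rank: int) -> str:
    if rank == base_rank:
        return '0'
    return f'-{rank - base_rank}' if rank > base_rank else f'+{base_rank - rank}'

assert _format_rank_diff(5, 2) == '-3'
assert _format_rank_diff(2, 5) == '+3'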
test_df['top5_diff'] = top5_diff test_df['rank_diff'] = rank_diff test_df.drop('mi', axis=1, inplace=True) base_df.drop('mi', axis=1, inplace=True) test_df['param_count'] = test_df['param_count'].map('{:,.2f}'.format) test_df.sort_values(['top1', 'top5', 'model'], ascending=[False, False, True], inplace=True) test_df.to_csv(test_csv, index=False, float_format='%.3f') for (base_results, test_results) in results.items(): base_df = pd.read_csv(base_results) base_df.sort_values(['top1', 'top5', 'model'], ascending=[False, False, True], inplace=True) for test_csv in test_results: diff(base_df, test_csv) base_df['param_count'] = base_df['param_count'].map('{:,.2f}'.format) base_df.to_csv(base_results, index=False, float_format='%.3f') # File: pytorch-image-models-main/timm/data/__init__.py from .auto_augment import RandAugment, AutoAugment, rand_augment_ops, auto_augment_policy, rand_augment_transform, auto_augment_transform from .config import resolve_data_config, resolve_model_data_config from .constants import * from .dataset import ImageDataset, IterableImageDataset, AugMixDataset from .dataset_factory import create_dataset from .dataset_info import DatasetInfo, CustomDatasetInfo from .imagenet_info import ImageNetInfo, infer_imagenet_subset from .loader import create_loader from .mixup import Mixup, FastCollateMixup from .readers import create_reader from .readers import get_img_extensions, is_img_extension, set_img_extensions, add_img_extensions, del_img_extensions from .real_labels import RealLabelsImagenet from .transforms import * from .transforms_factory import create_transform # File: pytorch-image-models-main/timm/data/auto_augment.py """""" import random import math import re from functools import partial from typing import Dict, List, Optional, Union from PIL import Image, ImageOps, ImageEnhance, ImageChops, ImageFilter import PIL import numpy as np _PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]]) _FILL = (128, 128, 128) _LEVEL_DENOM = 10.0 _HPARAMS_DEFAULT = dict(translate_const=250, img_mean=_FILL) if hasattr(Image, 'Resampling'): _RANDOM_INTERPOLATION = (Image.Resampling.BILINEAR, Image.Resampling.BICUBIC) _DEFAULT_INTERPOLATION = Image.Resampling.BICUBIC else: _RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) _DEFAULT_INTERPOLATION = Image.BICUBIC def _interpolation(kwargs): interpolation = kwargs.pop('resample', _DEFAULT_INTERPOLATION) if isinstance(interpolation, (list, tuple)): return random.choice(interpolation) return interpolation def _check_args_tf(kwargs): if 'fillcolor' in kwargs and _PIL_VER < (5, 0): kwargs.pop('fillcolor') kwargs['resample'] = _interpolation(kwargs) def shear_x(img, factor, **kwargs): _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs) def shear_y(img, factor, **kwargs): _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs) def translate_x_rel(img, pct, **kwargs): pixels = pct * img.size[0] _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) def translate_y_rel(img, pct, **kwargs): pixels = pct * img.size[1] _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) def translate_x_abs(img, pixels, **kwargs): _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) def translate_y_abs(img, pixels, **kwargs): _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 
1, pixels), **kwargs) def rotate(img, degrees, **kwargs): _check_args_tf(kwargs) if _PIL_VER >= (5, 2): return img.rotate(degrees, **kwargs) if _PIL_VER >= (5, 0): (w, h) = img.size post_trans = (0, 0) rotn_center = (w / 2.0, h / 2.0) angle = -math.radians(degrees) matrix = [round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0, round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0] def transform(x, y, matrix): (a, b, c, d, e, f) = matrix return (a * x + b * y + c, d * x + e * y + f) (matrix[2], matrix[5]) = transform(-rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix) matrix[2] += rotn_center[0] matrix[5] += rotn_center[1] return img.transform(img.size, Image.AFFINE, matrix, **kwargs) return img.rotate(degrees, resample=kwargs['resample']) def auto_contrast(img, **__): return ImageOps.autocontrast(img) def invert(img, **__): return ImageOps.invert(img) def equalize(img, **__): return ImageOps.equalize(img) def solarize(img, thresh, **__): return ImageOps.solarize(img, thresh) def solarize_add(img, add, thresh=128, **__): lut = [] for i in range(256): if i < thresh: lut.append(min(255, i + add)) else: lut.append(i) if img.mode in ('L', 'RGB'): if img.mode == 'RGB' and len(lut) == 256: lut = lut + lut + lut return img.point(lut) return img def posterize(img, bits_to_keep, **__): if bits_to_keep >= 8: return img return ImageOps.posterize(img, bits_to_keep) def contrast(img, factor, **__): return ImageEnhance.Contrast(img).enhance(factor) def color(img, factor, **__): return ImageEnhance.Color(img).enhance(factor) def brightness(img, factor, **__): return ImageEnhance.Brightness(img).enhance(factor) def sharpness(img, factor, **__): return ImageEnhance.Sharpness(img).enhance(factor) def gaussian_blur(img, factor, **__): img = img.filter(ImageFilter.GaussianBlur(radius=factor)) return img def gaussian_blur_rand(img, factor, **__): radius_min = 0.1 radius_max = 2.0 img = img.filter(ImageFilter.GaussianBlur(radius=random.uniform(radius_min, radius_max * factor))) return img def desaturate(img, factor, **_): factor = min(1.0, max(0.0, 1.0 - factor)) return ImageEnhance.Color(img).enhance(factor) def _randomly_negate(v): return -v if random.random() > 0.5 else v def _rotate_level_to_arg(level, _hparams): level = level / _LEVEL_DENOM * 30.0 level = _randomly_negate(level) return (level,) def _enhance_level_to_arg(level, _hparams): return (level / _LEVEL_DENOM * 1.8 + 0.1,) def _enhance_increasing_level_to_arg(level, _hparams): level = level / _LEVEL_DENOM * 0.9 level = max(0.1, 1.0 + _randomly_negate(level)) return (level,) def _minmax_level_to_arg(level, _hparams, min_val=0.0, max_val=1.0, clamp=True): level = level / _LEVEL_DENOM level = min_val + (max_val - min_val) * level if clamp: level = max(min_val, min(max_val, level)) return (level,) def _shear_level_to_arg(level, _hparams): level = level / _LEVEL_DENOM * 0.3 level = _randomly_negate(level) return (level,) def _translate_abs_level_to_arg(level, hparams): translate_const = hparams['translate_const'] level = level / _LEVEL_DENOM * float(translate_const) level = _randomly_negate(level) return (level,) def _translate_rel_level_to_arg(level, hparams): translate_pct = hparams.get('translate_pct', 0.45) level = level / _LEVEL_DENOM * translate_pct level = _randomly_negate(level) return (level,) def _posterize_level_to_arg(level, _hparams): return (int(level / _LEVEL_DENOM * 4),) def _posterize_increasing_level_to_arg(level, hparams): return (4 - _posterize_level_to_arg(level, hparams)[0],) def 
_posterize_original_level_to_arg(level, _hparams): return (int(level / _LEVEL_DENOM * 4) + 4,) def _solarize_level_to_arg(level, _hparams): return (min(256, int(level / _LEVEL_DENOM * 256)),) def _solarize_increasing_level_to_arg(level, _hparams): return (256 - _solarize_level_to_arg(level, _hparams)[0],) def _solarize_add_level_to_arg(level, _hparams): return (min(128, int(level / _LEVEL_DENOM * 110)),) LEVEL_TO_ARG = {'AutoContrast': None, 'Equalize': None, 'Invert': None, 'Rotate': _rotate_level_to_arg, 'Posterize': _posterize_level_to_arg, 'PosterizeIncreasing': _posterize_increasing_level_to_arg, 'PosterizeOriginal': _posterize_original_level_to_arg, 'Solarize': _solarize_level_to_arg, 'SolarizeIncreasing': _solarize_increasing_level_to_arg, 'SolarizeAdd': _solarize_add_level_to_arg, 'Color': _enhance_level_to_arg, 'ColorIncreasing': _enhance_increasing_level_to_arg, 'Contrast': _enhance_level_to_arg, 'ContrastIncreasing': _enhance_increasing_level_to_arg, 'Brightness': _enhance_level_to_arg, 'BrightnessIncreasing': _enhance_increasing_level_to_arg, 'Sharpness': _enhance_level_to_arg, 'SharpnessIncreasing': _enhance_increasing_level_to_arg, 'ShearX': _shear_level_to_arg, 'ShearY': _shear_level_to_arg, 'TranslateX': _translate_abs_level_to_arg, 'TranslateY': _translate_abs_level_to_arg, 'TranslateXRel': _translate_rel_level_to_arg, 'TranslateYRel': _translate_rel_level_to_arg, 'Desaturate': partial(_minmax_level_to_arg, min_val=0.5, max_val=1.0), 'GaussianBlur': partial(_minmax_level_to_arg, min_val=0.1, max_val=2.0), 'GaussianBlurRand': _minmax_level_to_arg} NAME_TO_OP = {'AutoContrast': auto_contrast, 'Equalize': equalize, 'Invert': invert, 'Rotate': rotate, 'Posterize': posterize, 'PosterizeIncreasing': posterize, 'PosterizeOriginal': posterize, 'Solarize': solarize, 'SolarizeIncreasing': solarize, 'SolarizeAdd': solarize_add, 'Color': color, 'ColorIncreasing': color, 'Contrast': contrast, 'ContrastIncreasing': contrast, 'Brightness': brightness, 'BrightnessIncreasing': brightness, 'Sharpness': sharpness, 'SharpnessIncreasing': sharpness, 'ShearX': shear_x, 'ShearY': shear_y, 'TranslateX': translate_x_abs, 'TranslateY': translate_y_abs, 'TranslateXRel': translate_x_rel, 'TranslateYRel': translate_y_rel, 'Desaturate': desaturate, 'GaussianBlur': gaussian_blur, 'GaussianBlurRand': gaussian_blur_rand} class AugmentOp: def __init__(self, name, prob=0.5, magnitude=10, hparams=None): hparams = hparams or _HPARAMS_DEFAULT self.name = name self.aug_fn = NAME_TO_OP[name] self.level_fn = LEVEL_TO_ARG[name] self.prob = prob self.magnitude = magnitude self.hparams = hparams.copy() self.kwargs = dict(fillcolor=hparams['img_mean'] if 'img_mean' in hparams else _FILL, resample=hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION) self.magnitude_std = self.hparams.get('magnitude_std', 0) self.magnitude_max = self.hparams.get('magnitude_max', None) def __call__(self, img): if self.prob < 1.0 and random.random() > self.prob: return img magnitude = self.magnitude if self.magnitude_std > 0: if self.magnitude_std == float('inf'): magnitude = random.uniform(0, magnitude) elif self.magnitude_std > 0: magnitude = random.gauss(magnitude, self.magnitude_std) upper_bound = self.magnitude_max or _LEVEL_DENOM magnitude = max(0.0, min(magnitude, upper_bound)) level_args = self.level_fn(magnitude, self.hparams) if self.level_fn is not None else tuple() return self.aug_fn(img, *level_args, **self.kwargs) def __repr__(self): fs = self.__class__.__name__ + f'(name={self.name}, 
p={self.prob}' fs += f', m={self.magnitude}, mstd={self.magnitude_std}' if self.magnitude_max is not None: fs += f', mmax={self.magnitude_max}' fs += ')' return fs def auto_augment_policy_v0(hparams): policy = [[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], [('Color', 0.4, 9), ('Equalize', 0.6, 3)], [('Color', 0.4, 1), ('Rotate', 0.6, 8)], [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], [('Color', 0.2, 0), ('Equalize', 0.8, 8)], [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], [('Color', 0.6, 1), ('Equalize', 1.0, 2)], [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], [('Color', 0.4, 7), ('Equalize', 0.6, 0)], [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], [('Solarize', 0.6, 8), ('Color', 0.6, 9)], [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], [('ShearY', 0.8, 0), ('Color', 0.6, 4)], [('Color', 1.0, 0), ('Rotate', 0.6, 2)], [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], [('Color', 0.8, 6), ('Rotate', 0.4, 5)]] pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] return pc def auto_augment_policy_v0r(hparams): policy = [[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], [('Color', 0.4, 9), ('Equalize', 0.6, 3)], [('Color', 0.4, 1), ('Rotate', 0.6, 8)], [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], [('Color', 0.2, 0), ('Equalize', 0.8, 8)], [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], [('Color', 0.6, 1), ('Equalize', 1.0, 2)], [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], [('Color', 0.4, 7), ('Equalize', 0.6, 0)], [('PosterizeIncreasing', 0.4, 6), ('AutoContrast', 0.4, 7)], [('Solarize', 0.6, 8), ('Color', 0.6, 9)], [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], [('ShearY', 0.8, 0), ('Color', 0.6, 4)], [('Color', 1.0, 0), ('Rotate', 0.6, 2)], [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], [('PosterizeIncreasing', 0.8, 2), ('Solarize', 0.6, 10)], [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], [('Color', 0.8, 6), ('Rotate', 0.4, 5)]] pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] return pc def auto_augment_policy_original(hparams): policy = [[('PosterizeOriginal', 0.4, 8), ('Rotate', 0.6, 9)], [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], [('PosterizeOriginal', 0.6, 7), ('PosterizeOriginal', 0.6, 6)], [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], [('PosterizeOriginal', 0.8, 5), ('Equalize', 1.0, 2)], [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], [('Equalize', 0.6, 8), ('PosterizeOriginal', 0.4, 6)], [('Rotate', 0.8, 8), ('Color', 0.4, 0)], [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], [('Color', 0.6, 4), ('Contrast', 1.0, 8)], [('Rotate', 0.8, 8), ('Color', 1.0, 2)], [('Color', 0.8, 8), ('Solarize', 0.8, 7)], [('Sharpness', 0.4, 7), 
('Invert', 0.6, 8)], [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], [('Color', 0.4, 0), ('Equalize', 0.6, 3)], [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], [('Color', 0.6, 4), ('Contrast', 1.0, 8)], [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)]] pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] return pc def auto_augment_policy_originalr(hparams): policy = [[('PosterizeIncreasing', 0.4, 8), ('Rotate', 0.6, 9)], [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], [('PosterizeIncreasing', 0.6, 7), ('PosterizeIncreasing', 0.6, 6)], [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], [('PosterizeIncreasing', 0.8, 5), ('Equalize', 1.0, 2)], [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], [('Equalize', 0.6, 8), ('PosterizeIncreasing', 0.4, 6)], [('Rotate', 0.8, 8), ('Color', 0.4, 0)], [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], [('Color', 0.6, 4), ('Contrast', 1.0, 8)], [('Rotate', 0.8, 8), ('Color', 1.0, 2)], [('Color', 0.8, 8), ('Solarize', 0.8, 7)], [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], [('Color', 0.4, 0), ('Equalize', 0.6, 3)], [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], [('Color', 0.6, 4), ('Contrast', 1.0, 8)], [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)]] pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] return pc def auto_augment_policy_3a(hparams): policy = [[('Solarize', 1.0, 5)], [('Desaturate', 1.0, 10)], [('GaussianBlurRand', 1.0, 10)]] pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] return pc def auto_augment_policy(name='v0', hparams=None): hparams = hparams or _HPARAMS_DEFAULT if name == 'original': return auto_augment_policy_original(hparams) if name == 'originalr': return auto_augment_policy_originalr(hparams) if name == 'v0': return auto_augment_policy_v0(hparams) if name == 'v0r': return auto_augment_policy_v0r(hparams) if name == '3a': return auto_augment_policy_3a(hparams) assert False, f'Unknown AA policy {name}' class AutoAugment: def __init__(self, policy): self.policy = policy def __call__(self, img): sub_policy = random.choice(self.policy) for op in sub_policy: img = op(img) return img def __repr__(self): fs = self.__class__.__name__ + '(policy=' for p in self.policy: fs += '\n\t[' fs += ', '.join([str(op) for op in p]) fs += ']' fs += ')' return fs def auto_augment_transform(config_str: str, hparams: Optional[Dict]=None): config = config_str.split('-') policy_name = config[0] config = config[1:] for c in config: cs = re.split('(\\d.*)', c) if len(cs) < 2: continue (key, val) = cs[:2] if key == 'mstd': hparams.setdefault('magnitude_std', float(val)) else: assert False, 'Unknown AutoAugment config section' aa_policy = auto_augment_policy(policy_name, hparams=hparams) return AutoAugment(aa_policy) _RAND_TRANSFORMS = ['AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize', 'Solarize', 'SolarizeAdd', 'Color', 'Contrast', 'Brightness', 'Sharpness', 'ShearX', 'ShearY', 'TranslateXRel', 'TranslateYRel'] _RAND_INCREASING_TRANSFORMS = ['AutoContrast', 'Equalize', 'Invert', 'Rotate', 'PosterizeIncreasing', 'SolarizeIncreasing', 'SolarizeAdd', 'ColorIncreasing', 
'ContrastIncreasing', 'BrightnessIncreasing', 'SharpnessIncreasing', 'ShearX', 'ShearY', 'TranslateXRel', 'TranslateYRel'] _RAND_3A = ['SolarizeIncreasing', 'Desaturate', 'GaussianBlur'] _RAND_WEIGHTED_3A = {'SolarizeIncreasing': 6, 'Desaturate': 6, 'GaussianBlur': 6, 'Rotate': 3, 'ShearX': 2, 'ShearY': 2, 'PosterizeIncreasing': 1, 'AutoContrast': 1, 'ColorIncreasing': 1, 'SharpnessIncreasing': 1, 'ContrastIncreasing': 1, 'BrightnessIncreasing': 1, 'Equalize': 1, 'Invert': 1} _RAND_WEIGHTED_0 = {'Rotate': 3, 'ShearX': 2, 'ShearY': 2, 'TranslateXRel': 1, 'TranslateYRel': 1, 'ColorIncreasing': 0.25, 'SharpnessIncreasing': 0.25, 'AutoContrast': 0.25, 'SolarizeIncreasing': 0.05, 'SolarizeAdd': 0.05, 'ContrastIncreasing': 0.05, 'BrightnessIncreasing': 0.05, 'Equalize': 0.05, 'PosterizeIncreasing': 0.05, 'Invert': 0.05} def _get_weighted_transforms(transforms: Dict): (transforms, probs) = list(zip(*transforms.items())) probs = np.array(probs) probs = probs / np.sum(probs) return (transforms, probs) def rand_augment_choices(name: str, increasing=True): if name == 'weights': return _RAND_WEIGHTED_0 if name == '3aw': return _RAND_WEIGHTED_3A if name == '3a': return _RAND_3A return _RAND_INCREASING_TRANSFORMS if increasing else _RAND_TRANSFORMS def rand_augment_ops(magnitude: Union[int, float]=10, prob: float=0.5, hparams: Optional[Dict]=None, transforms: Optional[Union[Dict, List]]=None): hparams = hparams or _HPARAMS_DEFAULT transforms = transforms or _RAND_TRANSFORMS return [AugmentOp(name, prob=prob, magnitude=magnitude, hparams=hparams) for name in transforms] class RandAugment: def __init__(self, ops, num_layers=2, choice_weights=None): self.ops = ops self.num_layers = num_layers self.choice_weights = choice_weights def __call__(self, img): ops = np.random.choice(self.ops, self.num_layers, replace=self.choice_weights is None, p=self.choice_weights) for op in ops: img = op(img) return img def __repr__(self): fs = self.__class__.__name__ + f'(n={self.num_layers}, ops=' for op in self.ops: fs += f'\n\t{op}' fs += ')' return fs def rand_augment_transform(config_str: str, hparams: Optional[Dict]=None, transforms: Optional[Union[str, Dict, List]]=None): magnitude = _LEVEL_DENOM num_layers = 2 increasing = False prob = 0.5 config = config_str.split('-') assert config[0] == 'rand' config = config[1:] for c in config: if c.startswith('t'): val = str(c[1:]) if transforms is None: transforms = val else: cs = re.split('(\\d.*)', c) if len(cs) < 2: continue (key, val) = cs[:2] if key == 'mstd': mstd = float(val) if mstd > 100: mstd = float('inf') hparams.setdefault('magnitude_std', mstd) elif key == 'mmax': hparams.setdefault('magnitude_max', int(val)) elif key == 'inc': if bool(val): increasing = True elif key == 'm': magnitude = int(val) elif key == 'n': num_layers = int(val) elif key == 'p': prob = float(val) else: assert False, 'Unknown RandAugment config section' if isinstance(transforms, str): transforms = rand_augment_choices(transforms, increasing=increasing) elif transforms is None: transforms = _RAND_INCREASING_TRANSFORMS if increasing else _RAND_TRANSFORMS choice_weights = None if isinstance(transforms, Dict): (transforms, choice_weights) = _get_weighted_transforms(transforms) ra_ops = rand_augment_ops(magnitude=magnitude, prob=prob, hparams=hparams, transforms=transforms) return RandAugment(ra_ops, num_layers, choice_weights=choice_weights) _AUGMIX_TRANSFORMS = ['AutoContrast', 'ColorIncreasing', 'ContrastIncreasing', 'BrightnessIncreasing', 'SharpnessIncreasing', 'Equalize', 'Rotate', 
'PosterizeIncreasing', 'SolarizeIncreasing', 'ShearX', 'ShearY', 'TranslateXRel', 'TranslateYRel'] def augmix_ops(magnitude: Union[int, float]=10, hparams: Optional[Dict]=None, transforms: Optional[Union[str, Dict, List]]=None): hparams = hparams or _HPARAMS_DEFAULT transforms = transforms or _AUGMIX_TRANSFORMS return [AugmentOp(name, prob=1.0, magnitude=magnitude, hparams=hparams) for name in transforms] class AugMixAugment: def __init__(self, ops, alpha=1.0, width=3, depth=-1, blended=False): self.ops = ops self.alpha = alpha self.width = width self.depth = depth self.blended = blended def _calc_blended_weights(self, ws, m): ws = ws * m cump = 1.0 rws = [] for w in ws[::-1]: alpha = w / cump cump *= 1 - alpha rws.append(alpha) return np.array(rws[::-1], dtype=np.float32) def _apply_blended(self, img, mixing_weights, m): img_orig = img.copy() ws = self._calc_blended_weights(mixing_weights, m) for w in ws: depth = self.depth if self.depth > 0 else np.random.randint(1, 4) ops = np.random.choice(self.ops, depth, replace=True) img_aug = img_orig for op in ops: img_aug = op(img_aug) img = Image.blend(img, img_aug, w) return img def _apply_basic(self, img, mixing_weights, m): img_shape = (img.size[0], img.size[1], len(img.getbands())) mixed = np.zeros(img_shape, dtype=np.float32) for mw in mixing_weights: depth = self.depth if self.depth > 0 else np.random.randint(1, 4) ops = np.random.choice(self.ops, depth, replace=True) img_aug = img for op in ops: img_aug = op(img_aug) mixed += mw * np.asarray(img_aug, dtype=np.float32) np.clip(mixed, 0, 255.0, out=mixed) mixed = Image.fromarray(mixed.astype(np.uint8)) return Image.blend(img, mixed, m) def __call__(self, img): mixing_weights = np.float32(np.random.dirichlet([self.alpha] * self.width)) m = np.float32(np.random.beta(self.alpha, self.alpha)) if self.blended: mixed = self._apply_blended(img, mixing_weights, m) else: mixed = self._apply_basic(img, mixing_weights, m) return mixed def __repr__(self): fs = self.__class__.__name__ + f'(alpha={self.alpha}, width={self.width}, depth={self.depth}, ops=' for op in self.ops: fs += f'\n\t{op}' fs += ')' return fs def augment_and_mix_transform(config_str: str, hparams: Optional[Dict]=None): magnitude = 3 width = 3 depth = -1 alpha = 1.0 blended = False config = config_str.split('-') assert config[0] == 'augmix' config = config[1:] for c in config: cs = re.split('(\\d.*)', c) if len(cs) < 2: continue (key, val) = cs[:2] if key == 'mstd': hparams.setdefault('magnitude_std', float(val)) elif key == 'm': magnitude = int(val) elif key == 'w': width = int(val) elif key == 'd': depth = int(val) elif key == 'a': alpha = float(val) elif key == 'b': blended = bool(val) else: assert False, 'Unknown AugMix config section' hparams.setdefault('magnitude_std', float('inf')) ops = augmix_ops(magnitude=magnitude, hparams=hparams) return AugMixAugment(ops, alpha=alpha, width=width, depth=depth, blended=blended) # File: pytorch-image-models-main/timm/data/config.py import logging from .constants import * _logger = logging.getLogger(__name__) def resolve_data_config(args=None, pretrained_cfg=None, model=None, use_test_size=False, verbose=False): assert model or args or pretrained_cfg, 'At least one of model, args, or pretrained_cfg required for data config.' 
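# Usage sketch for the config-string parsers defined above (the example strings
# and the synthetic grey image are illustrative; passing an explicit hparams
# dict sidesteps the None default):
#   'rand-m9-mstd0.5-inc1' -> RandAugment, magnitude 9, magnitude noise std 0.5, 'increasing' op set
#   'augmix-m5-w4-d2'      -> AugMix, magnitude 5, mixing width 4, chain depth 2
from PIL import Image
from timm.data.auto_augment import rand_augment_transform, augment_and_mix_transform

def _demo_aa_configs():
    img = Image.new('RGB', (224, 224), color=(128, 128, 128))
    ra = rand_augment_transform('rand-m9-mstd0.5-inc1', hparams={})
    am = augment_and_mix_transform('augmix-m5-w4-d2', hparams={})
    return ra(img), am(img)  # two independently augmented PIL images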
args = args or {} pretrained_cfg = pretrained_cfg or {} if not pretrained_cfg and model is not None and hasattr(model, 'pretrained_cfg'): pretrained_cfg = model.pretrained_cfg data_config = {} in_chans = 3 if args.get('in_chans', None) is not None: in_chans = args['in_chans'] elif args.get('chans', None) is not None: in_chans = args['chans'] input_size = (in_chans, 224, 224) if args.get('input_size', None) is not None: assert isinstance(args['input_size'], (tuple, list)) assert len(args['input_size']) == 3 input_size = tuple(args['input_size']) in_chans = input_size[0] elif args.get('img_size', None) is not None: assert isinstance(args['img_size'], int) input_size = (in_chans, args['img_size'], args['img_size']) elif use_test_size and pretrained_cfg.get('test_input_size', None) is not None: input_size = pretrained_cfg['test_input_size'] elif pretrained_cfg.get('input_size', None) is not None: input_size = pretrained_cfg['input_size'] data_config['input_size'] = input_size data_config['interpolation'] = 'bicubic' if args.get('interpolation', None): data_config['interpolation'] = args['interpolation'] elif pretrained_cfg.get('interpolation', None): data_config['interpolation'] = pretrained_cfg['interpolation'] data_config['mean'] = IMAGENET_DEFAULT_MEAN if args.get('mean', None) is not None: mean = tuple(args['mean']) if len(mean) == 1: mean = tuple(list(mean) * in_chans) else: assert len(mean) == in_chans data_config['mean'] = mean elif pretrained_cfg.get('mean', None): data_config['mean'] = pretrained_cfg['mean'] data_config['std'] = IMAGENET_DEFAULT_STD if args.get('std', None) is not None: std = tuple(args['std']) if len(std) == 1: std = tuple(list(std) * in_chans) else: assert len(std) == in_chans data_config['std'] = std elif pretrained_cfg.get('std', None): data_config['std'] = pretrained_cfg['std'] crop_pct = DEFAULT_CROP_PCT if args.get('crop_pct', None): crop_pct = args['crop_pct'] elif use_test_size and pretrained_cfg.get('test_crop_pct', None): crop_pct = pretrained_cfg['test_crop_pct'] elif pretrained_cfg.get('crop_pct', None): crop_pct = pretrained_cfg['crop_pct'] data_config['crop_pct'] = crop_pct crop_mode = DEFAULT_CROP_MODE if args.get('crop_mode', None): crop_mode = args['crop_mode'] elif pretrained_cfg.get('crop_mode', None): crop_mode = pretrained_cfg['crop_mode'] data_config['crop_mode'] = crop_mode if verbose: _logger.info('Data processing configuration for current model + dataset:') for (n, v) in data_config.items(): _logger.info('\t%s: %s' % (n, str(v))) return data_config def resolve_model_data_config(model, args=None, pretrained_cfg=None, use_test_size=False, verbose=False): return resolve_data_config(args=args, pretrained_cfg=pretrained_cfg, model=model, use_test_size=use_test_size, verbose=verbose) # File: pytorch-image-models-main/timm/data/constants.py DEFAULT_CROP_PCT = 0.875 DEFAULT_CROP_MODE = 'center' IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255) IMAGENET_DPN_STD = tuple([1 / (0.0167 * 255)] * 3) OPENAI_CLIP_MEAN = (0.48145466, 0.4578275, 0.40821073) OPENAI_CLIP_STD = (0.26862954, 0.26130258, 0.27577711) # File: pytorch-image-models-main/timm/data/dataset.py """""" import io import logging from typing import Optional import torch import torch.utils.data as data from PIL import Image from .readers import create_reader _logger = logging.getLogger(__name__) _ERROR_RETRY = 50 class 
ImageDataset(data.Dataset): def __init__(self, root, reader=None, split='train', class_map=None, load_bytes=False, input_img_mode='RGB', transform=None, target_transform=None, **kwargs): if reader is None or isinstance(reader, str): reader = create_reader(reader or '', root=root, split=split, class_map=class_map, **kwargs) self.reader = reader self.load_bytes = load_bytes self.input_img_mode = input_img_mode self.transform = transform self.target_transform = target_transform self._consecutive_errors = 0 def __getitem__(self, index): (img, target) = self.reader[index] try: img = img.read() if self.load_bytes else Image.open(img) except Exception as e: _logger.warning(f'Skipped sample (index {index}, file {self.reader.filename(index)}). {str(e)}') self._consecutive_errors += 1 if self._consecutive_errors < _ERROR_RETRY: return self.__getitem__((index + 1) % len(self.reader)) else: raise e self._consecutive_errors = 0 if self.input_img_mode and (not self.load_bytes): img = img.convert(self.input_img_mode) if self.transform is not None: img = self.transform(img) if target is None: target = -1 elif self.target_transform is not None: target = self.target_transform(target) return (img, target) def __len__(self): return len(self.reader) def filename(self, index, basename=False, absolute=False): return self.reader.filename(index, basename, absolute) def filenames(self, basename=False, absolute=False): return self.reader.filenames(basename, absolute) class IterableImageDataset(data.IterableDataset): def __init__(self, root, reader=None, split='train', class_map=None, is_training=False, batch_size=1, num_samples=None, seed=42, repeats=0, download=False, input_img_mode='RGB', input_key=None, target_key=None, transform=None, target_transform=None, max_steps=None): assert reader is not None if isinstance(reader, str): self.reader = create_reader(reader, root=root, split=split, class_map=class_map, is_training=is_training, batch_size=batch_size, num_samples=num_samples, seed=seed, repeats=repeats, download=download, input_img_mode=input_img_mode, input_key=input_key, target_key=target_key, max_steps=max_steps) else: self.reader = reader self.transform = transform self.target_transform = target_transform self._consecutive_errors = 0 def __iter__(self): for (img, target) in self.reader: if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) yield (img, target) def __len__(self): if hasattr(self.reader, '__len__'): return len(self.reader) else: return 0 def set_epoch(self, count): if hasattr(self.reader, 'set_epoch'): self.reader.set_epoch(count) def set_loader_cfg(self, num_workers: Optional[int]=None): if hasattr(self.reader, 'set_loader_cfg'): self.reader.set_loader_cfg(num_workers=num_workers) def filename(self, index, basename=False, absolute=False): assert False, 'Filename lookup by index not supported, use filenames().' 
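# Usage sketch for ImageDataset (the directory path is an assumption; any
# folder-of-class-folders layout that create_reader understands works).
# Unreadable files are skipped with a retry, up to _ERROR_RETRY consecutive errors.
from timm.data import ImageDataset, create_transform

def _demo_image_dataset(root='./data/val'):
    ds = ImageDataset(root, transform=create_transform((3, 224, 224)))
    img, target = ds[0]  # transformed image tensor and integer class label
    return img.shape, target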
def filenames(self, basename=False, absolute=False): return self.reader.filenames(basename, absolute) class AugMixDataset(torch.utils.data.Dataset): def __init__(self, dataset, num_splits=2): self.augmentation = None self.normalize = None self.dataset = dataset if self.dataset.transform is not None: self._set_transforms(self.dataset.transform) self.num_splits = num_splits def _set_transforms(self, x): assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms' self.dataset.transform = x[0] self.augmentation = x[1] self.normalize = x[2] @property def transform(self): return self.dataset.transform @transform.setter def transform(self, x): self._set_transforms(x) def _normalize(self, x): return x if self.normalize is None else self.normalize(x) def __getitem__(self, i): (x, y) = self.dataset[i] x_list = [self._normalize(x)] for _ in range(self.num_splits - 1): x_list.append(self._normalize(self.augmentation(x))) return (tuple(x_list), y) def __len__(self): return len(self.dataset) # File: pytorch-image-models-main/timm/data/dataset_factory.py """""" import os from typing import Optional from torchvision.datasets import CIFAR100, CIFAR10, MNIST, KMNIST, FashionMNIST, ImageFolder try: from torchvision.datasets import Places365 has_places365 = True except ImportError: has_places365 = False try: from torchvision.datasets import INaturalist has_inaturalist = True except ImportError: has_inaturalist = False try: from torchvision.datasets import QMNIST has_qmnist = True except ImportError: has_qmnist = False try: from torchvision.datasets import ImageNet has_imagenet = True except ImportError: has_imagenet = False from .dataset import IterableImageDataset, ImageDataset _TORCH_BASIC_DS = dict(cifar10=CIFAR10, cifar100=CIFAR100, mnist=MNIST, kmnist=KMNIST, fashion_mnist=FashionMNIST) _TRAIN_SYNONYM = dict(train=None, training=None) _EVAL_SYNONYM = dict(val=None, valid=None, validation=None, eval=None, evaluation=None) def _search_split(root, split): split_name = split.split('[')[0] try_root = os.path.join(root, split_name) if os.path.exists(try_root): return try_root def _try(syn): for s in syn: try_root = os.path.join(root, s) if os.path.exists(try_root): return try_root return root if split_name in _TRAIN_SYNONYM: root = _try(_TRAIN_SYNONYM) elif split_name in _EVAL_SYNONYM: root = _try(_EVAL_SYNONYM) return root def create_dataset(name: str, root: Optional[str]=None, split: str='validation', search_split: bool=True, class_map: dict=None, load_bytes: bool=False, is_training: bool=False, download: bool=False, batch_size: int=1, num_samples: Optional[int]=None, seed: int=42, repeats: int=0, input_img_mode: str='RGB', **kwargs): kwargs = {k: v for (k, v) in kwargs.items() if v is not None} name = name.lower() if name.startswith('torch/'): name = name.split('/', 2)[-1] torch_kwargs = dict(root=root, download=download, **kwargs) if name in _TORCH_BASIC_DS: ds_class = _TORCH_BASIC_DS[name] use_train = split in _TRAIN_SYNONYM ds = ds_class(train=use_train, **torch_kwargs) elif name == 'inaturalist' or name == 'inat': assert has_inaturalist, 'Please update to PyTorch 1.10, torchvision 0.11+ for Inaturalist' target_type = 'full' split_split = split.split('/') if len(split_split) > 1: target_type = split_split[0].split('_') if len(target_type) == 1: target_type = target_type[0] split = split_split[-1] if split in _TRAIN_SYNONYM: split = '2021_train' elif split in _EVAL_SYNONYM: split = '2021_valid' ds = INaturalist(version=split, target_type=target_type, **torch_kwargs) elif 
name == 'places365': assert has_places365, 'Please update to a newer PyTorch and torchvision for Places365 dataset.' if split in _TRAIN_SYNONYM: split = 'train-standard' elif split in _EVAL_SYNONYM: split = 'val' ds = Places365(split=split, **torch_kwargs) elif name == 'qmnist': assert has_qmnist, 'Please update to a newer PyTorch and torchvision for QMNIST dataset.' use_train = split in _TRAIN_SYNONYM ds = QMNIST(train=use_train, **torch_kwargs) elif name == 'imagenet': assert has_imagenet, 'Please update to a newer PyTorch and torchvision for ImageNet dataset.' if split in _EVAL_SYNONYM: split = 'val' ds = ImageNet(split=split, **torch_kwargs) elif name == 'image_folder' or name == 'folder': if search_split and os.path.isdir(root): root = _search_split(root, split) ds = ImageFolder(root, **kwargs) else: assert False, f'Unknown torchvision dataset {name}' elif name.startswith('hfds/'): ds = ImageDataset(root, reader=name, split=split, class_map=class_map, input_img_mode=input_img_mode, **kwargs) elif name.startswith('hfids/'): ds = IterableImageDataset(root, reader=name, split=split, class_map=class_map, is_training=is_training, download=download, batch_size=batch_size, num_samples=num_samples, repeats=repeats, seed=seed, input_img_mode=input_img_mode, **kwargs) elif name.startswith('tfds/'): ds = IterableImageDataset(root, reader=name, split=split, class_map=class_map, is_training=is_training, download=download, batch_size=batch_size, num_samples=num_samples, repeats=repeats, seed=seed, input_img_mode=input_img_mode, **kwargs) elif name.startswith('wds/'): ds = IterableImageDataset(root, reader=name, split=split, class_map=class_map, is_training=is_training, batch_size=batch_size, num_samples=num_samples, repeats=repeats, seed=seed, input_img_mode=input_img_mode, **kwargs) else: if search_split and os.path.isdir(root): root = _search_split(root, split) ds = ImageDataset(root, reader=name, class_map=class_map, load_bytes=load_bytes, input_img_mode=input_img_mode, **kwargs) return ds # File: pytorch-image-models-main/timm/data/dataset_info.py from abc import ABC, abstractmethod from typing import Dict, List, Optional, Union class DatasetInfo(ABC): def __init__(self): pass @abstractmethod def num_classes(self): pass @abstractmethod def label_names(self): pass @abstractmethod def label_descriptions(self, detailed: bool=False, as_dict: bool=False) -> Union[List[str], Dict[str, str]]: pass @abstractmethod def index_to_label_name(self, index) -> str: pass @abstractmethod def index_to_description(self, index: int, detailed: bool=False) -> str: pass @abstractmethod def label_name_to_description(self, label: str, detailed: bool=False) -> str: pass class CustomDatasetInfo(DatasetInfo): def __init__(self, label_names: Union[List[str], Dict[int, str]], label_descriptions: Optional[Dict[str, str]]=None): super().__init__() assert len(label_names) > 0 self._label_names = label_names self._label_descriptions = label_descriptions if self._label_descriptions is not None: assert isinstance(self._label_descriptions, dict) for n in self._label_names: assert n in self._label_descriptions def num_classes(self): return len(self._label_names) def label_names(self): return self._label_names def label_descriptions(self, detailed: bool=False, as_dict: bool=False) -> Union[List[str], Dict[str, str]]: return self._label_descriptions def label_name_to_description(self, label: str, detailed: bool=False) -> str: if self._label_descriptions: return self._label_descriptions[label] return label def 
index_to_label_name(self, index) -> str: assert 0 <= index < len(self._label_names) return self._label_names[index] def index_to_description(self, index: int, detailed: bool=False) -> str: label = self.index_to_label_name(index) return self.label_name_to_description(label, detailed=detailed) # File: pytorch-image-models-main/timm/data/distributed_sampler.py import math import torch from torch.utils.data import Sampler import torch.distributed as dist class OrderedDistributedSampler(Sampler): def __init__(self, dataset, num_replicas=None, rank=None): if num_replicas is None: if not dist.is_available(): raise RuntimeError('Requires distributed package to be available') num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError('Requires distributed package to be available') rank = dist.get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) self.total_size = self.num_samples * self.num_replicas def __iter__(self): indices = list(range(len(self.dataset))) indices += indices[:self.total_size - len(indices)] assert len(indices) == self.total_size indices = indices[self.rank:self.total_size:self.num_replicas] assert len(indices) == self.num_samples return iter(indices) def __len__(self): return self.num_samples class RepeatAugSampler(Sampler): def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, num_repeats=3, selected_round=256, selected_ratio=0): if num_replicas is None: if not dist.is_available(): raise RuntimeError('Requires distributed package to be available') num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError('Requires distributed package to be available') rank = dist.get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank self.shuffle = shuffle self.num_repeats = num_repeats self.epoch = 0 self.num_samples = int(math.ceil(len(self.dataset) * num_repeats / self.num_replicas)) self.total_size = self.num_samples * self.num_replicas selected_ratio = selected_ratio or num_replicas if selected_round: self.num_selected_samples = int(math.floor(len(self.dataset) // selected_round * selected_round / selected_ratio)) else: self.num_selected_samples = int(math.ceil(len(self.dataset) / selected_ratio)) def __iter__(self): g = torch.Generator() g.manual_seed(self.epoch) if self.shuffle: indices = torch.randperm(len(self.dataset), generator=g) else: indices = torch.arange(start=0, end=len(self.dataset)) if isinstance(self.num_repeats, float) and (not self.num_repeats.is_integer()): repeat_size = math.ceil(self.num_repeats * len(self.dataset)) indices = indices[torch.tensor([int(i // self.num_repeats) for i in range(repeat_size)])] else: indices = torch.repeat_interleave(indices, repeats=int(self.num_repeats), dim=0) indices = indices.tolist() padding_size = self.total_size - len(indices) if padding_size > 0: indices += indices[:padding_size] assert len(indices) == self.total_size indices = indices[self.rank:self.total_size:self.num_replicas] assert len(indices) == self.num_samples return iter(indices[:self.num_selected_samples]) def __len__(self): return self.num_selected_samples def set_epoch(self, epoch): self.epoch = epoch # File: pytorch-image-models-main/timm/data/imagenet_info.py import csv import os import pkgutil import re from typing import Dict, List, Optional, Union from .dataset_info import DatasetInfo _NUM_CLASSES_TO_SUBSET = {1000: 'imagenet-1k', 
11221: 'imagenet-21k-miil', 11821: 'imagenet-12k', 21841: 'imagenet-22k', 21842: 'imagenet-22k-ms', 21843: 'imagenet-21k-goog'} _SUBSETS = {'imagenet1k': 'imagenet_synsets.txt', 'imagenet12k': 'imagenet12k_synsets.txt', 'imagenet22k': 'imagenet22k_synsets.txt', 'imagenet21k': 'imagenet21k_goog_synsets.txt', 'imagenet21kgoog': 'imagenet21k_goog_synsets.txt', 'imagenet21kmiil': 'imagenet21k_miil_synsets.txt', 'imagenet22kms': 'imagenet22k_ms_synsets.txt'} _LEMMA_FILE = 'imagenet_synset_to_lemma.txt' _DEFINITION_FILE = 'imagenet_synset_to_definition.txt' def infer_imagenet_subset(model_or_cfg) -> Optional[str]: if isinstance(model_or_cfg, dict): num_classes = model_or_cfg.get('num_classes', None) else: num_classes = getattr(model_or_cfg, 'num_classes', None) if not num_classes: pretrained_cfg = getattr(model_or_cfg, 'pretrained_cfg', {}) num_classes = pretrained_cfg.get('num_classes', None) if not num_classes or num_classes not in _NUM_CLASSES_TO_SUBSET: return None return _NUM_CLASSES_TO_SUBSET[num_classes] class ImageNetInfo(DatasetInfo): def __init__(self, subset: str='imagenet-1k'): super().__init__() subset = re.sub('[-_\\s]', '', subset.lower()) assert subset in _SUBSETS, f'Unknown imagenet subset {subset}.' synset_file = _SUBSETS[subset] synset_data = pkgutil.get_data(__name__, os.path.join('_info', synset_file)) self._synsets = synset_data.decode('utf-8').splitlines() lemma_data = pkgutil.get_data(__name__, os.path.join('_info', _LEMMA_FILE)) reader = csv.reader(lemma_data.decode('utf-8').splitlines(), delimiter='\t') self._lemmas = dict(reader) definition_data = pkgutil.get_data(__name__, os.path.join('_info', _DEFINITION_FILE)) reader = csv.reader(definition_data.decode('utf-8').splitlines(), delimiter='\t') self._definitions = dict(reader) def num_classes(self): return len(self._synsets) def label_names(self): return self._synsets def label_descriptions(self, detailed: bool=False, as_dict: bool=False) -> Union[List[str], Dict[str, str]]: if as_dict: return {label: self.label_name_to_description(label, detailed=detailed) for label in self._synsets} else: return [self.label_name_to_description(label, detailed=detailed) for label in self._synsets] def index_to_label_name(self, index) -> str: assert 0 <= index < len(self._synsets), f'Index ({index}) out of range for dataset with {len(self._synsets)} classes.' 
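# Usage sketch: resolving human-readable labels for a model's class indices
# (the model name is an arbitrary example; any timm model with an ImageNet
# class count works, falling back to 'imagenet-1k' if inference fails).
import timm
from timm.data import ImageNetInfo, infer_imagenet_subset

def _demo_imagenet_info():
    model = timm.create_model('resnet50', pretrained=False)
    subset = infer_imagenet_subset(model)      # 'imagenet-1k' for 1000 classes
    info = ImageNetInfo(subset or 'imagenet-1k')
    return info.index_to_description(0)        # short lemma for class index 0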
return self._synsets[index] def index_to_description(self, index: int, detailed: bool=False) -> str: label = self.index_to_label_name(index) return self.label_name_to_description(label, detailed=detailed) def label_name_to_description(self, label: str, detailed: bool=False) -> str: if detailed: description = f'{self._lemmas[label]}: {self._definitions[label]}' else: description = f'{self._lemmas[label]}' return description # File: pytorch-image-models-main/timm/data/loader.py """""" import logging import random from contextlib import suppress from functools import partial from itertools import repeat from typing import Callable, Optional, Tuple, Union import torch import torch.utils.data import numpy as np from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from .dataset import IterableImageDataset, ImageDataset from .distributed_sampler import OrderedDistributedSampler, RepeatAugSampler from .random_erasing import RandomErasing from .mixup import FastCollateMixup from .transforms_factory import create_transform _logger = logging.getLogger(__name__) def fast_collate(batch): assert isinstance(batch[0], tuple) batch_size = len(batch) if isinstance(batch[0][0], tuple): inner_tuple_size = len(batch[0][0]) flattened_batch_size = batch_size * inner_tuple_size targets = torch.zeros(flattened_batch_size, dtype=torch.int64) tensor = torch.zeros((flattened_batch_size, *batch[0][0][0].shape), dtype=torch.uint8) for i in range(batch_size): assert len(batch[i][0]) == inner_tuple_size for j in range(inner_tuple_size): targets[i + j * batch_size] = batch[i][1] tensor[i + j * batch_size] += torch.from_numpy(batch[i][0][j]) return (tensor, targets) elif isinstance(batch[0][0], np.ndarray): targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) assert len(targets) == batch_size tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) for i in range(batch_size): tensor[i] += torch.from_numpy(batch[i][0]) return (tensor, targets) elif isinstance(batch[0][0], torch.Tensor): targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) assert len(targets) == batch_size tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) for i in range(batch_size): tensor[i].copy_(batch[i][0]) return (tensor, targets) else: assert False def adapt_to_chs(x, n): if not isinstance(x, (tuple, list)): x = tuple(repeat(x, n)) elif len(x) != n: x_mean = np.mean(x).item() x = (x_mean,) * n _logger.warning(f'Pretrained mean/std different shape than model, using avg value {x}.') else: assert len(x) == n, 'normalization stats must match image channels' return x class PrefetchLoader: def __init__(self, loader, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, channels=3, device=torch.device('cuda'), img_dtype=torch.float32, fp16=False, re_prob=0.0, re_mode='const', re_count=1, re_num_splits=0): mean = adapt_to_chs(mean, channels) std = adapt_to_chs(std, channels) normalization_shape = (1, channels, 1, 1) self.loader = loader self.device = device if fp16: img_dtype = torch.float16 self.img_dtype = img_dtype self.mean = torch.tensor([x * 255 for x in mean], device=device, dtype=img_dtype).view(normalization_shape) self.std = torch.tensor([x * 255 for x in std], device=device, dtype=img_dtype).view(normalization_shape) if re_prob > 0.0: self.random_erasing = RandomErasing(probability=re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device=device) else: self.random_erasing = None self.is_cuda = torch.cuda.is_available() and device.type == 'cuda' def 
__iter__(self): first = True if self.is_cuda: stream = torch.cuda.Stream() stream_context = partial(torch.cuda.stream, stream=stream) else: stream = None stream_context = suppress for (next_input, next_target) in self.loader: with stream_context(): next_input = next_input.to(device=self.device, non_blocking=True) next_target = next_target.to(device=self.device, non_blocking=True) next_input = next_input.to(self.img_dtype).sub_(self.mean).div_(self.std) if self.random_erasing is not None: next_input = self.random_erasing(next_input) if not first: yield (input, target) else: first = False if stream is not None: torch.cuda.current_stream().wait_stream(stream) input = next_input target = next_target yield (input, target) def __len__(self): return len(self.loader) @property def sampler(self): return self.loader.sampler @property def dataset(self): return self.loader.dataset @property def mixup_enabled(self): if isinstance(self.loader.collate_fn, FastCollateMixup): return self.loader.collate_fn.mixup_enabled else: return False @mixup_enabled.setter def mixup_enabled(self, x): if isinstance(self.loader.collate_fn, FastCollateMixup): self.loader.collate_fn.mixup_enabled = x def _worker_init(worker_id, worker_seeding='all'): worker_info = torch.utils.data.get_worker_info() assert worker_info.id == worker_id if isinstance(worker_seeding, Callable): seed = worker_seeding(worker_info) random.seed(seed) torch.manual_seed(seed) np.random.seed(seed % (2 ** 32 - 1)) else: assert worker_seeding in ('all', 'part') if worker_seeding == 'all': np.random.seed(worker_info.seed % (2 ** 32 - 1)) def create_loader(dataset: Union[ImageDataset, IterableImageDataset], input_size: Union[int, Tuple[int, int], Tuple[int, int, int]], batch_size: int, is_training: bool=False, no_aug: bool=False, re_prob: float=0.0, re_mode: str='const', re_count: int=1, re_split: bool=False, train_crop_mode: Optional[str]=None, scale: Optional[Tuple[float, float]]=None, ratio: Optional[Tuple[float, float]]=None, hflip: float=0.5, vflip: float=0.0, color_jitter: float=0.4, color_jitter_prob: Optional[float]=None, grayscale_prob: float=0.0, gaussian_blur_prob: float=0.0, auto_augment: Optional[str]=None, num_aug_repeats: int=0, num_aug_splits: int=0, interpolation: str='bilinear', mean: Tuple[float, ...]=IMAGENET_DEFAULT_MEAN, std: Tuple[float, ...]=IMAGENET_DEFAULT_STD, num_workers: int=1, distributed: bool=False, crop_pct: Optional[float]=None, crop_mode: Optional[str]=None, crop_border_pixels: Optional[int]=None, collate_fn: Optional[Callable]=None, pin_memory: bool=False, fp16: bool=False, img_dtype: torch.dtype=torch.float32, device: torch.device=torch.device('cuda'), use_prefetcher: bool=True, use_multi_epochs_loader: bool=False, persistent_workers: bool=True, worker_seeding: str='all', tf_preprocessing: bool=False): re_num_splits = 0 if re_split: re_num_splits = num_aug_splits or 2 dataset.transform = create_transform(input_size, is_training=is_training, no_aug=no_aug, train_crop_mode=train_crop_mode, scale=scale, ratio=ratio, hflip=hflip, vflip=vflip, color_jitter=color_jitter, color_jitter_prob=color_jitter_prob, grayscale_prob=grayscale_prob, gaussian_blur_prob=gaussian_blur_prob, auto_augment=auto_augment, interpolation=interpolation, mean=mean, std=std, crop_pct=crop_pct, crop_mode=crop_mode, crop_border_pixels=crop_border_pixels, re_prob=re_prob, re_mode=re_mode, re_count=re_count, re_num_splits=re_num_splits, tf_preprocessing=tf_preprocessing, use_prefetcher=use_prefetcher, separate=num_aug_splits > 0) if isinstance(dataset, 
IterableImageDataset): dataset.set_loader_cfg(num_workers=num_workers) sampler = None if distributed and (not isinstance(dataset, torch.utils.data.IterableDataset)): if is_training: if num_aug_repeats: sampler = RepeatAugSampler(dataset, num_repeats=num_aug_repeats) else: sampler = torch.utils.data.distributed.DistributedSampler(dataset) else: sampler = OrderedDistributedSampler(dataset) else: assert num_aug_repeats == 0, 'RepeatAugment not currently supported in non-distributed or IterableDataset use' if collate_fn is None: collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate loader_class = torch.utils.data.DataLoader if use_multi_epochs_loader: loader_class = MultiEpochsDataLoader loader_args = dict(batch_size=batch_size, shuffle=not isinstance(dataset, torch.utils.data.IterableDataset) and sampler is None and is_training, num_workers=num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory=pin_memory, drop_last=is_training, worker_init_fn=partial(_worker_init, worker_seeding=worker_seeding), persistent_workers=persistent_workers) try: loader = loader_class(dataset, **loader_args) except TypeError as e: loader_args.pop('persistent_workers') loader = loader_class(dataset, **loader_args) if use_prefetcher: prefetch_re_prob = re_prob if is_training and (not no_aug) else 0.0 loader = PrefetchLoader(loader, mean=mean, std=std, channels=input_size[0], device=device, fp16=fp16, img_dtype=img_dtype, re_prob=prefetch_re_prob, re_mode=re_mode, re_count=re_count, re_num_splits=re_num_splits) return loader class MultiEpochsDataLoader(torch.utils.data.DataLoader): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._DataLoader__initialized = False if self.batch_sampler is None: self.sampler = _RepeatSampler(self.sampler) else: self.batch_sampler = _RepeatSampler(self.batch_sampler) self._DataLoader__initialized = True self.iterator = super().__iter__() def __len__(self): return len(self.sampler) if self.batch_sampler is None else len(self.batch_sampler.sampler) def __iter__(self): for i in range(len(self)): yield next(self.iterator) class _RepeatSampler(object): def __init__(self, sampler): self.sampler = sampler def __iter__(self): while True: yield from iter(self.sampler) # File: pytorch-image-models-main/timm/data/mixup.py """""" import numpy as np import torch def one_hot(x, num_classes, on_value=1.0, off_value=0.0): x = x.long().view(-1, 1) return torch.full((x.size()[0], num_classes), off_value, device=x.device).scatter_(1, x, on_value) def mixup_target(target, num_classes, lam=1.0, smoothing=0.0): off_value = smoothing / num_classes on_value = 1.0 - smoothing + off_value y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value) y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value) return y1 * lam + y2 * (1.0 - lam) def rand_bbox(img_shape, lam, margin=0.0, count=None): ratio = np.sqrt(1 - lam) (img_h, img_w) = img_shape[-2:] (cut_h, cut_w) = (int(img_h * ratio), int(img_w * ratio)) (margin_y, margin_x) = (int(margin * cut_h), int(margin * cut_w)) cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count) cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count) yl = np.clip(cy - cut_h // 2, 0, img_h) yh = np.clip(cy + cut_h // 2, 0, img_h) xl = np.clip(cx - cut_w // 2, 0, img_w) xh = np.clip(cx + cut_w // 2, 0, img_w) return (yl, yh, xl, xh) def rand_bbox_minmax(img_shape, minmax, count=None): assert len(minmax) == 2 (img_h, img_w) = img_shape[-2:] cut_h = 
np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count) cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count) yl = np.random.randint(0, img_h - cut_h, size=count) xl = np.random.randint(0, img_w - cut_w, size=count) yu = yl + cut_h xu = xl + cut_w return (yl, yu, xl, xu) def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None): if ratio_minmax is not None: (yl, yu, xl, xu) = rand_bbox_minmax(img_shape, ratio_minmax, count=count) else: (yl, yu, xl, xu) = rand_bbox(img_shape, lam, count=count) if correct_lam or ratio_minmax is not None: bbox_area = (yu - yl) * (xu - xl) lam = 1.0 - bbox_area / float(img_shape[-2] * img_shape[-1]) return ((yl, yu, xl, xu), lam) class Mixup: def __init__(self, mixup_alpha=1.0, cutmix_alpha=0.0, cutmix_minmax=None, prob=1.0, switch_prob=0.5, mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000): self.mixup_alpha = mixup_alpha self.cutmix_alpha = cutmix_alpha self.cutmix_minmax = cutmix_minmax if self.cutmix_minmax is not None: assert len(self.cutmix_minmax) == 2 self.cutmix_alpha = 1.0 self.mix_prob = prob self.switch_prob = switch_prob self.label_smoothing = label_smoothing self.num_classes = num_classes self.mode = mode self.correct_lam = correct_lam self.mixup_enabled = True def _params_per_elem(self, batch_size): lam = np.ones(batch_size, dtype=np.float32) use_cutmix = np.zeros(batch_size, dtype=bool) if self.mixup_enabled: if self.mixup_alpha > 0.0 and self.cutmix_alpha > 0.0: use_cutmix = np.random.rand(batch_size) < self.switch_prob lam_mix = np.where(use_cutmix, np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size), np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)) elif self.mixup_alpha > 0.0: lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size) elif self.cutmix_alpha > 0.0: use_cutmix = np.ones(batch_size, dtype=bool) lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size) else: assert False, 'One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.' lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam) return (lam, use_cutmix) def _params_per_batch(self): lam = 1.0 use_cutmix = False if self.mixup_enabled and np.random.rand() < self.mix_prob: if self.mixup_alpha > 0.0 and self.cutmix_alpha > 0.0: use_cutmix = np.random.rand() < self.switch_prob lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else np.random.beta(self.mixup_alpha, self.mixup_alpha) elif self.mixup_alpha > 0.0: lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha) elif self.cutmix_alpha > 0.0: use_cutmix = True lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) else: assert False, 'One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.' 
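# --- Illustrative sketch (not part of the original timm sources) ---
# cutmix_bbox_and_lam above recomputes lambda from the *clipped* box: a box centered
# near the image border loses area to clipping, so the originally sampled lam would
# overstate the mixed fraction. Worked example with hypothetical numbers:
img_h = img_w = 224
# lam = 0.5 samples an unclipped cut of ~158x158 (sqrt(1 - 0.5) per side); suppose
# clipping at the border shrinks it to 158x100:
yl, yh, xl, xh = 0, 158, 124, 224
bbox_area = (yh - yl) * (xh - xl)                       # 15800 pixels
lam_corrected = 1.0 - bbox_area / float(img_h * img_w)  # ~0.685, not the sampled 0.5
# --- end sketch ---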
lam = float(lam_mix) return (lam, use_cutmix) def _mix_elem(self, x): batch_size = len(x) (lam_batch, use_cutmix) = self._params_per_elem(batch_size) x_orig = x.clone() for i in range(batch_size): j = batch_size - i - 1 lam = lam_batch[i] if lam != 1.0: if use_cutmix[i]: ((yl, yh, xl, xh), lam) = cutmix_bbox_and_lam(x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh] lam_batch[i] = lam else: x[i] = x[i] * lam + x_orig[j] * (1 - lam) return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1) def _mix_pair(self, x): batch_size = len(x) (lam_batch, use_cutmix) = self._params_per_elem(batch_size // 2) x_orig = x.clone() for i in range(batch_size // 2): j = batch_size - i - 1 lam = lam_batch[i] if lam != 1.0: if use_cutmix[i]: ((yl, yh, xl, xh), lam) = cutmix_bbox_and_lam(x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh] x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh] lam_batch[i] = lam else: x[i] = x[i] * lam + x_orig[j] * (1 - lam) x[j] = x[j] * lam + x_orig[i] * (1 - lam) lam_batch = np.concatenate((lam_batch, lam_batch[::-1])) return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1) def _mix_batch(self, x): (lam, use_cutmix) = self._params_per_batch() if lam == 1.0: return 1.0 if use_cutmix: ((yl, yh, xl, xh), lam) = cutmix_bbox_and_lam(x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh] else: x_flipped = x.flip(0).mul_(1.0 - lam) x.mul_(lam).add_(x_flipped) return lam def __call__(self, x, target): assert len(x) % 2 == 0, 'Batch size should be even when using this' if self.mode == 'elem': lam = self._mix_elem(x) elif self.mode == 'pair': lam = self._mix_pair(x) else: lam = self._mix_batch(x) target = mixup_target(target, self.num_classes, lam, self.label_smoothing) return (x, target) class FastCollateMixup(Mixup): def _mix_elem_collate(self, output, batch, half=False): batch_size = len(batch) num_elem = batch_size // 2 if half else batch_size assert len(output) == num_elem (lam_batch, use_cutmix) = self._params_per_elem(num_elem) for i in range(num_elem): j = batch_size - i - 1 lam = lam_batch[i] mixed = batch[i][0] if lam != 1.0: if use_cutmix[i]: if not half: mixed = mixed.copy() ((yl, yh, xl, xh), lam) = cutmix_bbox_and_lam(output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh] lam_batch[i] = lam else: mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam) np.rint(mixed, out=mixed) output[i] += torch.from_numpy(mixed.astype(np.uint8)) if half: lam_batch = np.concatenate((lam_batch, np.ones(num_elem))) return torch.tensor(lam_batch).unsqueeze(1) def _mix_pair_collate(self, output, batch): batch_size = len(batch) (lam_batch, use_cutmix) = self._params_per_elem(batch_size // 2) for i in range(batch_size // 2): j = batch_size - i - 1 lam = lam_batch[i] mixed_i = batch[i][0] mixed_j = batch[j][0] assert 0 <= lam <= 1.0 if lam < 1.0: if use_cutmix[i]: ((yl, yh, xl, xh), lam) = cutmix_bbox_and_lam(output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) patch_i = mixed_i[:, yl:yh, xl:xh].copy() mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh] mixed_j[:, yl:yh, xl:xh] = patch_i lam_batch[i] = lam else: mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - 
lam) mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam) mixed_i = mixed_temp np.rint(mixed_j, out=mixed_j) np.rint(mixed_i, out=mixed_i) output[i] += torch.from_numpy(mixed_i.astype(np.uint8)) output[j] += torch.from_numpy(mixed_j.astype(np.uint8)) lam_batch = np.concatenate((lam_batch, lam_batch[::-1])) return torch.tensor(lam_batch).unsqueeze(1) def _mix_batch_collate(self, output, batch): batch_size = len(batch) (lam, use_cutmix) = self._params_per_batch() if use_cutmix: ((yl, yh, xl, xh), lam) = cutmix_bbox_and_lam(output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) for i in range(batch_size): j = batch_size - i - 1 mixed = batch[i][0] if lam != 1.0: if use_cutmix: mixed = mixed.copy() mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh] else: mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam) np.rint(mixed, out=mixed) output[i] += torch.from_numpy(mixed.astype(np.uint8)) return lam def __call__(self, batch, _=None): batch_size = len(batch) assert batch_size % 2 == 0, 'Batch size should be even when using this' half = 'half' in self.mode if half: batch_size //= 2 output = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) if self.mode == 'elem' or self.mode == 'half': lam = self._mix_elem_collate(output, batch, half=half) elif self.mode == 'pair': lam = self._mix_pair_collate(output, batch) else: lam = self._mix_batch_collate(output, batch) target = torch.tensor([b[1] for b in batch], dtype=torch.int64) target = mixup_target(target, self.num_classes, lam, self.label_smoothing) target = target[:batch_size] return (output, target) # File: pytorch-image-models-main/timm/data/random_erasing.py """""" import random import math import torch def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'): if per_pixel: return torch.empty(patch_size, dtype=dtype, device=device).normal_() elif rand_color: return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_() else: return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) class RandomErasing: def __init__(self, probability=0.5, min_area=0.02, max_area=1 / 3, min_aspect=0.3, max_aspect=None, mode='const', min_count=1, max_count=None, num_splits=0, device='cuda'): self.probability = probability self.min_area = min_area self.max_area = max_area max_aspect = max_aspect or 1 / min_aspect self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) self.min_count = min_count self.max_count = max_count or min_count self.num_splits = num_splits self.mode = mode.lower() self.rand_color = False self.per_pixel = False if self.mode == 'rand': self.rand_color = True elif self.mode == 'pixel': self.per_pixel = True else: assert not self.mode or self.mode == 'const' self.device = device def _erase(self, img, chan, img_h, img_w, dtype): if random.random() > self.probability: return area = img_h * img_w count = self.min_count if self.min_count == self.max_count else random.randint(self.min_count, self.max_count) for _ in range(count): for attempt in range(10): target_area = random.uniform(self.min_area, self.max_area) * area / count aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) h = int(round(math.sqrt(target_area * aspect_ratio))) w = int(round(math.sqrt(target_area / aspect_ratio))) if w < img_w and h < img_h: top = random.randint(0, img_h - h) left = random.randint(0, img_w - w) img[:, top:top + h, left:left + w] = _get_pixels(self.per_pixel, self.rand_color, (chan, 
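# --- Illustrative sketch (not part of the original timm sources) ---
# RandomErasing above operates on already-normalized tensors (it runs after the
# mean/std step in the prefetcher path); mode='pixel' fills each erased patch with
# per-pixel normal noise. A minimal CPU usage sketch, assuming the class above is
# importable from timm.data.random_erasing:
import torch
from timm.data.random_erasing import RandomErasing

erase = RandomErasing(probability=1.0, mode='pixel', max_count=2, device='cpu')
x = torch.randn(8, 3, 224, 224)   # a batch of normalized images
x = erase(x)                      # up to 2 patches per image replaced in-place
# --- end sketch ---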
h, w), dtype=dtype, device=self.device) break def __call__(self, input): if len(input.size()) == 3: self._erase(input, *input.size(), input.dtype) else: (batch_size, chan, img_h, img_w) = input.size() batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0 for i in range(batch_start, batch_size): self._erase(input[i], chan, img_h, img_w, input.dtype) return input def __repr__(self): fs = self.__class__.__name__ + f'(p={self.probability}, mode={self.mode}' fs += f', count=({self.min_count}, {self.max_count}))' return fs # File: pytorch-image-models-main/timm/data/readers/class_map.py import os import pickle def load_class_map(map_or_filename, root=''): if isinstance(map_or_filename, dict): assert map_or_filename, 'class_map dict must be non-empty' return map_or_filename class_map_path = map_or_filename if not os.path.exists(class_map_path): class_map_path = os.path.join(root, class_map_path) assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % map_or_filename class_map_ext = os.path.splitext(map_or_filename)[-1].lower() if class_map_ext == '.txt': with open(class_map_path) as f: class_to_idx = {v.strip(): k for (k, v) in enumerate(f)} elif class_map_ext == '.pkl': with open(class_map_path, 'rb') as f: class_to_idx = pickle.load(f) else: assert False, f'Unsupported class map file extension ({class_map_ext}).' return class_to_idx # File: pytorch-image-models-main/timm/data/readers/img_extensions.py from copy import deepcopy __all__ = ['get_img_extensions', 'is_img_extension', 'set_img_extensions', 'add_img_extensions', 'del_img_extensions'] IMG_EXTENSIONS = ('.png', '.jpg', '.jpeg') _IMG_EXTENSIONS_SET = set(IMG_EXTENSIONS) def _set_extensions(extensions): global IMG_EXTENSIONS global _IMG_EXTENSIONS_SET dedupe = set() IMG_EXTENSIONS = tuple((x for x in extensions if x not in dedupe and (not dedupe.add(x)))) _IMG_EXTENSIONS_SET = set(extensions) def _valid_extension(x: str): return x and isinstance(x, str) and (len(x) >= 2) and x.startswith('.') def is_img_extension(ext): return ext in _IMG_EXTENSIONS_SET def get_img_extensions(as_set=False): return deepcopy(_IMG_EXTENSIONS_SET if as_set else IMG_EXTENSIONS) def set_img_extensions(extensions): assert len(extensions) for x in extensions: assert _valid_extension(x) _set_extensions(extensions) def add_img_extensions(ext): if not isinstance(ext, (list, tuple, set)): ext = (ext,) for x in ext: assert _valid_extension(x) extensions = IMG_EXTENSIONS + tuple(ext) _set_extensions(extensions) def del_img_extensions(ext): if not isinstance(ext, (list, tuple, set)): ext = (ext,) extensions = tuple((x for x in IMG_EXTENSIONS if x not in ext)) _set_extensions(extensions) # File: pytorch-image-models-main/timm/data/readers/reader.py from abc import abstractmethod class Reader: def __init__(self): pass @abstractmethod def _filename(self, index, basename=False, absolute=False): pass def filename(self, index, basename=False, absolute=False): return self._filename(index, basename=basename, absolute=absolute) def filenames(self, basename=False, absolute=False): return [self._filename(index, basename=basename, absolute=absolute) for index in range(len(self))] # File: pytorch-image-models-main/timm/data/readers/reader_factory.py import os from typing import Optional from .reader_image_folder import ReaderImageFolder from .reader_image_in_tar import ReaderImageInTar def create_reader(name: str, root: Optional[str]=None, split: str='train', **kwargs): kwargs = {k: v for (k, v) in kwargs.items() if v is not None} name =
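# --- Illustrative sketch (not part of the original timm sources) ---
# load_class_map above accepts a dict, a .txt file (one class name per line, where
# line order defines the index), or a pickled dict. A hypothetical 3-class map
# written to a temp file and read back:
import os
import tempfile
from timm.data.readers.class_map import load_class_map

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, 'classes.txt')
    with open(path, 'w') as f:
        f.write('cat\ndog\nwombat\n')
    class_to_idx = load_class_map(path)
    assert class_to_idx == {'cat': 0, 'dog': 1, 'wombat': 2}
# --- end sketch ---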
name.lower() name = name.split('/', 1) prefix = '' if len(name) > 1: prefix = name[0] name = name[-1] if prefix == 'hfds': from .reader_hfds import ReaderHfds reader = ReaderHfds(name=name, root=root, split=split, **kwargs) elif prefix == 'hfids': from .reader_hfids import ReaderHfids reader = ReaderHfids(name=name, root=root, split=split, **kwargs) elif prefix == 'tfds': from .reader_tfds import ReaderTfds reader = ReaderTfds(name=name, root=root, split=split, **kwargs) elif prefix == 'wds': from .reader_wds import ReaderWds kwargs.pop('download', False) reader = ReaderWds(root=root, name=name, split=split, **kwargs) else: assert os.path.exists(root) if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar': reader = ReaderImageInTar(root, **kwargs) else: reader = ReaderImageFolder(root, **kwargs) return reader # File: pytorch-image-models-main/timm/data/readers/reader_hfds.py """""" import io import math from typing import Optional import torch import torch.distributed as dist from PIL import Image try: import datasets except ImportError as e: print('Please install Hugging Face datasets package `pip install datasets`.') raise e from .class_map import load_class_map from .reader import Reader def get_class_labels(info, label_key='label'): if 'label' not in info.features: return {} class_label = info.features[label_key] class_to_idx = {n: class_label.str2int(n) for n in class_label.names} return class_to_idx class ReaderHfds(Reader): def __init__(self, name: str, root: Optional[str]=None, split: str='train', class_map: dict=None, input_key: str='image', target_key: str='label', download: bool=False): super().__init__() self.root = root self.split = split self.dataset = datasets.load_dataset(name, split=split, cache_dir=self.root) self.dataset = self.dataset.cast_column(input_key, datasets.Image(decode=False)) self.image_key = input_key self.label_key = target_key self.remap_class = False if class_map: self.class_to_idx = load_class_map(class_map) self.remap_class = True else: self.class_to_idx = get_class_labels(self.dataset.info, self.label_key) self.split_info = self.dataset.info.splits[split] self.num_samples = self.split_info.num_examples def __getitem__(self, index): item = self.dataset[index] image = item[self.image_key] if 'bytes' in image and image['bytes']: image = io.BytesIO(image['bytes']) else: assert 'path' in image and image['path'] image = open(image['path'], 'rb') label = item[self.label_key] if self.remap_class: label = self.class_to_idx[label] return (image, label) def __len__(self): return len(self.dataset) def _filename(self, index, basename=False, absolute=False): item = self.dataset[index] return item[self.image_key]['path'] # File: pytorch-image-models-main/timm/data/readers/reader_hfids.py """""" import math import os from itertools import repeat, chain from typing import Optional import torch import torch.distributed as dist from PIL import Image try: import datasets from datasets.distributed import split_dataset_by_node from datasets.splits import SplitInfo except ImportError as e: print('Please install Hugging Face datasets package `pip install datasets`.') raise e from .class_map import load_class_map from .reader import Reader from .shared_count import SharedCount SHUFFLE_SIZE = int(os.environ.get('HFIDS_SHUFFLE_SIZE', 4096)) class ReaderHfids(Reader): def __init__(self, name: str, root: Optional[str]=None, split: str='train', is_training: bool=False, batch_size: int=1, download: bool=False, repeats: int=0, seed: int=42, class_map: Optional[dict]=None, 
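# --- Illustrative sketch (not part of the original timm sources) ---
# create_reader above dispatches on an optional '<prefix>/' in the dataset name:
# 'hfds/' -> Hugging Face datasets, 'hfids/' -> HF iterable datasets, 'tfds/' ->
# TensorFlow Datasets, 'wds/' -> webdataset; anything else is treated as a local
# folder or a single .tar file. All paths and dataset names below are hypothetical:
from timm.data.readers.reader_factory import create_reader

reader = create_reader('', root='/data/imagenet/train')  # folder of class subdirs
# reader = create_reader('hfds/uoft-cs/cifar10', split='train')
# reader = create_reader('tfds/imagenet2012', root='/data/tfds', split='validation')
# reader = create_reader('wds/imagenet', root='/data/wds/imagenet', split='train')
# --- end sketch ---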
input_key: str='image', input_img_mode: str='RGB', target_key: str='label', target_img_mode: str='', shuffle_size: Optional[int]=None, num_samples: Optional[int]=None): super().__init__() self.root = root self.split = split self.is_training = is_training self.batch_size = batch_size self.download = download self.repeats = repeats self.common_seed = seed self.shuffle_size = shuffle_size or SHUFFLE_SIZE self.input_key = input_key self.input_img_mode = input_img_mode self.target_key = target_key self.target_img_mode = target_img_mode self.builder = datasets.load_dataset_builder(name, cache_dir=root) if download: self.builder.download_and_prepare() split_info: Optional[SplitInfo] = None if self.builder.info.splits and split in self.builder.info.splits: if isinstance(self.builder.info.splits[split], SplitInfo): split_info: Optional[SplitInfo] = self.builder.info.splits[split] if num_samples: self.num_samples = num_samples elif split_info and split_info.num_examples: self.num_samples = split_info.num_examples else: raise ValueError('Dataset length is unknown, please pass `num_samples` explicitly. The number of steps needs to be known in advance for the learning rate scheduler.') self.remap_class = False if class_map: self.class_to_idx = load_class_map(class_map) self.remap_class = True else: self.class_to_idx = {} self.dist_rank = 0 self.dist_num_replicas = 1 if dist.is_available() and dist.is_initialized() and (dist.get_world_size() > 1): self.dist_rank = dist.get_rank() self.dist_num_replicas = dist.get_world_size() self.worker_info = None self.worker_id = 0 self.num_workers = 1 self.global_worker_id = 0 self.global_num_workers = 1 self.ds: Optional[datasets.IterableDataset] = None self.epoch = SharedCount() def set_epoch(self, count): self.epoch.value = count def set_loader_cfg(self, num_workers: Optional[int]=None): if self.ds is not None: return if num_workers is not None: self.num_workers = num_workers self.global_num_workers = self.dist_num_replicas * self.num_workers def _lazy_init(self): if self.worker_info is None: worker_info = torch.utils.data.get_worker_info() if worker_info is not None: self.worker_info = worker_info self.worker_id = worker_info.id self.num_workers = worker_info.num_workers self.global_num_workers = self.dist_num_replicas * self.num_workers self.global_worker_id = self.dist_rank * self.num_workers + self.worker_id if self.download: dataset = self.builder.as_dataset(split=self.split) ds = dataset.to_iterable_dataset(num_shards=self.global_num_workers) else: ds = self.builder.as_streaming_dataset(split=self.split) if self.is_training: ds = ds.shuffle(seed=self.common_seed, buffer_size=self.shuffle_size) self.ds = split_dataset_by_node(ds, rank=self.dist_rank, world_size=self.dist_num_replicas) def _num_samples_per_worker(self): num_worker_samples = max(1, self.repeats) * self.num_samples / max(self.global_num_workers, self.dist_num_replicas) if self.is_training or self.dist_num_replicas > 1: num_worker_samples = math.ceil(num_worker_samples) if self.is_training and self.batch_size is not None: num_worker_samples = math.ceil(num_worker_samples / self.batch_size) * self.batch_size return int(num_worker_samples) def __iter__(self): if self.ds is None: self._lazy_init() self.ds.set_epoch(self.epoch.value) target_sample_count = self._num_samples_per_worker() sample_count = 0 if self.is_training: ds_iter = chain.from_iterable(repeat(self.ds)) else: ds_iter = iter(self.ds) for sample in ds_iter: input_data: Image.Image = sample[self.input_key] if self.input_img_mode and
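# --- Illustrative sketch (not part of the original timm sources) ---
# _num_samples_per_worker above divides the dataset across all global workers
# (world_size * workers per rank) and, when training, rounds up to whole batches so
# every worker yields the same number of steps per epoch. With hypothetical numbers:
import math

num_samples, world_size, workers_per_rank, batch_size = 50000, 4, 8, 32
global_num_workers = world_size * workers_per_rank            # 32
per_worker = num_samples / global_num_workers                 # 1562.5
per_worker = math.ceil(per_worker)                            # 1563
per_worker = math.ceil(per_worker / batch_size) * batch_size  # 1568 (49 batches)
# --- end sketch ---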
input_data.mode != self.input_img_mode: input_data = input_data.convert(self.input_img_mode) target_data = sample[self.target_key] if self.target_img_mode: assert isinstance(target_data, Image.Image), 'target_img_mode is specified but target is not an image' if target_data.mode != self.target_img_mode: target_data = target_data.convert(self.target_img_mode) elif self.remap_class: target_data = self.class_to_idx[target_data] yield (input_data, target_data) sample_count += 1 if self.is_training and sample_count >= target_sample_count: break def __len__(self): num_samples = self._num_samples_per_worker() * self.num_workers return num_samples def _filename(self, index, basename=False, absolute=False): assert False, 'Not supported' def filenames(self, basename=False, absolute=False): if self.ds is None: self._lazy_init() names = [] for sample in self.ds: if 'file_name' in sample: name = sample['file_name'] elif 'filename' in sample: name = sample['filename'] elif 'id' in sample: name = sample['id'] elif 'image_id' in sample: name = sample['image_id'] else: assert False, 'No supported name field present' names.append(name) return names # File: pytorch-image-models-main/timm/data/readers/reader_image_folder.py """""" import os from typing import Dict, List, Optional, Set, Tuple, Union from timm.utils.misc import natural_key from .class_map import load_class_map from .img_extensions import get_img_extensions from .reader import Reader def find_images_and_targets(folder: str, types: Optional[Union[List, Tuple, Set]]=None, class_to_idx: Optional[Dict]=None, leaf_name_only: bool=True, sort: bool=True): types = get_img_extensions(as_set=True) if not types else set(types) labels = [] filenames = [] for (root, subdirs, files) in os.walk(folder, topdown=False, followlinks=True): rel_path = os.path.relpath(root, folder) if root != folder else '' label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_') for f in files: (base, ext) = os.path.splitext(f) if ext.lower() in types: filenames.append(os.path.join(root, f)) labels.append(label) if class_to_idx is None: unique_labels = set(labels) sorted_labels = list(sorted(unique_labels, key=natural_key)) class_to_idx = {c: idx for (idx, c) in enumerate(sorted_labels)} images_and_targets = [(f, class_to_idx[l]) for (f, l) in zip(filenames, labels) if l in class_to_idx] if sort: images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0])) return (images_and_targets, class_to_idx) class ReaderImageFolder(Reader): def __init__(self, root, class_map='', input_key=None): super().__init__() self.root = root class_to_idx = None if class_map: class_to_idx = load_class_map(class_map, root) find_types = None if input_key: find_types = input_key.split(';') (self.samples, self.class_to_idx) = find_images_and_targets(root, class_to_idx=class_to_idx, types=find_types) if len(self.samples) == 0: raise RuntimeError(f"Found 0 images in subfolders of {root}. 
Supported image extensions are {', '.join(get_img_extensions())}") def __getitem__(self, index): (path, target) = self.samples[index] return (open(path, 'rb'), target) def __len__(self): return len(self.samples) def _filename(self, index, basename=False, absolute=False): filename = self.samples[index][0] if basename: filename = os.path.basename(filename) elif not absolute: filename = os.path.relpath(filename, self.root) return filename # File: pytorch-image-models-main/timm/data/readers/reader_image_in_tar.py """""" import logging import os import pickle import tarfile from glob import glob from typing import List, Tuple, Dict, Set, Optional, Union import numpy as np from timm.utils.misc import natural_key from .class_map import load_class_map from .img_extensions import get_img_extensions from .reader import Reader _logger = logging.getLogger(__name__) CACHE_FILENAME_SUFFIX = '_tarinfos.pickle' class TarState: def __init__(self, tf: tarfile.TarFile=None, ti: tarfile.TarInfo=None): self.tf: tarfile.TarFile = tf self.ti: tarfile.TarInfo = ti self.children: Dict[str, TarState] = {} def reset(self): self.tf = None def _extract_tarinfo(tf: tarfile.TarFile, parent_info: Dict, extensions: Set[str]): sample_count = 0 for (i, ti) in enumerate(tf): if not ti.isfile(): continue (dirname, basename) = os.path.split(ti.path) (name, ext) = os.path.splitext(basename) ext = ext.lower() if ext == '.tar': with tarfile.open(fileobj=tf.extractfile(ti), mode='r|') as ctf: child_info = dict(name=ti.name, path=os.path.join(parent_info['path'], name), ti=ti, children=[], samples=[]) sample_count += _extract_tarinfo(ctf, child_info, extensions=extensions) _logger.debug(f"{i}/?. Extracted child tarinfos from {ti.name}. {len(child_info['samples'])} images.") parent_info['children'].append(child_info) elif ext in extensions: parent_info['samples'].append(ti) sample_count += 1 return sample_count def extract_tarinfos(root, class_name_to_idx: Optional[Dict]=None, cache_tarinfo: Optional[bool]=None, extensions: Optional[Union[List, Tuple, Set]]=None, sort: bool=True): extensions = get_img_extensions(as_set=True) if not extensions else set(extensions) root_is_tar = False if os.path.isfile(root): assert os.path.splitext(root)[-1].lower() == '.tar' tar_filenames = [root] (root, root_name) = os.path.split(root) root_name = os.path.splitext(root_name)[0] root_is_tar = True else: root_name = root.strip(os.path.sep).split(os.path.sep)[-1] tar_filenames = glob(os.path.join(root, '*.tar'), recursive=True) num_tars = len(tar_filenames) tar_bytes = sum([os.path.getsize(f) for f in tar_filenames]) assert num_tars, f'No .tar files found at specified path ({root}).' 
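# --- Illustrative sketch (not part of the original timm sources) ---
# find_images_and_targets above walks a folder tree and derives each image's label
# from its containing directory (leaf name by default), so the expected layout is
# the classic class-per-subfolder one; ReaderImageInTar accepts the same layout
# inside a single .tar (optionally with nested per-class tars). Building a tiny
# hypothetical tree and scanning it:
import os
import tempfile
from timm.data.readers.reader_image_folder import find_images_and_targets

with tempfile.TemporaryDirectory() as root:
    for cls in ('cat', 'dog'):
        os.makedirs(os.path.join(root, cls))
        open(os.path.join(root, cls, 'img0.jpg'), 'wb').close()
    samples, class_to_idx = find_images_and_targets(root)
    # class_to_idx == {'cat': 0, 'dog': 1}; samples pairs file paths with indices
# --- end sketch ---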
_logger.info(f'Scanning {tar_bytes / 1024 ** 2:.2f}MB of tar files...') info = dict(tartrees=[]) cache_path = '' if cache_tarinfo is None: cache_tarinfo = True if tar_bytes > 10 * 1024 ** 3 else False if cache_tarinfo: cache_filename = '_' + root_name + CACHE_FILENAME_SUFFIX cache_path = os.path.join(root, cache_filename) if os.path.exists(cache_path): _logger.info(f'Reading tar info from cache file {cache_path}.') with open(cache_path, 'rb') as pf: info = pickle.load(pf) assert len(info['tartrees']) == num_tars, "Cached tartree len doesn't match number of tarfiles" else: for (i, fn) in enumerate(tar_filenames): path = '' if root_is_tar else os.path.splitext(os.path.basename(fn))[0] with tarfile.open(fn, mode='r|') as tf: parent_info = dict(name=os.path.relpath(fn, root), path=path, ti=None, children=[], samples=[]) num_samples = _extract_tarinfo(tf, parent_info, extensions=extensions) num_children = len(parent_info['children']) _logger.debug(f'{i}/{num_tars}. Extracted tarinfos from {fn}. {num_children} children, {num_samples} samples.') info['tartrees'].append(parent_info) if cache_path: _logger.info(f'Writing tar info to cache file {cache_path}.') with open(cache_path, 'wb') as pf: pickle.dump(info, pf) samples = [] labels = [] build_class_map = False if class_name_to_idx is None: build_class_map = True tarfiles = [] def _label_from_paths(*path, leaf_only=True): path = os.path.join(*path).strip(os.path.sep) return path.split(os.path.sep)[-1] if leaf_only else path.replace(os.path.sep, '_') def _add_samples(info, fn): added = 0 for s in info['samples']: label = _label_from_paths(info['path'], os.path.dirname(s.path)) if not build_class_map and label not in class_name_to_idx: continue samples.append((s, fn, info['ti'])) labels.append(label) added += 1 return added _logger.info(f'Collecting samples and building tar states.') for parent_info in info['tartrees']: tar_name = None if root_is_tar else parent_info['name'] tar_state = TarState() parent_added = 0 for child_info in parent_info['children']: child_added = _add_samples(child_info, fn=tar_name) if child_added: tar_state.children[child_info['name']] = TarState(ti=child_info['ti']) parent_added += child_added parent_added += _add_samples(parent_info, fn=tar_name) if parent_added: tarfiles.append((tar_name, tar_state)) del info if build_class_map: sorted_labels = list(sorted(set(labels), key=natural_key)) class_name_to_idx = {c: idx for (idx, c) in enumerate(sorted_labels)} _logger.info(f'Mapping targets and sorting samples.') samples_and_targets = [(s, class_name_to_idx[l]) for (s, l) in zip(samples, labels) if l in class_name_to_idx] if sort: samples_and_targets = sorted(samples_and_targets, key=lambda k: natural_key(k[0][0].path)) (samples, targets) = zip(*samples_and_targets) samples = np.array(samples) targets = np.array(targets) _logger.info(f'Finished processing {len(samples)} samples across {len(tarfiles)} tar files.') return (samples, targets, class_name_to_idx, tarfiles) class ReaderImageInTar(Reader): def __init__(self, root, class_map='', cache_tarfiles=True, cache_tarinfo=None): super().__init__() class_name_to_idx = None if class_map: class_name_to_idx = load_class_map(class_map, root) self.root = root (self.samples, self.targets, self.class_name_to_idx, tarfiles) = extract_tarinfos(self.root, class_name_to_idx=class_name_to_idx, cache_tarinfo=cache_tarinfo) self.class_idx_to_name = {v: k for (k, v) in self.class_name_to_idx.items()} if len(tarfiles) == 1 and tarfiles[0][0] is None: self.root_is_tar = True self.tar_state = 
tarfiles[0][1] else: self.root_is_tar = False self.tar_state = dict(tarfiles) self.cache_tarfiles = cache_tarfiles def __len__(self): return len(self.samples) def __getitem__(self, index): sample = self.samples[index] target = self.targets[index] (sample_ti, parent_fn, child_ti) = sample parent_abs = os.path.join(self.root, parent_fn) if parent_fn else self.root tf = None cache_state = None if self.cache_tarfiles: cache_state = self.tar_state if self.root_is_tar else self.tar_state[parent_fn] tf = cache_state.tf if tf is None: tf = tarfile.open(parent_abs) if self.cache_tarfiles: cache_state.tf = tf if child_ti is not None: ctf = cache_state.children[child_ti.name].tf if self.cache_tarfiles else None if ctf is None: ctf = tarfile.open(fileobj=tf.extractfile(child_ti)) if self.cache_tarfiles: cache_state.children[child_ti.name].tf = ctf tf = ctf return (tf.extractfile(sample_ti), target) def _filename(self, index, basename=False, absolute=False): filename = self.samples[index][0].name if basename: filename = os.path.basename(filename) return filename # File: pytorch-image-models-main/timm/data/readers/reader_image_tar.py """""" import os import tarfile from timm.utils.misc import natural_key from .class_map import load_class_map from .img_extensions import get_img_extensions from .reader import Reader def extract_tarinfo(tarfile, class_to_idx=None, sort=True): extensions = get_img_extensions(as_set=True) files = [] labels = [] for ti in tarfile.getmembers(): if not ti.isfile(): continue (dirname, basename) = os.path.split(ti.path) label = os.path.basename(dirname) ext = os.path.splitext(basename)[1] if ext.lower() in extensions: files.append(ti) labels.append(label) if class_to_idx is None: unique_labels = set(labels) sorted_labels = list(sorted(unique_labels, key=natural_key)) class_to_idx = {c: idx for (idx, c) in enumerate(sorted_labels)} tarinfo_and_targets = [(f, class_to_idx[l]) for (f, l) in zip(files, labels) if l in class_to_idx] if sort: tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda k: natural_key(k[0].path)) return (tarinfo_and_targets, class_to_idx) class ReaderImageTar(Reader): def __init__(self, root, class_map=''): super().__init__() class_to_idx = None if class_map: class_to_idx = load_class_map(class_map, root) assert os.path.isfile(root) self.root = root with tarfile.open(root) as tf: (self.samples, self.class_to_idx) = extract_tarinfo(tf, class_to_idx) self.imgs = self.samples self.tarfile = None def __getitem__(self, index): if self.tarfile is None: self.tarfile = tarfile.open(self.root) (tarinfo, target) = self.samples[index] fileobj = self.tarfile.extractfile(tarinfo) return (fileobj, target) def __len__(self): return len(self.samples) def _filename(self, index, basename=False, absolute=False): filename = self.samples[index][0].name if basename: filename = os.path.basename(filename) return filename # File: pytorch-image-models-main/timm/data/readers/reader_tfds.py """""" import math import os import sys from typing import Optional import torch import torch.distributed as dist from PIL import Image try: import tensorflow as tf tf.config.set_visible_devices([], 'GPU') import tensorflow_datasets as tfds try: tfds.even_splits('', 1, drop_remainder=False) has_buggy_even_splits = False except TypeError: print("Warning: This version of tfds doesn't have the latest even_splits impl. 
Please update or use tfds-nightly for better fine-grained split behaviour.") has_buggy_even_splits = True except ImportError as e: print(e) print('Please install tensorflow_datasets package `pip install tensorflow-datasets`.') raise e from .class_map import load_class_map from .reader import Reader from .shared_count import SharedCount MAX_TP_SIZE = int(os.environ.get('TFDS_TP_SIZE', 8)) SHUFFLE_SIZE = int(os.environ.get('TFDS_SHUFFLE_SIZE', 8192)) PREFETCH_SIZE = int(os.environ.get('TFDS_PREFETCH_SIZE', 2048)) @tfds.decode.make_decoder() def decode_example(serialized_image, feature, dct_method='INTEGER_ACCURATE', channels=3): return tf.image.decode_jpeg(serialized_image, channels=channels, dct_method=dct_method) def even_split_indices(split, n, num_samples): partitions = [round(i * num_samples / n) for i in range(n + 1)] return [f'{split}[{partitions[i]}:{partitions[i + 1]}]' for i in range(n)] def get_class_labels(info): if 'label' not in info.features: return {} class_label = info.features['label'] class_to_idx = {n: class_label.str2int(n) for n in class_label.names} return class_to_idx class ReaderTfds(Reader): def __init__(self, name, root=None, split='train', class_map=None, is_training=False, batch_size=1, download=False, repeats=0, seed=42, input_key='image', input_img_mode='RGB', target_key='label', target_img_mode='', prefetch_size=None, shuffle_size=None, max_threadpool_size=None): super().__init__() self.root = root self.split = split self.is_training = is_training self.batch_size = batch_size self.repeats = repeats self.common_seed = seed self.prefetch_size = prefetch_size or PREFETCH_SIZE self.shuffle_size = shuffle_size or SHUFFLE_SIZE self.max_threadpool_size = max_threadpool_size or MAX_TP_SIZE self.input_key = input_key self.input_img_mode = input_img_mode self.target_key = target_key self.target_img_mode = target_img_mode self.builder = tfds.builder(name, data_dir=root) if download: self.builder.download_and_prepare() self.remap_class = False if class_map: self.class_to_idx = load_class_map(class_map) self.remap_class = True else: self.class_to_idx = get_class_labels(self.builder.info) if self.target_key == 'label' else {} self.split_info = self.builder.info.splits[split] self.num_samples = self.split_info.num_examples self.dist_rank = 0 self.dist_num_replicas = 1 if dist.is_available() and dist.is_initialized() and (dist.get_world_size() > 1): self.dist_rank = dist.get_rank() self.dist_num_replicas = dist.get_world_size() self.global_num_workers = 1 self.num_workers = 1 self.worker_info = None self.worker_seed = 0 self.subsplit = None self.ds = None self.init_count = 0 self.epoch_count = SharedCount() self.reinit_each_iter = self.is_training def set_epoch(self, count): self.epoch_count.value = count def set_loader_cfg(self, num_workers: Optional[int]=None): if self.ds is not None: return if num_workers is not None: self.num_workers = num_workers self.global_num_workers = self.dist_num_replicas * self.num_workers def _lazy_init(self): worker_info = torch.utils.data.get_worker_info() num_workers = 1 global_worker_id = 0 if worker_info is not None: self.worker_info = worker_info self.worker_seed = worker_info.seed self.num_workers = worker_info.num_workers self.global_num_workers = self.dist_num_replicas * self.num_workers global_worker_id = self.dist_rank * self.num_workers + worker_info.id '' should_subsplit = self.global_num_workers > 1 and (self.split_info.num_shards < self.global_num_workers or not self.is_training) if should_subsplit: if has_buggy_even_splits: if not 
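# --- Illustrative sketch (not part of the original timm sources) ---
# even_split_indices above builds tfds sub-split strings so each global worker reads
# a disjoint, near-equal slice when there are fewer shards than workers. For 10
# samples split across 3 workers:
def even_split_indices(split, n, num_samples):
    partitions = [round(i * num_samples / n) for i in range(n + 1)]
    return [f'{split}[{partitions[i]}:{partitions[i + 1]}]' for i in range(n)]

print(even_split_indices('validation', 3, 10))
# -> ['validation[0:3]', 'validation[3:7]', 'validation[7:10]']
# --- end sketch ---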
isinstance(self.split_info, tfds.core.splits.SubSplitInfo): subsplits = even_split_indices(self.split, self.global_num_workers, self.num_samples) self.subsplit = subsplits[global_worker_id] else: subsplits = tfds.even_splits(self.split, self.global_num_workers) self.subsplit = subsplits[global_worker_id] input_context = None if self.global_num_workers > 1 and self.subsplit is None: input_context = tf.distribute.InputContext(num_input_pipelines=self.global_num_workers, input_pipeline_id=global_worker_id, num_replicas_in_sync=self.dist_num_replicas) read_config = tfds.ReadConfig(shuffle_seed=self.common_seed + self.epoch_count.value, shuffle_reshuffle_each_iteration=True, input_context=input_context) ds = self.builder.as_dataset(split=self.subsplit or self.split, shuffle_files=self.is_training, decoders=dict(image=decode_example(channels=1 if self.input_img_mode == 'L' else 3)), read_config=read_config) options = tf.data.Options() thread_member = 'threading' if hasattr(options, 'threading') else 'experimental_threading' getattr(options, thread_member).private_threadpool_size = max(1, self.max_threadpool_size // self.num_workers) getattr(options, thread_member).max_intra_op_parallelism = 1 ds = ds.with_options(options) if self.is_training or self.repeats > 1: ds = ds.repeat() if self.is_training: ds = ds.shuffle(min(self.num_samples, self.shuffle_size) // self.global_num_workers, seed=self.worker_seed) ds = ds.prefetch(min(self.num_samples // self.global_num_workers, self.prefetch_size)) self.ds = tfds.as_numpy(ds) self.init_count += 1 def _num_samples_per_worker(self): num_worker_samples = max(1, self.repeats) * self.num_samples / max(self.global_num_workers, self.dist_num_replicas) if self.is_training or self.dist_num_replicas > 1: num_worker_samples = math.ceil(num_worker_samples) if self.is_training: num_worker_samples = math.ceil(num_worker_samples / self.batch_size) * self.batch_size return int(num_worker_samples) def __iter__(self): if self.ds is None or self.reinit_each_iter: self._lazy_init() target_sample_count = self._num_samples_per_worker() sample_count = 0 for sample in self.ds: input_data = sample[self.input_key] if self.input_img_mode: if self.input_img_mode == 'L' and input_data.ndim == 3: input_data = input_data[:, :, 0] input_data = Image.fromarray(input_data, mode=self.input_img_mode) target_data = sample[self.target_key] if self.target_img_mode: target_data = Image.fromarray(target_data, mode=self.target_img_mode) elif self.remap_class: target_data = self.class_to_idx[target_data] yield (input_data, target_data) sample_count += 1 if self.is_training and sample_count >= target_sample_count: break if not self.is_training and self.dist_num_replicas > 1 and (self.subsplit is not None) and (0 < sample_count < target_sample_count): while sample_count < target_sample_count: yield (input_data, target_data) sample_count += 1 def __len__(self): num_samples = self._num_samples_per_worker() * self.num_workers return num_samples def _filename(self, index, basename=False, absolute=False): assert False, 'Not supported' def filenames(self, basename=False, absolute=False): if self.ds is None: self._lazy_init() names = [] for sample in self.ds: if len(names) > self.num_samples: break if 'file_name' in sample: name = sample['file_name'] elif 'filename' in sample: name = sample['filename'] elif 'id' in sample: name = sample['id'] else: assert False, 'No supported name field present' names.append(name) return names # File: pytorch-image-models-main/timm/data/readers/reader_wds.py """""" 
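# --- Illustrative sketch (not part of the original timm sources) ---
# The webdataset reader below accepts either a split name resolved through the
# dataset's _info.json, or an explicit brace-expanded shard pattern, optionally
# suffixed with '|<num_samples>' since a bare pattern carries no sample count
# (shard names here are hypothetical):
#   split='train'                              -> looked up in _info.json
#   split='train-{000000..000146}.tar|190000'  -> 147 shards, 190000 samples declared
from webdataset.shardlists import expand_urls

print(expand_urls('train-{000000..000002}.tar'))
# -> ['train-000000.tar', 'train-000001.tar', 'train-000002.tar']
# --- end sketch ---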
import io import json import logging import math import os import random import sys from dataclasses import dataclass from functools import partial from itertools import islice from typing import Any, Callable, Dict, List, Optional, Tuple import torch import torch.distributed as dist import yaml from PIL import Image from torch.utils.data import Dataset, IterableDataset, get_worker_info try: import webdataset as wds from webdataset.filters import _shuffle, getfirst from webdataset.shardlists import expand_urls from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample except ImportError: wds = None expand_urls = None from .class_map import load_class_map from .reader import Reader from .shared_count import SharedCount _logger = logging.getLogger(__name__) SAMPLE_SHUFFLE_SIZE = int(os.environ.get('WDS_SHUFFLE_SIZE', 8192)) SAMPLE_INITIAL_SIZE = int(os.environ.get('WDS_INITIAL_SIZE', 2048)) def _load_info(root, names=('_info.json', 'info.json')): if isinstance(names, str): names = (names,) tried = [] err_str = '' for n in names: full_path = os.path.join(root, n) try: tried.append(full_path) with wds.gopen(full_path) as f: if n.endswith('.json'): info_dict = json.load(f) else: info_dict = yaml.safe_load(f) return info_dict except Exception as e: err_str = str(e) _logger.warning(f'Dataset info file not found at {tried}. Error: {err_str}. Falling back to provided split and size arg.') return {} @dataclass class SplitInfo: num_samples: int filenames: Tuple[str] shard_lengths: Tuple[int] = () alt_label: str = '' name: str = '' def _parse_split_info(split: str, info: Dict): def _info_convert(dict_info): return SplitInfo(num_samples=dict_info['num_samples'], filenames=tuple(dict_info['filenames']), shard_lengths=tuple(dict_info['shard_lengths']), alt_label=dict_info.get('alt_label', ''), name=dict_info['name']) if 'tar' in split or '..' in split: split = split.split('|') num_samples = 0 split_name = '' if len(split) > 1: num_samples = int(split[1]) split = split[0] if '::' not in split: split_parts = split.split('-', 3) split_idx = len(split_parts) - 1 if split_idx and 'splits' in info and (split_parts[split_idx] in info['splits']): split_name = split_parts[split_idx] split_filenames = expand_urls(split) if split_name: split_info = info['splits'][split_name] if not num_samples: _fc = {f: c for (f, c) in zip(split_info['filenames'], split_info['shard_lengths'])} num_samples = sum((_fc[f] for f in split_filenames)) split_info['filenames'] = tuple(_fc.keys()) split_info['shard_lengths'] = tuple(_fc.values()) split_info['num_samples'] = num_samples split_info = _info_convert(split_info) else: split_info = SplitInfo(name=split_name, num_samples=num_samples, filenames=split_filenames) else: if 'splits' not in info or split not in info['splits']: raise RuntimeError(f"split {split} not found in info ({info.get('splits', {}).keys()})") split = split split_info = info['splits'][split] split_info = _info_convert(split_info) return split_info def log_and_continue(exn): _logger.warning(f'Handling webdataset error ({repr(exn)}). 
Ignoring.') if isinstance(exn, TypeError): raise exn return True def _decode(sample, image_key='jpg', image_mode='RGB', target_key='cls', alt_label=''): if alt_label: meta = json.loads(sample['json']) class_label = int(meta[alt_label]) if class_label < 0: return None else: class_label = int(sample[target_key]) img = getfirst(sample, image_key) with io.BytesIO(img) as b: img = Image.open(b) img.load() if image_mode: img = img.convert(image_mode) decoded = dict(jpg=img, cls=class_label, json=sample.get('json', None)) return decoded def pytorch_worker_seed(): worker_info = get_worker_info() if worker_info is not None: return worker_info.seed return wds.utils.pytorch_worker_seed() if wds is not None: class detshuffle2(wds.PipelineStage): def __init__(self, bufsize=1000, initial=100, seed=0, epoch=-1): self.bufsize = bufsize self.initial = initial self.seed = seed self.epoch = epoch def run(self, src): if isinstance(self.epoch, SharedCount): epoch = self.epoch.value else: self.epoch += 1 epoch = self.epoch if self.seed < 0: seed = pytorch_worker_seed() + epoch else: seed = self.seed + epoch rng = random.Random(seed) return _shuffle(src, self.bufsize, self.initial, rng) else: detshuffle2 = None class ResampledShards2(IterableDataset): def __init__(self, urls, nshards=sys.maxsize, worker_seed=None, deterministic=True, epoch=-1): super().__init__() urls = wds.shardlists.expand_urls(urls) self.urls = urls assert isinstance(self.urls[0], str) self.nshards = nshards self.rng = random.Random() self.worker_seed = pytorch_worker_seed if worker_seed is None else worker_seed self.deterministic = deterministic self.epoch = epoch def __iter__(self): if isinstance(self.epoch, SharedCount): epoch = self.epoch.value else: self.epoch += 1 epoch = self.epoch if self.deterministic: self.rng = random.Random(self.worker_seed() + epoch) for _ in range(self.nshards): index = self.rng.randint(0, len(self.urls) - 1) yield dict(url=self.urls[index]) class ReaderWds(Reader): def __init__(self, root: str, name: Optional[str]=None, split: str='train', is_training: bool=False, num_samples: Optional[int]=None, batch_size: int=1, repeats: int=0, seed: int=42, class_map: Optional[dict]=None, input_key: str='jpg;png;webp', input_img_mode: str='RGB', target_key: str='cls', target_img_mode: str='', filename_key: str='filename', sample_shuffle_size: Optional[int]=None, sample_initial_size: Optional[int]=None): super().__init__() if wds is None: raise RuntimeError('Please install webdataset 0.2.x package `pip install git+https://github.com/webdataset/webdataset`.') self.root = root self.is_training = is_training self.batch_size = batch_size self.repeats = repeats self.common_seed = seed self.shard_shuffle_size = 500 self.sample_shuffle_size = sample_shuffle_size or SAMPLE_SHUFFLE_SIZE self.sample_initial_size = sample_initial_size or SAMPLE_INITIAL_SIZE self.input_key = input_key self.input_img_mode = input_img_mode self.target_key = target_key self.filename_key = filename_key self.key_ext = '.JPEG' self.info = _load_info(self.root) self.split_info = _parse_split_info(split, self.info) if num_samples is not None: self.num_samples = num_samples else: self.num_samples = self.split_info.num_samples if not self.num_samples: raise RuntimeError('Invalid split definition, num_samples not specified.') self.remap_class = False if class_map: self.class_to_idx = load_class_map(class_map) self.remap_class = True else: self.class_to_idx = {} self.dist_rank = 0 self.dist_num_replicas = 1 if dist.is_available() and dist.is_initialized() and
(dist.get_world_size() > 1): self.dist_rank = dist.get_rank() self.dist_num_replicas = dist.get_world_size() self.worker_info = None self.worker_id = 0 self.worker_seed = seed self.num_workers = 1 self.global_worker_id = 0 self.global_num_workers = 1 self.init_count = 0 self.epoch_count = SharedCount() self.ds = None def set_epoch(self, count): self.epoch_count.value = count def set_loader_cfg(self, num_workers: Optional[int]=None): if self.ds is not None: return if num_workers is not None: self.num_workers = num_workers self.global_num_workers = self.dist_num_replicas * self.num_workers def _lazy_init(self): if self.worker_info is None: worker_info = torch.utils.data.get_worker_info() if worker_info is not None: self.worker_info = worker_info self.worker_id = worker_info.id self.worker_seed = worker_info.seed self.num_workers = worker_info.num_workers self.global_num_workers = self.dist_num_replicas * self.num_workers self.global_worker_id = self.dist_rank * self.num_workers + self.worker_id abs_shard_filenames = [os.path.join(self.root, f) for f in self.split_info.filenames] pipeline = [wds.SimpleShardList(abs_shard_filenames)] if self.is_training: pipeline.extend([detshuffle2(self.shard_shuffle_size, seed=self.common_seed, epoch=self.epoch_count), self._split_by_node_and_worker, wds.tarfile_to_samples(handler=log_and_continue), wds.shuffle(bufsize=self.sample_shuffle_size, initial=self.sample_initial_size, rng=random.Random(self.worker_seed))]) else: pipeline.extend([self._split_by_node_and_worker, wds.tarfile_to_samples(handler=log_and_continue)]) pipeline.extend([wds.map(partial(_decode, image_key=self.input_key, image_mode=self.input_img_mode, alt_label=self.split_info.alt_label), handler=log_and_continue), wds.rename(image=self.input_key, target=self.target_key)]) self.ds = wds.DataPipeline(*pipeline) def _split_by_node_and_worker(self, src): if self.global_num_workers > 1: for s in islice(src, self.global_worker_id, None, self.global_num_workers): yield s else: for s in src: yield s def _num_samples_per_worker(self): num_worker_samples = self.num_samples / max(self.global_num_workers, self.dist_num_replicas) if self.is_training or self.dist_num_replicas > 1: num_worker_samples = math.ceil(num_worker_samples) if self.is_training: num_worker_samples = math.ceil(num_worker_samples / self.batch_size) * self.batch_size return int(num_worker_samples) def __iter__(self): if self.ds is None: self._lazy_init() num_worker_samples = self._num_samples_per_worker() if self.is_training or self.dist_num_replicas > 1: ds = self.ds.with_epoch(num_worker_samples) else: ds = self.ds i = 0 for sample in ds: target = sample['target'] if self.remap_class: target = self.class_to_idx[target] yield (sample['image'], target) i += 1 def __len__(self): num_samples = self._num_samples_per_worker() * self.num_workers return num_samples def _filename(self, index, basename=False, absolute=False): assert False, 'Not supported' def filenames(self, basename=False, absolute=False): if self.ds is None: self._lazy_init() names = [] for sample in self.ds: if self.filename_key in sample: name = sample[self.filename_key] elif '__key__' in sample: name = sample['__key__'] + self.key_ext else: assert False, 'No supported name field present' names.append(name) if len(names) >= self.num_samples: break return names # File: pytorch-image-models-main/timm/data/readers/shared_count.py from multiprocessing import Value class SharedCount: def __init__(self, epoch: int=0): self.shared_epoch = Value('i', epoch) @property def 
value(self): return self.shared_epoch.value @value.setter def value(self, epoch): self.shared_epoch.value = epoch # File: pytorch-image-models-main/timm/data/real_labels.py """""" import os import json import numpy as np import pkgutil class RealLabelsImagenet: def __init__(self, filenames, real_json=None, topk=(1, 5)): if real_json is not None: with open(real_json) as real_labels: real_labels = json.load(real_labels) else: real_labels = json.loads(pkgutil.get_data(__name__, os.path.join('_info', 'imagenet_real_labels.json')).decode('utf-8')) real_labels = {f'ILSVRC2012_val_{i + 1:08d}.JPEG': labels for (i, labels) in enumerate(real_labels)} self.real_labels = real_labels self.filenames = filenames assert len(self.filenames) == len(self.real_labels) self.topk = topk self.is_correct = {k: [] for k in topk} self.sample_idx = 0 def add_result(self, output): maxk = max(self.topk) (_, pred_batch) = output.topk(maxk, 1, True, True) pred_batch = pred_batch.cpu().numpy() for pred in pred_batch: filename = self.filenames[self.sample_idx] filename = os.path.basename(filename) if self.real_labels[filename]: for k in self.topk: self.is_correct[k].append(any([p in self.real_labels[filename] for p in pred[:k]])) self.sample_idx += 1 def get_accuracy(self, k=None): if k is None: return {k: float(np.mean(self.is_correct[k])) * 100 for k in self.topk} else: return float(np.mean(self.is_correct[k])) * 100 # File: pytorch-image-models-main/timm/data/tf_preprocessing.py """""" '' import tensorflow.compat.v1 as tf import numpy as np IMAGE_SIZE = 224 CROP_PADDING = 32 tf.compat.v1.disable_eager_execution() def distorted_bounding_box_crop(image_bytes, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100, scope=None): with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]): shape = tf.image.extract_jpeg_shape(image_bytes) sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(shape, bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True) (bbox_begin, bbox_size, _) = sample_distorted_bounding_box (offset_y, offset_x, _) = tf.unstack(bbox_begin) (target_height, target_width, _) = tf.unstack(bbox_size) crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) return image def _at_least_x_are_equal(a, b, x): match = tf.equal(a, b) match = tf.cast(match, tf.int32) return tf.greater_equal(tf.reduce_sum(match), x) def _decode_and_random_crop(image_bytes, image_size, resize_method): bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) image = distorted_bounding_box_crop(image_bytes, bbox, min_object_covered=0.1, aspect_ratio_range=(3.0 / 4, 4.0 / 3.0), area_range=(0.08, 1.0), max_attempts=10, scope=None) original_shape = tf.image.extract_jpeg_shape(image_bytes) bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3) image = tf.cond(bad, lambda : _decode_and_center_crop(image_bytes, image_size), lambda : tf.image.resize([image], [image_size, image_size], resize_method)[0]) return image def _decode_and_center_crop(image_bytes, image_size, resize_method): shape = tf.image.extract_jpeg_shape(image_bytes) image_height = shape[0] image_width = shape[1] padded_center_crop_size = tf.cast(image_size / (image_size + CROP_PADDING) * tf.cast(tf.minimum(image_height, image_width), tf.float32), 
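# --- Illustrative sketch (not part of the original timm sources) ---
# RealLabelsImagenet above re-scores ImageNet-val predictions against the
# multi-label "Real labels": samples with an empty label set are skipped, and a
# top-k prediction counts as correct if any of its k classes is in the set.
# Typical evaluation wiring; `model` and `loader` are hypothetical stand-ins:
from timm.data.real_labels import RealLabelsImagenet

real_labels = RealLabelsImagenet(loader.dataset.filenames(basename=True))
for images, _ in loader:
    output = model(images)          # (N, 1000) logits
    real_labels.add_result(output)
print(real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5))
# --- end sketch ---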
tf.int32) offset_height = (image_height - padded_center_crop_size + 1) // 2 offset_width = (image_width - padded_center_crop_size + 1) // 2 crop_window = tf.stack([offset_height, offset_width, padded_center_crop_size, padded_center_crop_size]) image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) image = tf.image.resize([image], [image_size, image_size], resize_method)[0] return image def _flip(image): image = tf.image.random_flip_left_right(image) return image def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'): resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR image = _decode_and_random_crop(image_bytes, image_size, resize_method) image = _flip(image) image = tf.reshape(image, [image_size, image_size, 3]) image = tf.image.convert_image_dtype(image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) return image def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'): resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR image = _decode_and_center_crop(image_bytes, image_size, resize_method) image = tf.reshape(image, [image_size, image_size, 3]) image = tf.image.convert_image_dtype(image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) return image def preprocess_image(image_bytes, is_training=False, use_bfloat16=False, image_size=IMAGE_SIZE, interpolation='bicubic'): if is_training: return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation) else: return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation) class TfPreprocessTransform: def __init__(self, is_training=False, size=224, interpolation='bicubic'): self.is_training = is_training self.size = size[0] if isinstance(size, tuple) else size self.interpolation = interpolation self._image_bytes = None self.process_image = self._build_tf_graph() self.sess = None def _build_tf_graph(self): with tf.device('/cpu:0'): self._image_bytes = tf.placeholder(shape=[], dtype=tf.string) img = preprocess_image(self._image_bytes, self.is_training, False, self.size, self.interpolation) return img def __call__(self, image_bytes): if self.sess is None: self.sess = tf.Session() img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes}) img = img.round().clip(0, 255).astype(np.uint8) if img.ndim < 3: img = np.expand_dims(img, axis=-1) img = np.rollaxis(img, 2) return img # File: pytorch-image-models-main/timm/data/transforms.py import math import numbers import random import warnings from typing import List, Sequence, Tuple, Union import torch import torchvision.transforms as transforms import torchvision.transforms.functional as F try: from torchvision.transforms.functional import InterpolationMode has_interpolation_mode = True except ImportError: has_interpolation_mode = False from PIL import Image import numpy as np __all__ = ['ToNumpy', 'ToTensor', 'str_to_interp_mode', 'str_to_pil_interp', 'interp_mode_to_str', 'RandomResizedCropAndInterpolation', 'CenterCropOrPad', 'center_crop_or_pad', 'crop_or_pad', 'RandomCropOrPad', 'RandomPad', 'ResizeKeepRatio', 'TrimBorder', 'MaybeToTensor', 'MaybePILToTensor'] class ToNumpy: def __call__(self, pil_img): np_img = np.array(pil_img, dtype=np.uint8) if np_img.ndim < 3: np_img = np.expand_dims(np_img, axis=-1) np_img = np.rollaxis(np_img, 2) return np_img class ToTensor: def __init__(self, dtype=torch.float32): 
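# --- Illustrative sketch (not part of the original timm sources) ---
# ToNumpy above converts a PIL image to a uint8 numpy array and moves channels
# first (HWC -> CHW), which is the layout fast_collate expects on the prefetcher
# path. A quick shape check:
import numpy as np
from PIL import Image
from timm.data.transforms import ToNumpy

img = Image.new('RGB', (224, 160))   # PIL size is (W, H)
arr = ToNumpy()(img)
assert arr.shape == (3, 160, 224) and arr.dtype == np.uint8
# --- end sketch ---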
self.dtype = dtype def __call__(self, pil_img): return F.pil_to_tensor(pil_img).to(dtype=self.dtype) class MaybeToTensor(transforms.ToTensor): def __init__(self) -> None: super().__init__() def __call__(self, pic) -> torch.Tensor: if isinstance(pic, torch.Tensor): return pic return F.to_tensor(pic) def __repr__(self) -> str: return f'{self.__class__.__name__}()' class MaybePILToTensor: def __init__(self) -> None: super().__init__() def __call__(self, pic): if isinstance(pic, torch.Tensor): return pic return F.pil_to_tensor(pic) def __repr__(self) -> str: return f'{self.__class__.__name__}()' if hasattr(Image, 'Resampling'): _pil_interpolation_to_str = {Image.Resampling.NEAREST: 'nearest', Image.Resampling.BILINEAR: 'bilinear', Image.Resampling.BICUBIC: 'bicubic', Image.Resampling.BOX: 'box', Image.Resampling.HAMMING: 'hamming', Image.Resampling.LANCZOS: 'lanczos'} else: _pil_interpolation_to_str = {Image.NEAREST: 'nearest', Image.BILINEAR: 'bilinear', Image.BICUBIC: 'bicubic', Image.BOX: 'box', Image.HAMMING: 'hamming', Image.LANCZOS: 'lanczos'} _str_to_pil_interpolation = {b: a for (a, b) in _pil_interpolation_to_str.items()} if has_interpolation_mode: _torch_interpolation_to_str = {InterpolationMode.NEAREST: 'nearest', InterpolationMode.BILINEAR: 'bilinear', InterpolationMode.BICUBIC: 'bicubic', InterpolationMode.BOX: 'box', InterpolationMode.HAMMING: 'hamming', InterpolationMode.LANCZOS: 'lanczos'} _str_to_torch_interpolation = {b: a for (a, b) in _torch_interpolation_to_str.items()} else: _pil_interpolation_to_torch = {} _torch_interpolation_to_str = {} def str_to_pil_interp(mode_str): return _str_to_pil_interpolation[mode_str] def str_to_interp_mode(mode_str): if has_interpolation_mode: return _str_to_torch_interpolation[mode_str] else: return _str_to_pil_interpolation[mode_str] def interp_mode_to_str(mode): if has_interpolation_mode: return _torch_interpolation_to_str[mode] else: return _pil_interpolation_to_str[mode] _RANDOM_INTERPOLATION = (str_to_interp_mode('bilinear'), str_to_interp_mode('bicubic')) def _setup_size(size, error_msg='Please provide only two dimensions (h, w) for size.'): if isinstance(size, numbers.Number): return (int(size), int(size)) if isinstance(size, Sequence) and len(size) == 1: return (size[0], size[0]) if len(size) != 2: raise ValueError(error_msg) return size class RandomResizedCropAndInterpolation: def __init__(self, size, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), interpolation='bilinear'): if isinstance(size, (list, tuple)): self.size = tuple(size) else: self.size = (size, size) if scale[0] > scale[1] or ratio[0] > ratio[1]: warnings.warn('range should be of kind (min, max)') if interpolation == 'random': self.interpolation = _RANDOM_INTERPOLATION else: self.interpolation = str_to_interp_mode(interpolation) self.scale = scale self.ratio = ratio @staticmethod def get_params(img, scale, ratio): (img_w, img_h) = F.get_image_size(img) area = img_w * img_h for attempt in range(10): target_area = random.uniform(*scale) * area log_ratio = (math.log(ratio[0]), math.log(ratio[1])) aspect_ratio = math.exp(random.uniform(*log_ratio)) target_w = int(round(math.sqrt(target_area * aspect_ratio))) target_h = int(round(math.sqrt(target_area / aspect_ratio))) if target_w <= img_w and target_h <= img_h: i = random.randint(0, img_h - target_h) j = random.randint(0, img_w - target_w) return (i, j, target_h, target_w) in_ratio = img_w / img_h if in_ratio < min(ratio): target_w = img_w target_h = int(round(target_w / min(ratio))) elif in_ratio > max(ratio): target_h 
= img_h target_w = int(round(target_h * max(ratio))) else: target_w = img_w target_h = img_h i = (img_h - target_h) // 2 j = (img_w - target_w) // 2 return (i, j, target_h, target_w) def __call__(self, img): (i, j, h, w) = self.get_params(img, self.scale, self.ratio) if isinstance(self.interpolation, (tuple, list)): interpolation = random.choice(self.interpolation) else: interpolation = self.interpolation return F.resized_crop(img, i, j, h, w, self.size, interpolation) def __repr__(self): if isinstance(self.interpolation, (tuple, list)): interpolate_str = ' '.join([interp_mode_to_str(x) for x in self.interpolation]) else: interpolate_str = interp_mode_to_str(self.interpolation) format_string = self.__class__.__name__ + '(size={0}'.format(self.size) format_string += ', scale={0}'.format(tuple((round(s, 4) for s in self.scale))) format_string += ', ratio={0}'.format(tuple((round(r, 4) for r in self.ratio))) format_string += ', interpolation={0})'.format(interpolate_str) return format_string def center_crop_or_pad(img: torch.Tensor, output_size: Union[int, List[int]], fill: Union[int, Tuple[int, int, int]]=0, padding_mode: str='constant') -> torch.Tensor: output_size = _setup_size(output_size) (crop_height, crop_width) = output_size (_, image_height, image_width) = F.get_dimensions(img) if crop_width > image_width or crop_height > image_height: padding_ltrb = [(crop_width - image_width) // 2 if crop_width > image_width else 0, (crop_height - image_height) // 2 if crop_height > image_height else 0, (crop_width - image_width + 1) // 2 if crop_width > image_width else 0, (crop_height - image_height + 1) // 2 if crop_height > image_height else 0] img = F.pad(img, padding_ltrb, fill=fill, padding_mode=padding_mode) (_, image_height, image_width) = F.get_dimensions(img) if crop_width == image_width and crop_height == image_height: return img crop_top = int(round((image_height - crop_height) / 2.0)) crop_left = int(round((image_width - crop_width) / 2.0)) return F.crop(img, crop_top, crop_left, crop_height, crop_width) class CenterCropOrPad(torch.nn.Module): def __init__(self, size: Union[int, List[int]], fill: Union[int, Tuple[int, int, int]]=0, padding_mode: str='constant'): super().__init__() self.size = _setup_size(size) self.fill = fill self.padding_mode = padding_mode def forward(self, img): return center_crop_or_pad(img, self.size, fill=self.fill, padding_mode=self.padding_mode) def __repr__(self) -> str: return f'{self.__class__.__name__}(size={self.size})' def crop_or_pad(img: torch.Tensor, top: int, left: int, height: int, width: int, fill: Union[int, Tuple[int, int, int]]=0, padding_mode: str='constant') -> torch.Tensor: (_, image_height, image_width) = F.get_dimensions(img) right = left + width bottom = top + height if left < 0 or top < 0 or right > image_width or (bottom > image_height): padding_ltrb = [max(-left + min(0, right), 0), max(-top + min(0, bottom), 0), max(right - max(image_width, left), 0), max(bottom - max(image_height, top), 0)] img = F.pad(img, padding_ltrb, fill=fill, padding_mode=padding_mode) top = max(top, 0) left = max(left, 0) return F.crop(img, top, left, height, width) class RandomCropOrPad(torch.nn.Module): def __init__(self, size: Union[int, List[int]], fill: Union[int, Tuple[int, int, int]]=0, padding_mode: str='constant'): super().__init__() self.size = _setup_size(size) self.fill = fill self.padding_mode = padding_mode @staticmethod def get_params(img, size): (_, image_height, image_width) = F.get_dimensions(img) delta_height = image_height - size[0] 
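# Aside added for illustration (not in the original source): crop_or_pad above
# treats negative top/left as "pad before cropping". For example, on a 224x224
# image, crop_or_pad(img, top=-16, left=-16, height=256, width=256) first pads
# 16px on the overflowing sides, then crops, yielding a 256x256 result.
# RandomCropOrPad relies on this: the signed deltas computed here pad when
# negative and crop when positive.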
delta_width = image_width - size[1] top = int(math.copysign(random.randint(0, abs(delta_height)), delta_height)) left = int(math.copysign(random.randint(0, abs(delta_width)), delta_width)) return (top, left) def forward(self, img): (top, left) = self.get_params(img, self.size) return crop_or_pad(img, top=top, left=left, height=self.size[0], width=self.size[1], fill=self.fill, padding_mode=self.padding_mode) def __repr__(self) -> str: return f'{self.__class__.__name__}(size={self.size})' class RandomPad: def __init__(self, input_size, fill=0): self.input_size = input_size self.fill = fill @staticmethod def get_params(img, input_size): (width, height) = F.get_image_size(img) delta_width = max(input_size[1] - width, 0) delta_height = max(input_size[0] - height, 0) pad_left = random.randint(0, delta_width) pad_top = random.randint(0, delta_height) pad_right = delta_width - pad_left pad_bottom = delta_height - pad_top return (pad_left, pad_top, pad_right, pad_bottom) def __call__(self, img): padding = self.get_params(img, self.input_size) img = F.pad(img, padding, self.fill) return img class ResizeKeepRatio: def __init__(self, size, longest=0.0, interpolation='bilinear', random_scale_prob=0.0, random_scale_range=(0.85, 1.05), random_scale_area=False, random_aspect_prob=0.0, random_aspect_range=(0.9, 1.11)): if isinstance(size, (list, tuple)): self.size = tuple(size) else: self.size = (size, size) if interpolation == 'random': self.interpolation = _RANDOM_INTERPOLATION else: self.interpolation = str_to_interp_mode(interpolation) self.longest = float(longest) self.random_scale_prob = random_scale_prob self.random_scale_range = random_scale_range self.random_scale_area = random_scale_area self.random_aspect_prob = random_aspect_prob self.random_aspect_range = random_aspect_range @staticmethod def get_params(img, target_size, longest, random_scale_prob=0.0, random_scale_range=(1.0, 1.33), random_scale_area=False, random_aspect_prob=0.0, random_aspect_range=(0.9, 1.11)): (img_h, img_w) = img_size = F.get_dimensions(img)[1:] (target_h, target_w) = target_size ratio_h = img_h / target_h ratio_w = img_w / target_w ratio = max(ratio_h, ratio_w) * longest + min(ratio_h, ratio_w) * (1.0 - longest) if random_scale_prob > 0 and random.random() < random_scale_prob: ratio_factor = random.uniform(random_scale_range[0], random_scale_range[1]) if random_scale_area: ratio_factor = 1.0 / math.sqrt(ratio_factor) ratio_factor = (ratio_factor, ratio_factor) else: ratio_factor = (1.0, 1.0) if random_aspect_prob > 0 and random.random() < random_aspect_prob: log_aspect = (math.log(random_aspect_range[0]), math.log(random_aspect_range[1])) aspect_factor = math.exp(random.uniform(*log_aspect)) aspect_factor = math.sqrt(aspect_factor) ratio_factor = (ratio_factor[0] / aspect_factor, ratio_factor[1] * aspect_factor) size = [round(x * f / ratio) for (x, f) in zip(img_size, ratio_factor)] return size def __call__(self, img): size = self.get_params(img, self.size, self.longest, self.random_scale_prob, self.random_scale_range, self.random_scale_area, self.random_aspect_prob, self.random_aspect_range) if isinstance(self.interpolation, (tuple, list)): interpolation = random.choice(self.interpolation) else: interpolation = self.interpolation img = F.resize(img, size, interpolation) return img def __repr__(self): if isinstance(self.interpolation, (tuple, list)): interpolate_str = ' '.join([interp_mode_to_str(x) for x in self.interpolation]) else: interpolate_str = interp_mode_to_str(self.interpolation) format_string = 
self.__class__.__name__ + '(size={0}'.format(self.size) format_string += f', interpolation={interpolate_str}' format_string += f', longest={self.longest:.3f}' format_string += f', random_scale_prob={self.random_scale_prob:.3f}' format_string += f', random_scale_range=({self.random_scale_range[0]:.3f}, {self.random_scale_range[1]:.3f})' format_string += f', random_aspect_prob={self.random_aspect_prob:.3f}' format_string += f', random_aspect_range=({self.random_aspect_range[0]:.3f}, {self.random_aspect_range[1]:.3f}))' return format_string class TrimBorder(torch.nn.Module): def __init__(self, border_size: int): super().__init__() self.border_size = border_size def forward(self, img): (w, h) = F.get_image_size(img) top = left = self.border_size top = min(top, h) left = min(left, w) height = max(0, h - 2 * self.border_size) width = max(0, w - 2 * self.border_size) return F.crop(img, top, left, height, width) # File: pytorch-image-models-main/timm/data/transforms_factory.py """""" import math from typing import Optional, Tuple, Union import torch from torchvision import transforms from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT from timm.data.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform from timm.data.transforms import str_to_interp_mode, str_to_pil_interp, RandomResizedCropAndInterpolation, ResizeKeepRatio, CenterCropOrPad, RandomCropOrPad, TrimBorder, ToNumpy, MaybeToTensor, MaybePILToTensor from timm.data.random_erasing import RandomErasing def transforms_noaug_train(img_size: Union[int, Tuple[int, int]]=224, interpolation: str='bilinear', mean: Tuple[float, ...]=IMAGENET_DEFAULT_MEAN, std: Tuple[float, ...]=IMAGENET_DEFAULT_STD, use_prefetcher: bool=False, normalize: bool=True): if interpolation == 'random': interpolation = 'bilinear' tfl = [transforms.Resize(img_size, interpolation=str_to_interp_mode(interpolation)), transforms.CenterCrop(img_size)] if use_prefetcher: tfl += [ToNumpy()] elif not normalize: tfl += [MaybePILToTensor()] else: tfl += [MaybeToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))] return transforms.Compose(tfl) def transforms_imagenet_train(img_size: Union[int, Tuple[int, int]]=224, scale: Optional[Tuple[float, float]]=None, ratio: Optional[Tuple[float, float]]=None, train_crop_mode: Optional[str]=None, hflip: float=0.5, vflip: float=0.0, color_jitter: Union[float, Tuple[float, ...]]=0.4, color_jitter_prob: Optional[float]=None, force_color_jitter: bool=False, grayscale_prob: float=0.0, gaussian_blur_prob: float=0.0, auto_augment: Optional[str]=None, interpolation: str='random', mean: Tuple[float, ...]=IMAGENET_DEFAULT_MEAN, std: Tuple[float, ...]=IMAGENET_DEFAULT_STD, re_prob: float=0.0, re_mode: str='const', re_count: int=1, re_num_splits: int=0, use_prefetcher: bool=False, normalize: bool=True, separate: bool=False): train_crop_mode = train_crop_mode or 'rrc' assert train_crop_mode in {'rrc', 'rkrc', 'rkrr'} if train_crop_mode in ('rkrc', 'rkrr'): scale = tuple(scale or (0.8, 1.0)) ratio = tuple(ratio or (0.9, 1 / 0.9)) primary_tfl = [ResizeKeepRatio(img_size, interpolation=interpolation, random_scale_prob=0.5, random_scale_range=scale, random_scale_area=True, random_aspect_prob=0.5, random_aspect_range=ratio), CenterCropOrPad(img_size, padding_mode='reflect') if train_crop_mode == 'rkrc' else RandomCropOrPad(img_size, padding_mode='reflect')] else: scale = tuple(scale or (0.08, 1.0)) ratio = tuple(ratio or (3.0 / 4.0, 4.0 / 3.0))
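# Aside added for illustration (not in the original source): the 'rrc' branch
# below is the standard Inception-style random resized crop: an area fraction is
# drawn from scale=(0.08, 1.0) and an aspect ratio from ratio=(3/4, 4/3) before
# resizing to img_size. A typical (hedged) usage sketch:
#   train_tf = transforms_imagenet_train(224, auto_augment='rand-m9-mstd0.5')
# which prepends this crop to RandAugment and the tensor/normalize steps.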
primary_tfl = [RandomResizedCropAndInterpolation(img_size, scale=scale, ratio=ratio, interpolation=interpolation)] if hflip > 0.0: primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] if vflip > 0.0: primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] secondary_tfl = [] disable_color_jitter = False if auto_augment: assert isinstance(auto_augment, str) disable_color_jitter = not (force_color_jitter or '3a' in auto_augment) if isinstance(img_size, (tuple, list)): img_size_min = min(img_size) else: img_size_min = img_size aa_params = dict(translate_const=int(img_size_min * 0.45), img_mean=tuple([min(255, round(255 * x)) for x in mean])) if interpolation and interpolation != 'random': aa_params['interpolation'] = str_to_pil_interp(interpolation) if auto_augment.startswith('rand'): secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] elif auto_augment.startswith('augmix'): aa_params['translate_pct'] = 0.3 secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)] else: secondary_tfl += [auto_augment_transform(auto_augment, aa_params)] if color_jitter is not None and (not disable_color_jitter): if isinstance(color_jitter, (list, tuple)): assert len(color_jitter) in (3, 4) else: color_jitter = (float(color_jitter),) * 3 if color_jitter_prob is not None: secondary_tfl += [transforms.RandomApply([transforms.ColorJitter(*color_jitter)], p=color_jitter_prob)] else: secondary_tfl += [transforms.ColorJitter(*color_jitter)] if grayscale_prob: secondary_tfl += [transforms.RandomGrayscale(p=grayscale_prob)] if gaussian_blur_prob: secondary_tfl += [transforms.RandomApply([transforms.GaussianBlur(kernel_size=23)], p=gaussian_blur_prob)] final_tfl = [] if use_prefetcher: final_tfl += [ToNumpy()] elif not normalize: final_tfl += [MaybePILToTensor()] else: final_tfl += [MaybeToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))] if re_prob > 0.0: final_tfl += [RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu')] if separate: return (transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl)) else: return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) def transforms_imagenet_eval(img_size: Union[int, Tuple[int, int]]=224, crop_pct: Optional[float]=None, crop_mode: Optional[str]=None, crop_border_pixels: Optional[int]=None, interpolation: str='bilinear', mean: Tuple[float, ...]=IMAGENET_DEFAULT_MEAN, std: Tuple[float, ...]=IMAGENET_DEFAULT_STD, use_prefetcher: bool=False, normalize: bool=True): crop_pct = crop_pct or DEFAULT_CROP_PCT if isinstance(img_size, (tuple, list)): assert len(img_size) == 2 scale_size = tuple([math.floor(x / crop_pct) for x in img_size]) else: scale_size = math.floor(img_size / crop_pct) scale_size = (scale_size, scale_size) tfl = [] if crop_border_pixels: tfl += [TrimBorder(crop_border_pixels)] if crop_mode == 'squash': tfl += [transforms.Resize(scale_size, interpolation=str_to_interp_mode(interpolation)), transforms.CenterCrop(img_size)] elif crop_mode == 'border': fill = [round(255 * v) for v in mean] tfl += [ResizeKeepRatio(scale_size, interpolation=interpolation, longest=1.0), CenterCropOrPad(img_size, fill=fill)] else: if scale_size[0] == scale_size[1]: tfl += [transforms.Resize(scale_size[0], interpolation=str_to_interp_mode(interpolation))] else: tfl += [ResizeKeepRatio(scale_size)] tfl += [transforms.CenterCrop(img_size)] if use_prefetcher: tfl += [ToNumpy()] elif not normalize: tfl += [MaybePILToTensor()] else: tfl += 
[MaybeToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))] return transforms.Compose(tfl) def create_transform(input_size: Union[int, Tuple[int, int], Tuple[int, int, int]]=224, is_training: bool=False, no_aug: bool=False, train_crop_mode: Optional[str]=None, scale: Optional[Tuple[float, float]]=None, ratio: Optional[Tuple[float, float]]=None, hflip: float=0.5, vflip: float=0.0, color_jitter: Union[float, Tuple[float, ...]]=0.4, color_jitter_prob: Optional[float]=None, grayscale_prob: float=0.0, gaussian_blur_prob: float=0.0, auto_augment: Optional[str]=None, interpolation: str='bilinear', mean: Tuple[float, ...]=IMAGENET_DEFAULT_MEAN, std: Tuple[float, ...]=IMAGENET_DEFAULT_STD, re_prob: float=0.0, re_mode: str='const', re_count: int=1, re_num_splits: int=0, crop_pct: Optional[float]=None, crop_mode: Optional[str]=None, crop_border_pixels: Optional[int]=None, tf_preprocessing: bool=False, use_prefetcher: bool=False, normalize: bool=True, separate: bool=False): if isinstance(input_size, (tuple, list)): img_size = input_size[-2:] else: img_size = input_size if tf_preprocessing and use_prefetcher: assert not separate, 'Separate transforms not supported for TF preprocessing' from timm.data.tf_preprocessing import TfPreprocessTransform transform = TfPreprocessTransform(is_training=is_training, size=img_size, interpolation=interpolation) elif is_training and no_aug: assert not separate, 'Cannot perform split augmentation with no_aug' transform = transforms_noaug_train(img_size, interpolation=interpolation, mean=mean, std=std, use_prefetcher=use_prefetcher, normalize=normalize) elif is_training: transform = transforms_imagenet_train(img_size, train_crop_mode=train_crop_mode, scale=scale, ratio=ratio, hflip=hflip, vflip=vflip, color_jitter=color_jitter, color_jitter_prob=color_jitter_prob, grayscale_prob=grayscale_prob, gaussian_blur_prob=gaussian_blur_prob, auto_augment=auto_augment, interpolation=interpolation, mean=mean, std=std, re_prob=re_prob, re_mode=re_mode, re_count=re_count, re_num_splits=re_num_splits, use_prefetcher=use_prefetcher, normalize=normalize, separate=separate) else: assert not separate, 'Separate transforms not supported for validation preprocessing' transform = transforms_imagenet_eval(img_size, interpolation=interpolation, mean=mean, std=std, crop_pct=crop_pct, crop_mode=crop_mode, crop_border_pixels=crop_border_pixels, use_prefetcher=use_prefetcher, normalize=normalize) return transform # File: pytorch-image-models-main/timm/layers/__init__.py from .activations import * from .adaptive_avgmax_pool import adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d from .attention2d import MultiQueryAttention2d, Attention2d, MultiQueryAttentionV2 from .attention_pool import AttentionPoolLatent from .attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding from .blur_pool import BlurPool2d, create_aa from .classifier import create_classifier, ClassifierHead, NormMlpClassifierHead, ClNormMlpClassifierHead from .cond_conv2d import CondConv2d, get_condconv_initializer from .config import is_exportable, is_scriptable, is_no_jit, use_fused_attn, set_exportable, set_scriptable, set_no_jit, set_layer_config, set_fused_attn from .conv2d_same import Conv2dSame, conv2d_same from .conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct from .create_act import create_act_layer, get_act_layer, get_act_fn from .create_attn import get_attn, create_attn from .create_conv2d import create_conv2d from .create_norm 
import get_norm_layer, create_norm_layer from .create_norm_act import get_norm_act_layer, create_norm_act_layer from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn from .evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2, EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a from .fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm from .filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d from .format import Format, get_channel_dim, get_spatial_dim, nchw_to, nhwc_to from .gather_excite import GatherExcite from .global_context import GlobalContext from .grid import ndgrid, meshgrid from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple from .hybrid_embed import HybridEmbed, HybridEmbedWithSize from .inplace_abn import InplaceAbn from .layer_scale import LayerScale, LayerScale2d from .linear import Linear from .mixed_conv2d import MixedConv2d from .mlp import Mlp, GluMlp, GatedMlp, SwiGLU, SwiGLUPacked, ConvMlp, GlobalResponseNormMlp from .non_local_attn import NonLocalAttn, BatNonLocalAttn from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm from .norm_act import BatchNormAct2d, GroupNormAct, GroupNorm1Act, LayerNormAct, LayerNormAct2d, SyncBatchNormAct, convert_sync_batchnorm, FrozenBatchNormAct2d, freeze_batch_norm_2d, unfreeze_batch_norm_2d from .padding import get_padding, get_same_padding, pad_same from .patch_dropout import PatchDropout from .patch_embed import PatchEmbed, PatchEmbedWithSize, resample_patch_embed from .pool2d_same import AvgPool2dSame, create_pool2d from .pos_embed import resample_abs_pos_embed, resample_abs_pos_embed_nhwc from .pos_embed_rel import RelPosMlp, RelPosBias, RelPosBiasTf, gen_relative_position_index, gen_relative_log_coords, resize_rel_pos_bias_table, resize_rel_pos_bias_table_simple, resize_rel_pos_bias_table_levit from .pos_embed_sincos import pixel_freq_bands, freq_bands, build_sincos2d_pos_embed, build_fourier_pos_embed, build_rotary_pos_embed, apply_rot_embed, apply_rot_embed_cat, apply_rot_embed_list, apply_keep_indices_nlc, FourierEmbed, RotaryEmbedding, RotaryEmbeddingCat from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite from .selective_kernel import SelectiveKernel from .separable_conv import SeparableConv2d, SeparableConvNormAct from .space_to_depth import SpaceToDepth, DepthToSpace from .split_attn import SplitAttn from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame from .test_time_pool import TestTimePoolHead, apply_test_time_pool from .trace_utils import _assert, _float_to_int from .typing import LayerType, PadType from .weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_, init_weight_jax, init_weight_vit # File: pytorch-image-models-main/timm/layers/activations.py """""" import torch from torch import nn as nn from torch.nn import functional as F def swish(x, inplace: bool=False): return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) class Swish(nn.Module): def __init__(self, inplace: bool=False): super(Swish, self).__init__() self.inplace = inplace def forward(self, x): return swish(x, self.inplace) def mish(x, inplace: bool=False): return x.mul(F.softplus(x).tanh()) class Mish(nn.Module): def
__init__(self, inplace: bool=False): super(Mish, self).__init__() def forward(self, x): return mish(x) def sigmoid(x, inplace: bool=False): return x.sigmoid_() if inplace else x.sigmoid() class Sigmoid(nn.Module): def __init__(self, inplace: bool=False): super(Sigmoid, self).__init__() self.inplace = inplace def forward(self, x): return x.sigmoid_() if self.inplace else x.sigmoid() def tanh(x, inplace: bool=False): return x.tanh_() if inplace else x.tanh() class Tanh(nn.Module): def __init__(self, inplace: bool=False): super(Tanh, self).__init__() self.inplace = inplace def forward(self, x): return x.tanh_() if self.inplace else x.tanh() def hard_swish(x, inplace: bool=False): inner = F.relu6(x + 3.0).div_(6.0) return x.mul_(inner) if inplace else x.mul(inner) class HardSwish(nn.Module): def __init__(self, inplace: bool=False): super(HardSwish, self).__init__() self.inplace = inplace def forward(self, x): return hard_swish(x, self.inplace) def hard_sigmoid(x, inplace: bool=False): if inplace: return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0) else: return F.relu6(x + 3.0) / 6.0 class HardSigmoid(nn.Module): def __init__(self, inplace: bool=False): super(HardSigmoid, self).__init__() self.inplace = inplace def forward(self, x): return hard_sigmoid(x, self.inplace) def hard_mish(x, inplace: bool=False): if inplace: return x.mul_(0.5 * (x + 2).clamp(min=0, max=2)) else: return 0.5 * x * (x + 2).clamp(min=0, max=2) class HardMish(nn.Module): def __init__(self, inplace: bool=False): super(HardMish, self).__init__() self.inplace = inplace def forward(self, x): return hard_mish(x, self.inplace) class PReLU(nn.PReLU): def __init__(self, num_parameters: int=1, init: float=0.25, inplace: bool=False) -> None: super(PReLU, self).__init__(num_parameters=num_parameters, init=init) def forward(self, input: torch.Tensor) -> torch.Tensor: return F.prelu(input, self.weight) def gelu(x: torch.Tensor, inplace: bool=False) -> torch.Tensor: return F.gelu(x) class GELU(nn.Module): def __init__(self, inplace: bool=False): super(GELU, self).__init__() def forward(self, input: torch.Tensor) -> torch.Tensor: return F.gelu(input) def gelu_tanh(x: torch.Tensor, inplace: bool=False) -> torch.Tensor: return F.gelu(x, approximate='tanh') class GELUTanh(nn.Module): def __init__(self, inplace: bool=False): super(GELUTanh, self).__init__() def forward(self, input: torch.Tensor) -> torch.Tensor: return F.gelu(input, approximate='tanh') def quick_gelu(x: torch.Tensor, inplace: bool=False) -> torch.Tensor: return x * torch.sigmoid(1.702 * x) class QuickGELU(nn.Module): def __init__(self, inplace: bool=False): super(QuickGELU, self).__init__() def forward(self, input: torch.Tensor) -> torch.Tensor: return quick_gelu(input) # File: pytorch-image-models-main/timm/layers/activations_me.py """""" import torch from torch import nn as nn from torch.nn import functional as F def swish_fwd(x): return x.mul(torch.sigmoid(x)) def swish_bwd(x, grad_output): x_sigmoid = torch.sigmoid(x) return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) class SwishAutoFn(torch.autograd.Function): @staticmethod def symbolic(g, x): return g.op('Mul', x, g.op('Sigmoid', x)) @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return swish_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return swish_bwd(x, grad_output) def swish_me(x, inplace=False): return SwishAutoFn.apply(x) class SwishMe(nn.Module): def __init__(self, inplace: bool=False): super(SwishMe, self).__init__() def forward(self, x): return 
SwishAutoFn.apply(x) def mish_fwd(x): return x.mul(torch.tanh(F.softplus(x))) def mish_bwd(x, grad_output): x_sigmoid = torch.sigmoid(x) x_tanh_sp = F.softplus(x).tanh() return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) class MishAutoFn(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return mish_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return mish_bwd(x, grad_output) def mish_me(x, inplace=False): return MishAutoFn.apply(x) class MishMe(nn.Module): def __init__(self, inplace: bool=False): super(MishMe, self).__init__() def forward(self, x): return MishAutoFn.apply(x) def hard_sigmoid_fwd(x, inplace: bool=False): return (x + 3).clamp(min=0, max=6).div(6.0) def hard_sigmoid_bwd(x, grad_output): m = torch.ones_like(x) * ((x >= -3.0) & (x <= 3.0)) / 6.0 return grad_output * m class HardSigmoidAutoFn(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_sigmoid_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_sigmoid_bwd(x, grad_output) def hard_sigmoid_me(x, inplace: bool=False): return HardSigmoidAutoFn.apply(x) class HardSigmoidMe(nn.Module): def __init__(self, inplace: bool=False): super(HardSigmoidMe, self).__init__() def forward(self, x): return HardSigmoidAutoFn.apply(x) def hard_swish_fwd(x): return x * (x + 3).clamp(min=0, max=6).div(6.0) def hard_swish_bwd(x, grad_output): m = torch.ones_like(x) * (x >= 3.0) m = torch.where((x >= -3.0) & (x <= 3.0), x / 3.0 + 0.5, m) return grad_output * m class HardSwishAutoFn(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_swish_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_swish_bwd(x, grad_output) @staticmethod def symbolic(g, self): input = g.op('Add', self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float))) hardtanh_ = g.op('Clip', input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) hardtanh_ = g.op('Div', hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) return g.op('Mul', self, hardtanh_) def hard_swish_me(x, inplace=False): return HardSwishAutoFn.apply(x) class HardSwishMe(nn.Module): def __init__(self, inplace: bool=False): super(HardSwishMe, self).__init__() def forward(self, x): return HardSwishAutoFn.apply(x) def hard_mish_fwd(x): return 0.5 * x * (x + 2).clamp(min=0, max=2) def hard_mish_bwd(x, grad_output): m = torch.ones_like(x) * (x >= -2.0) m = torch.where((x >= -2.0) & (x <= 0.0), x + 1.0, m) return grad_output * m class HardMishAutoFn(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_mish_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_mish_bwd(x, grad_output) def hard_mish_me(x, inplace: bool=False): return HardMishAutoFn.apply(x) class HardMishMe(nn.Module): def __init__(self, inplace: bool=False): super(HardMishMe, self).__init__() def forward(self, x): return HardMishAutoFn.apply(x) # File: pytorch-image-models-main/timm/layers/adaptive_avgmax_pool.py """""" from typing import Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from .format import get_spatial_dim, get_channel_dim _int_tuple_2_t = Union[int, Tuple[int, int]] def adaptive_pool_feat_mult(pool_type='avg'): if pool_type.endswith('catavgmax'): return 2 
else: return 1 def adaptive_avgmax_pool2d(x, output_size: _int_tuple_2_t=1): x_avg = F.adaptive_avg_pool2d(x, output_size) x_max = F.adaptive_max_pool2d(x, output_size) return 0.5 * (x_avg + x_max) def adaptive_catavgmax_pool2d(x, output_size: _int_tuple_2_t=1): x_avg = F.adaptive_avg_pool2d(x, output_size) x_max = F.adaptive_max_pool2d(x, output_size) return torch.cat((x_avg, x_max), 1) def select_adaptive_pool2d(x, pool_type='avg', output_size: _int_tuple_2_t=1): if pool_type == 'avg': x = F.adaptive_avg_pool2d(x, output_size) elif pool_type == 'avgmax': x = adaptive_avgmax_pool2d(x, output_size) elif pool_type == 'catavgmax': x = adaptive_catavgmax_pool2d(x, output_size) elif pool_type == 'max': x = F.adaptive_max_pool2d(x, output_size) else: assert False, 'Invalid pool type: %s' % pool_type return x class FastAdaptiveAvgPool(nn.Module): def __init__(self, flatten: bool=False, input_fmt: str='NCHW'): super(FastAdaptiveAvgPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): return x.mean(self.dim, keepdim=not self.flatten) class FastAdaptiveMaxPool(nn.Module): def __init__(self, flatten: bool=False, input_fmt: str='NCHW'): super(FastAdaptiveMaxPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): return x.amax(self.dim, keepdim=not self.flatten) class FastAdaptiveAvgMaxPool(nn.Module): def __init__(self, flatten: bool=False, input_fmt: str='NCHW'): super(FastAdaptiveAvgMaxPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): x_avg = x.mean(self.dim, keepdim=not self.flatten) x_max = x.amax(self.dim, keepdim=not self.flatten) return 0.5 * x_avg + 0.5 * x_max class FastAdaptiveCatAvgMaxPool(nn.Module): def __init__(self, flatten: bool=False, input_fmt: str='NCHW'): super(FastAdaptiveCatAvgMaxPool, self).__init__() self.flatten = flatten self.dim_reduce = get_spatial_dim(input_fmt) if flatten: self.dim_cat = 1 else: self.dim_cat = get_channel_dim(input_fmt) def forward(self, x): x_avg = x.mean(self.dim_reduce, keepdim=not self.flatten) x_max = x.amax(self.dim_reduce, keepdim=not self.flatten) return torch.cat((x_avg, x_max), self.dim_cat) class AdaptiveAvgMaxPool2d(nn.Module): def __init__(self, output_size: _int_tuple_2_t=1): super(AdaptiveAvgMaxPool2d, self).__init__() self.output_size = output_size def forward(self, x): return adaptive_avgmax_pool2d(x, self.output_size) class AdaptiveCatAvgMaxPool2d(nn.Module): def __init__(self, output_size: _int_tuple_2_t=1): super(AdaptiveCatAvgMaxPool2d, self).__init__() self.output_size = output_size def forward(self, x): return adaptive_catavgmax_pool2d(x, self.output_size) class SelectAdaptivePool2d(nn.Module): def __init__(self, output_size: _int_tuple_2_t=1, pool_type: str='fast', flatten: bool=False, input_fmt: str='NCHW'): super(SelectAdaptivePool2d, self).__init__() assert input_fmt in ('NCHW', 'NHWC') self.pool_type = pool_type or '' pool_type = pool_type.lower() if not pool_type: self.pool = nn.Identity() self.flatten = nn.Flatten(1) if flatten else nn.Identity() elif pool_type.startswith('fast') or input_fmt != 'NCHW': assert output_size == 1, 'Fast pooling and non NCHW input formats require output_size == 1.'
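# Aside added for illustration (not in the original source): pool_type strings
# combine an optional 'fast' prefix with a pooling op: 'avg', 'max', 'avgmax',
# 'catavgmax', 'fastavg', etc. The 'cat' variants concatenate avg and max, so
# feat_mult() returns 2 and the downstream classifier width doubles:
#   pool = SelectAdaptivePool2d(pool_type='catavgmax', flatten=True)
#   y = pool(torch.randn(2, 512, 7, 7))  # y.shape == (2, 1024)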
if pool_type.endswith('catavgmax'): self.pool = FastAdaptiveCatAvgMaxPool(flatten, input_fmt=input_fmt) elif pool_type.endswith('avgmax'): self.pool = FastAdaptiveAvgMaxPool(flatten, input_fmt=input_fmt) elif pool_type.endswith('max'): self.pool = FastAdaptiveMaxPool(flatten, input_fmt=input_fmt) elif pool_type == 'fast' or pool_type.endswith('avg'): self.pool = FastAdaptiveAvgPool(flatten, input_fmt=input_fmt) else: assert False, 'Invalid pool type: %s' % pool_type self.flatten = nn.Identity() else: assert input_fmt == 'NCHW' if pool_type == 'avgmax': self.pool = AdaptiveAvgMaxPool2d(output_size) elif pool_type == 'catavgmax': self.pool = AdaptiveCatAvgMaxPool2d(output_size) elif pool_type == 'max': self.pool = nn.AdaptiveMaxPool2d(output_size) elif pool_type == 'avg': self.pool = nn.AdaptiveAvgPool2d(output_size) else: assert False, 'Invalid pool type: %s' % pool_type self.flatten = nn.Flatten(1) if flatten else nn.Identity() def is_identity(self): return not self.pool_type def forward(self, x): x = self.pool(x) x = self.flatten(x) return x def feat_mult(self): return adaptive_pool_feat_mult(self.pool_type) def __repr__(self): return self.__class__.__name__ + '(' + 'pool_type=' + self.pool_type + ', flatten=' + str(self.flatten) + ')' # File: pytorch-image-models-main/timm/layers/attention2d.py from typing import List, Optional, Union import torch from torch import nn as nn from torch.nn import functional as F from .config import use_fused_attn from .create_conv2d import create_conv2d from .helpers import to_2tuple from .pool2d_same import create_pool2d class MultiQueryAttentionV2(nn.Module): def __init__(self, dim: int, dim_out: Optional[int]=None, num_heads: int=8, key_dim: int=64, value_dim: int=64, attn_drop: float=0.0, proj_drop: float=0.0): super().__init__() dim_out = dim_out or dim self.num_heads = num_heads self.key_dim = key_dim self.value_dim = value_dim self.scale = key_dim ** (-0.5) self.query_proj = nn.Parameter(torch.randn([self.num_heads, self.key_dim, dim])) self.key_proj = nn.Parameter(torch.randn([dim, self.key_dim])) self.value_proj = nn.Parameter(torch.randn([dim, self.value_dim])) self.attn_drop = nn.Dropout(attn_drop) self.out_proj = nn.Parameter(torch.randn([dim_out, self.num_heads, self.value_dim])) self.proj_drop = nn.Dropout(proj_drop) def _reshape_input(self, t): s = t.shape return t.reshape(s[0], s[1], -1).transpose(1, 2) def forward(self, x, m: Optional[torch.Tensor]=None): s = x.shape m = m if m is not None else x reshaped_x = self._reshape_input(x) reshaped_m = self._reshape_input(m) q = torch.einsum('bnd,hkd->bnhk', reshaped_x, self.query_proj) k = torch.einsum('bmd,dk->bmk', reshaped_m, self.key_proj) attn = torch.einsum('bnhk,bmk->bnhm', q, k) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) v = torch.einsum('bmd,dv->bmv', reshaped_m, self.value_proj) o = torch.einsum('bnhm,bmv->bnhv', attn, v) result = torch.einsum('bnhv,dhv->bnd', o, self.out_proj) result = self.proj_drop(result) return result.reshape(s) class MultiQueryAttention2d(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim: int, dim_out: Optional[int]=None, num_heads: int=8, key_dim: Optional[int]=None, value_dim: Optional[int]=None, query_strides: int=1, kv_stride: int=1, dw_kernel_size: int=3, dilation: int=1, padding: Union[str, int, List[int]]='', attn_drop: float=0.0, proj_drop: float=0.0, norm_layer: nn.Module=nn.BatchNorm2d, use_bias: bool=False): super().__init__() dim_out = dim_out or dim self.num_heads = num_heads self.key_dim = key_dim or dim // num_heads self.value_dim =
value_dim or dim // num_heads self.query_strides = to_2tuple(query_strides) self.kv_stride = kv_stride self.has_query_strides = any([s > 1 for s in self.query_strides]) self.scale = self.key_dim ** (-0.5) self.fused_attn = use_fused_attn() self.drop = attn_drop self.query = nn.Sequential() if self.has_query_strides: if padding == 'same': self.query.add_module('down_pool', create_pool2d('avg', kernel_size=self.query_strides, padding='same')) else: self.query.add_module('down_pool', nn.AvgPool2d(kernel_size=query_strides)) self.query.add_module('norm', norm_layer(dim)) self.query.add_module('proj', create_conv2d(dim, self.num_heads * self.key_dim, kernel_size=1, bias=use_bias)) self.key = nn.Sequential() if kv_stride > 1: self.key.add_module('down_conv', create_conv2d(dim, dim, kernel_size=dw_kernel_size, stride=kv_stride, dilation=dilation, padding=padding, depthwise=True)) self.key.add_module('norm', norm_layer(dim)) self.key.add_module('proj', create_conv2d(dim, self.key_dim, kernel_size=1, padding=padding, bias=use_bias)) self.value = nn.Sequential() if kv_stride > 1: self.value.add_module('down_conv', create_conv2d(dim, dim, kernel_size=dw_kernel_size, stride=kv_stride, dilation=dilation, padding=padding, depthwise=True)) self.value.add_module('norm', norm_layer(dim)) self.value.add_module('proj', create_conv2d(dim, self.value_dim, kernel_size=1, bias=use_bias)) self.attn_drop = nn.Dropout(attn_drop) self.output = nn.Sequential() if self.has_query_strides: self.output.add_module('upsample', nn.Upsample(scale_factor=self.query_strides, mode='bilinear', align_corners=False)) self.output.add_module('proj', create_conv2d(self.value_dim * self.num_heads, dim_out, kernel_size=1, bias=use_bias)) self.output.add_module('drop', nn.Dropout(proj_drop)) self.einsum = False def init_weights(self): nn.init.xavier_uniform_(self.query.proj.weight) nn.init.xavier_uniform_(self.key.proj.weight) nn.init.xavier_uniform_(self.value.proj.weight) if self.kv_stride > 1: nn.init.xavier_uniform_(self.key.down_conv.weight) nn.init.xavier_uniform_(self.value.down_conv.weight) nn.init.xavier_uniform_(self.output.proj.weight) def _reshape_input(self, t: torch.Tensor): s = t.shape t = t.reshape(s[0], s[1], -1).transpose(1, 2) if self.einsum: return t else: return t.unsqueeze(1).contiguous() def _reshape_projected_query(self, t: torch.Tensor, num_heads: int, key_dim: int): s = t.shape t = t.reshape(s[0], num_heads, key_dim, -1) if self.einsum: return t.permute(0, 3, 1, 2).contiguous() else: return t.transpose(-1, -2).contiguous() def _reshape_output(self, t: torch.Tensor, num_heads: int, h_px: int, w_px: int): s = t.shape feat_dim = s[-1] * num_heads if not self.einsum: t = t.transpose(1, 2) return t.reshape(s[0], h_px, w_px, feat_dim).permute(0, 3, 1, 2).contiguous() def forward(self, x, attn_mask: Optional[torch.Tensor]=None): (B, C, H, W) = s = x.shape q = self.query(x) q = self._reshape_projected_query(q, self.num_heads, self.key_dim) k = self.key(x) k = self._reshape_input(k) v = self.value(x) v = self._reshape_input(v) if self.einsum: attn = torch.einsum('blhk,bpk->blhp', q, k) * self.scale if attn_mask is not None: attn = attn + attn_mask attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) o = torch.einsum('blhp,bpk->blhk', attn, v) elif self.fused_attn: o = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-1, -2) if attn_mask is not None: attn = attn + attn_mask attn = attn.softmax(dim=-1) attn 
= self.attn_drop(attn) o = attn @ v o = self._reshape_output(o, self.num_heads, H // self.query_strides[0], W // self.query_strides[1]) x = self.output(o) return x class Attention2d(nn.Module): fused_attn: torch.jit.Final[bool] '' def __init__(self, dim: int, dim_out: Optional[int]=None, num_heads: int=32, bias: bool=True, expand_first: bool=False, head_first: bool=False, attn_drop: float=0.0, proj_drop: float=0.0): super().__init__() dim_out = dim_out or dim dim_attn = dim_out if expand_first else dim self.num_heads = num_heads self.dim_head = dim_attn // num_heads self.head_first = head_first self.scale = num_heads ** (-0.5) self.fused_attn = use_fused_attn() self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, attn_mask: Optional[torch.Tensor]=None): (B, C, H, W) = x.shape if self.head_first: (q, k, v) = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2) else: (q, k, v) = self.qkv(x).reshape(B, 3, self.num_heads, self.dim_head, -1).unbind(1) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention(q.transpose(-1, -2).contiguous(), k.transpose(-1, -2).contiguous(), v.transpose(-1, -2).contiguous(), attn_mask=attn_mask, dropout_p=self.attn_drop.p if self.training else 0.0).transpose(-1, -2).reshape(B, -1, H, W) else: q = q * self.scale attn = q.transpose(-2, -1) @ k if attn_mask is not None: attn = attn + attn_mask attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) x = self.proj(x) x = self.proj_drop(x) return x # File: pytorch-image-models-main/timm/layers/attention_pool.py from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from .config import use_fused_attn from .mlp import Mlp from .weight_init import trunc_normal_tf_ class AttentionPoolLatent(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, in_features: int, out_features: int=None, embed_dim: int=None, num_heads: int=8, feat_size: Optional[int]=None, mlp_ratio: float=4.0, qkv_bias: bool=True, qk_norm: bool=False, latent_len: int=1, latent_dim: int=None, pos_embed: str='', pool_type: str='token', norm_layer: Optional[nn.Module]=None, drop: float=0.0): super().__init__() embed_dim = embed_dim or in_features out_features = out_features or in_features assert embed_dim % num_heads == 0 self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.feat_size = feat_size self.scale = self.head_dim ** (-0.5) self.pool = pool_type self.fused_attn = use_fused_attn() if pos_embed == 'abs': assert feat_size is not None self.pos_embed = nn.Parameter(torch.zeros(feat_size, in_features)) else: self.pos_embed = None self.latent_dim = latent_dim or embed_dim self.latent_len = latent_len self.latent = nn.Parameter(torch.zeros(1, self.latent_len, embed_dim)) self.q = nn.Linear(embed_dim, embed_dim, bias=qkv_bias) self.kv = nn.Linear(embed_dim, embed_dim * 2, bias=qkv_bias) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.proj = nn.Linear(embed_dim, embed_dim) self.proj_drop = nn.Dropout(drop) self.norm = norm_layer(out_features) if norm_layer is not None else nn.Identity() self.mlp = Mlp(embed_dim, int(embed_dim * mlp_ratio)) self.init_weights() def init_weights(self): if self.pos_embed is not None: trunc_normal_tf_(self.pos_embed, 
std=self.pos_embed.shape[1] ** (-0.5)) trunc_normal_tf_(self.latent, std=self.latent_dim ** (-0.5)) def forward(self, x): (B, N, C) = x.shape if self.pos_embed is not None: x = x + self.pos_embed.unsqueeze(0).to(x.dtype) q_latent = self.latent.expand(B, -1, -1) q = self.q(q_latent).reshape(B, self.latent_len, self.num_heads, self.head_dim).transpose(1, 2) kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) (k, v) = kv.unbind(0) (q, k) = (self.q_norm(q), self.k_norm(k)) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) x = attn @ v x = x.transpose(1, 2).reshape(B, self.latent_len, C) x = self.proj(x) x = self.proj_drop(x) x = x + self.mlp(self.norm(x)) if self.pool == 'token': x = x[:, 0] elif self.pool == 'avg': x = x.mean(1) return x # File: pytorch-image-models-main/timm/layers/attention_pool2d.py """""" from typing import Optional, Union, Tuple import torch import torch.nn as nn from .config import use_fused_attn from .helpers import to_2tuple from .pos_embed import resample_abs_pos_embed from .pos_embed_sincos import apply_rot_embed, RotaryEmbedding from .weight_init import trunc_normal_ class RotAttentionPool2d(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, in_features: int, out_features: Optional[int]=None, ref_feat_size: Union[int, Tuple[int, int]]=7, embed_dim: Optional[int]=None, head_dim: Optional[int]=64, num_heads: Optional[int]=None, qkv_bias: bool=True, qkv_separate: bool=False, pool_type: str='token', class_token: bool=False, drop_rate: float=0.0): super().__init__() assert pool_type in ('', 'token') self.embed_dim = embed_dim = embed_dim or in_features self.in_features = in_features self.out_features = out_features or in_features ref_feat_size = to_2tuple(ref_feat_size) if num_heads is not None: assert embed_dim % num_heads == 0 head_dim = embed_dim // num_heads else: assert embed_dim % head_dim == 0 num_heads = embed_dim // head_dim self.num_heads = num_heads self.head_dim = head_dim self.pool_type = pool_type.lower() self.scale = self.head_dim ** (-0.5) self.fused_attn = use_fused_attn() if class_token: self.cls_token = nn.Parameter(torch.zeros(1, embed_dim)) else: self.cls_token = None if qkv_separate: self.q = nn.Linear(in_features, embed_dim, bias=qkv_bias) self.k = nn.Linear(in_features, embed_dim, bias=qkv_bias) self.v = nn.Linear(in_features, embed_dim, bias=qkv_bias) self.qkv = None else: self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) self.drop = nn.Dropout(drop_rate) self.proj = nn.Linear(embed_dim, self.out_features) self.pos_embed = RotaryEmbedding(self.head_dim, in_pixels=False, ref_feat_shape=ref_feat_size) def init_weights(self, zero_init_last: bool=False): if self.qkv is None: in_features = self.q.in_features trunc_normal_(self.q.weight, std=in_features ** (-0.5)) nn.init.zeros_(self.q.bias) trunc_normal_(self.k.weight, std=in_features ** (-0.5)) nn.init.zeros_(self.k.bias) trunc_normal_(self.v.weight, std=in_features ** (-0.5)) nn.init.zeros_(self.v.bias) else: in_features = self.qkv.in_features trunc_normal_(self.qkv.weight, std=in_features ** (-0.5)) nn.init.zeros_(self.qkv.bias) def reset(self, num_classes: Optional[int]=None, pool_type: Optional[str]=None): if pool_type is not None: assert pool_type in ('', 'token') self.pool_type = pool_type if num_classes is not None: self.proj = nn.Linear(self.in_features, num_classes) if num_classes > 0 else nn.Identity() self.out_features = 
num_classes if num_classes > 0 else self.embed_dim def _pool(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor: if self.pool_type == 'token': x = x[:, 0] else: x = x[:, 1:].reshape(x.shape[0], H, W, -1).permute(0, 3, 1, 2) return x def forward(self, x, pre_logits: bool=False): (B, _, H, W) = x.shape N = H * W x = x.flatten(2).transpose(1, 2) if self.cls_token is None: x = torch.cat([x.mean(1, keepdim=True), x], dim=1) else: x = torch.cat([self.cls_token.expand(x.shape[0], -1, -1), x], dim=1) if self.qkv is None: q = self.q(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2) k = self.k(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2) v = self.v(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2) else: x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) (q, k, v) = x.unbind(0) (rse, rce) = self.pos_embed.get_embed((H, W)) q = torch.cat([q[:, :, :1, :], apply_rot_embed(q[:, :, 1:, :], rse, rce)], dim=2).type_as(v) k = torch.cat([k[:, :, :1, :], apply_rot_embed(k[:, :, 1:, :], rse, rce)], dim=2).type_as(v) if self.fused_attn: x = nn.functional.scaled_dot_product_attention(q, k, v) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) x = attn @ v x = x.transpose(1, 2).reshape(B, N + 1, -1) x = self.drop(x) if pre_logits: x = self._pool(x, H, W) return x x = self.proj(x) x = self._pool(x, H, W) return x class AttentionPool2d(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, in_features: int, feat_size: Union[int, Tuple[int, int]]=7, out_features: Optional[int]=None, embed_dim: Optional[int]=None, head_dim: Optional[int]=64, num_heads: Optional[int]=None, qkv_bias: bool=True, qkv_separate: bool=False, pool_type: str='token', class_token: bool=False, drop_rate: float=0.0): super().__init__() assert pool_type in ('', 'token') self.embed_dim = embed_dim = embed_dim or in_features self.in_features = in_features self.out_features = out_features or in_features if num_heads is not None: assert embed_dim % num_heads == 0 head_dim = embed_dim // num_heads else: assert embed_dim % head_dim == 0 num_heads = embed_dim // head_dim self.feat_size = to_2tuple(feat_size) self.seq_len = self.feat_size[0] * self.feat_size[1] self.num_heads = num_heads self.head_dim = head_dim self.pool_type = pool_type self.scale = self.head_dim ** (-0.5) self.fused_attn = use_fused_attn() if class_token: self.cls_token = nn.Parameter(torch.zeros(1, embed_dim)) else: self.cls_token = None if qkv_separate: self.q = nn.Linear(in_features, embed_dim, bias=qkv_bias) self.k = nn.Linear(in_features, embed_dim, bias=qkv_bias) self.v = nn.Linear(in_features, embed_dim, bias=qkv_bias) self.qkv = None else: self.q = self.k = self.v = None self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) self.drop = nn.Dropout(drop_rate) self.proj = nn.Linear(embed_dim, self.out_features) self.pos_embed = nn.Parameter(torch.zeros(self.seq_len + 1, in_features)) self.init_weights() def init_weights(self, zero_init_last: bool=False): if self.qkv is None: in_features = self.q.in_features trunc_normal_(self.q.weight, std=in_features ** (-0.5)) nn.init.zeros_(self.q.bias) trunc_normal_(self.k.weight, std=in_features ** (-0.5)) nn.init.zeros_(self.k.bias) trunc_normal_(self.v.weight, std=in_features ** (-0.5)) nn.init.zeros_(self.v.bias) else: in_features = self.qkv.in_features trunc_normal_(self.qkv.weight, std=in_features ** (-0.5)) nn.init.zeros_(self.qkv.bias) trunc_normal_(self.pos_embed, std=in_features 
** (-0.5)) def reset(self, num_classes: Optional[int]=None, pool_type: Optional[str]=None): if pool_type is not None: assert pool_type in ('', 'token') self.pool_type = pool_type if num_classes is not None: self.proj = nn.Linear(self.in_features, num_classes) if num_classes > 0 else nn.Identity() self.out_features = num_classes if num_classes > 0 else self.embed_dim def _pool(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor: if self.pool_type == 'token': x = x[:, 0] else: x = x[:, 1:].reshape(x.shape[0], H, W, -1).permute(0, 3, 1, 2) return x def forward(self, x, pre_logits: bool=False): (B, _, H, W) = x.shape N = H * W x = x.flatten(2).transpose(1, 2) if self.cls_token is None: x = torch.cat([x.mean(1, keepdim=True), x], dim=1) else: x = torch.cat([self.cls_token.expand(x.shape[0], -1, -1), x], dim=1) pos_embed = resample_abs_pos_embed(self.pos_embed.unsqueeze(0), (H, W), num_prefix_tokens=1) x = x + pos_embed if self.qkv is None: q = self.q(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2) k = self.k(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2) v = self.v(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2) else: x = self.qkv(x).reshape(B, -1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) (q, k, v) = x.unbind(0) if self.fused_attn: x = nn.functional.scaled_dot_product_attention(q, k, v) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) x = attn @ v x = x.transpose(1, 2).reshape(B, N + 1, -1) x = self.drop(x) if pre_logits: x = self._pool(x, H, W) return x x = self.proj(x) x = self._pool(x, H, W) return x # File: pytorch-image-models-main/timm/layers/blur_pool.py """""" from functools import partial from typing import Optional, Type import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from .padding import get_padding from .typing import LayerType class BlurPool2d(nn.Module): def __init__(self, channels: Optional[int]=None, filt_size: int=3, stride: int=2, pad_mode: str='reflect') -> None: super(BlurPool2d, self).__init__() assert filt_size > 1 self.channels = channels self.filt_size = filt_size self.stride = stride self.pad_mode = pad_mode self.padding = [get_padding(filt_size, stride, dilation=1)] * 4 coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32)) blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :] if channels is not None: blur_filter = blur_filter.repeat(self.channels, 1, 1, 1) self.register_buffer('filt', blur_filter, persistent=False) def forward(self, x: torch.Tensor) -> torch.Tensor: x = F.pad(x, self.padding, mode=self.pad_mode) if self.channels is None: channels = x.shape[1] weight = self.filt.expand(channels, 1, self.filt_size, self.filt_size) else: channels = self.channels weight = self.filt return F.conv2d(x, weight, stride=self.stride, groups=channels) def create_aa(aa_layer: LayerType, channels: Optional[int]=None, stride: int=2, enable: bool=True, noop: Optional[Type[nn.Module]]=nn.Identity) -> nn.Module: if not aa_layer or not enable: return noop() if noop is not None else None if isinstance(aa_layer, str): aa_layer = aa_layer.lower().replace('_', '').replace('-', '') if aa_layer == 'avg' or aa_layer == 'avgpool': aa_layer = nn.AvgPool2d elif aa_layer == 'blur' or aa_layer == 'blurpool': aa_layer = BlurPool2d elif aa_layer == 'blurpc': aa_layer = partial(BlurPool2d, pad_mode='constant') else: assert False, f'Unknown anti-aliasing layer ({aa_layer}).' 
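# Aside added for illustration (not in the original source): create_aa resolves
# a string or layer class into an anti-aliasing module, or a no-op when disabled:
#   aa = create_aa('blur', channels=64, stride=2)  # BlurPool2d over 64 channels
#   aa = create_aa(None, channels=64)              # nn.Identity()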
try: return aa_layer(channels=channels, stride=stride) except TypeError as e: return aa_layer(stride) # File: pytorch-image-models-main/timm/layers/bottleneck_attn.py """""" from typing import List import torch import torch.nn as nn import torch.nn.functional as F from .helpers import to_2tuple, make_divisible from .weight_init import trunc_normal_ from .trace_utils import _assert def rel_logits_1d(q, rel_k, permute_mask: List[int]): (B, H, W, dim) = q.shape x = q @ rel_k.transpose(-1, -2) x = x.reshape(-1, W, 2 * W - 1) x_pad = F.pad(x, [0, 1]).flatten(1) x_pad = F.pad(x_pad, [0, W - 1]) x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) x = x_pad[:, :W, W - 1:] x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) return x.permute(permute_mask) class PosEmbedRel(nn.Module): def __init__(self, feat_size, dim_head, scale): super().__init__() (self.height, self.width) = to_2tuple(feat_size) self.dim_head = dim_head self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale) self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale) def forward(self, q): (B, HW, _) = q.shape q = q.reshape(B, self.height, self.width, -1) rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) q = q.transpose(1, 2) rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) rel_logits = rel_logits_h + rel_logits_w rel_logits = rel_logits.reshape(B, HW, HW) return rel_logits class BottleneckAttn(nn.Module): def __init__(self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None, qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False): super().__init__() assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' dim_out = dim_out or dim assert dim_out % num_heads == 0 self.num_heads = num_heads self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.dim_head_v = dim_out // self.num_heads self.dim_out_qk = num_heads * self.dim_head_qk self.dim_out_v = num_heads * self.dim_head_v self.scale = self.dim_head_qk ** (-0.5) self.scale_pos_embed = scale_pos_embed self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias) self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale) self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() self.reset_parameters() def reset_parameters(self): trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** (-0.5)) trunc_normal_(self.pos_embed.height_rel, std=self.scale) trunc_normal_(self.pos_embed.width_rel, std=self.scale) def forward(self, x): (B, C, H, W) = x.shape _assert(H == self.pos_embed.height, '') _assert(W == self.pos_embed.width, '') x = self.qkv(x) (q, k, v) = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1) q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2) k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2) if self.scale_pos_embed: attn = (q @ k + self.pos_embed(q)) * self.scale else: attn = q @ k * self.scale + self.pos_embed(q) attn = attn.softmax(dim=-1) out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) out = self.pool(out) return out # File: pytorch-image-models-main/timm/layers/cbam.py """""" import torch from torch import nn as nn import torch.nn.functional as F from .conv_bn_act import ConvNormAct from .create_act import create_act_layer, get_act_layer from .helpers import make_divisible 
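# Aside added for illustration (not in the original source): CBAM applies
# channel attention then spatial attention in sequence; both are defined below.
# A minimal usage sketch on a residual feature map:
#   attn = CbamModule(channels=256)
#   x = attn(torch.randn(2, 256, 14, 14))  # same shape, reweighted features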
class ChannelAttn(nn.Module): def __init__(self, channels, rd_ratio=1.0 / 16, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(ChannelAttn, self).__init__() if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.0) self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias) self.act = act_layer(inplace=True) self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias) self.gate = create_act_layer(gate_layer) def forward(self, x): x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True)))) x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True)))) return x * self.gate(x_avg + x_max) class LightChannelAttn(ChannelAttn): def __init__(self, channels, rd_ratio=1.0 / 16, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(LightChannelAttn, self).__init__(channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias) def forward(self, x): x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True) x_attn = self.fc2(self.act(self.fc1(x_pool))) return x * F.sigmoid(x_attn) class SpatialAttn(nn.Module): def __init__(self, kernel_size=7, gate_layer='sigmoid'): super(SpatialAttn, self).__init__() self.conv = ConvNormAct(2, 1, kernel_size, apply_act=False) self.gate = create_act_layer(gate_layer) def forward(self, x): x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1) x_attn = self.conv(x_attn) return x * self.gate(x_attn) class LightSpatialAttn(nn.Module): def __init__(self, kernel_size=7, gate_layer='sigmoid'): super(LightSpatialAttn, self).__init__() self.conv = ConvNormAct(1, 1, kernel_size, apply_act=False) self.gate = create_act_layer(gate_layer) def forward(self, x): x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True) x_attn = self.conv(x_attn) return x * self.gate(x_attn) class CbamModule(nn.Module): def __init__(self, channels, rd_ratio=1.0 / 16, rd_channels=None, rd_divisor=1, spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(CbamModule, self).__init__() self.channel = ChannelAttn(channels, rd_ratio=rd_ratio, rd_channels=rd_channels, rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer) def forward(self, x): x = self.channel(x) x = self.spatial(x) return x class LightCbamModule(nn.Module): def __init__(self, channels, rd_ratio=1.0 / 16, rd_channels=None, rd_divisor=1, spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(LightCbamModule, self).__init__() self.channel = LightChannelAttn(channels, rd_ratio=rd_ratio, rd_channels=rd_channels, rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) self.spatial = LightSpatialAttn(spatial_kernel_size) def forward(self, x): x = self.channel(x) x = self.spatial(x) return x # File: pytorch-image-models-main/timm/layers/classifier.py """""" from collections import OrderedDict from functools import partial from typing import Optional, Union, Callable import torch import torch.nn as nn from torch.nn import functional as F from .adaptive_avgmax_pool import SelectAdaptivePool2d from .create_act import get_act_layer from .create_norm import get_norm_layer def _create_pool(num_features: int, num_classes: int, pool_type: str='avg', use_conv: bool=False, input_fmt: Optional[str]=None): flatten_in_pool = not use_conv 
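# --- Usage sketch for the CBAM modules above (illustrative, not part of the timm source).
# CbamModule applies channel attention then spatial attention; the Light* variants fuse the
# avg and max pools into a single branch. Input shape is preserved in all cases.
import torch
from timm.layers.cbam import CbamModule, LightCbamModule
x = torch.randn(2, 64, 32, 32)
assert CbamModule(channels=64, spatial_kernel_size=7)(x).shape == x.shape
assert LightCbamModule(channels=64)(x).shape == x.shape
# ---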
if not pool_type: flatten_in_pool = False global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten_in_pool, input_fmt=input_fmt) num_pooled_features = num_features * global_pool.feat_mult() return (global_pool, num_pooled_features) def _create_fc(num_features, num_classes, use_conv=False): if num_classes <= 0: fc = nn.Identity() elif use_conv: fc = nn.Conv2d(num_features, num_classes, 1, bias=True) else: fc = nn.Linear(num_features, num_classes, bias=True) return fc def create_classifier(num_features: int, num_classes: int, pool_type: str='avg', use_conv: bool=False, input_fmt: str='NCHW', drop_rate: Optional[float]=None): (global_pool, num_pooled_features) = _create_pool(num_features, num_classes, pool_type, use_conv=use_conv, input_fmt=input_fmt) fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) if drop_rate is not None: dropout = nn.Dropout(drop_rate) return (global_pool, dropout, fc) return (global_pool, fc) class ClassifierHead(nn.Module): def __init__(self, in_features: int, num_classes: int, pool_type: str='avg', drop_rate: float=0.0, use_conv: bool=False, input_fmt: str='NCHW'): super(ClassifierHead, self).__init__() self.in_features = in_features self.use_conv = use_conv self.input_fmt = input_fmt (global_pool, fc) = create_classifier(in_features, num_classes, pool_type, use_conv=use_conv, input_fmt=input_fmt) self.global_pool = global_pool self.drop = nn.Dropout(drop_rate) self.fc = fc self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity() def reset(self, num_classes: int, pool_type: Optional[str]=None): if pool_type is not None and pool_type != self.global_pool.pool_type: (self.global_pool, self.fc) = create_classifier(self.in_features, num_classes, pool_type=pool_type, use_conv=self.use_conv, input_fmt=self.input_fmt) self.flatten = nn.Flatten(1) if self.use_conv and pool_type else nn.Identity() else: num_pooled_features = self.in_features * self.global_pool.feat_mult() self.fc = _create_fc(num_pooled_features, num_classes, use_conv=self.use_conv) def forward(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.drop(x) if pre_logits: return self.flatten(x) x = self.fc(x) return self.flatten(x) class NormMlpClassifierHead(nn.Module): def __init__(self, in_features: int, num_classes: int, hidden_size: Optional[int]=None, pool_type: str='avg', drop_rate: float=0.0, norm_layer: Union[str, Callable]='layernorm2d', act_layer: Union[str, Callable]='tanh'): super().__init__() self.in_features = in_features self.hidden_size = hidden_size self.num_features = in_features self.use_conv = not pool_type norm_layer = get_norm_layer(norm_layer) act_layer = get_act_layer(act_layer) linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear self.global_pool = SelectAdaptivePool2d(pool_type=pool_type) self.norm = norm_layer(in_features) self.flatten = nn.Flatten(1) if pool_type else nn.Identity() if hidden_size: self.pre_logits = nn.Sequential(OrderedDict([('fc', linear_layer(in_features, hidden_size)), ('act', act_layer())])) self.num_features = hidden_size else: self.pre_logits = nn.Identity() self.drop = nn.Dropout(drop_rate) self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def reset(self, num_classes: int, pool_type: Optional[str]=None): if pool_type is not None: self.global_pool = SelectAdaptivePool2d(pool_type=pool_type) self.flatten = nn.Flatten(1) if pool_type else nn.Identity() self.use_conv = self.global_pool.is_identity() linear_layer = partial(nn.Conv2d, 
kernel_size=1) if self.use_conv else nn.Linear if self.hidden_size: if isinstance(self.pre_logits.fc, nn.Conv2d) and (not self.use_conv) or (isinstance(self.pre_logits.fc, nn.Linear) and self.use_conv): with torch.no_grad(): new_fc = linear_layer(self.in_features, self.hidden_size) new_fc.weight.copy_(self.pre_logits.fc.weight.reshape(new_fc.weight.shape)) new_fc.bias.copy_(self.pre_logits.fc.bias) self.pre_logits.fc = new_fc self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.norm(x) x = self.flatten(x) x = self.pre_logits(x) x = self.drop(x) if pre_logits: return x x = self.fc(x) return x class ClNormMlpClassifierHead(nn.Module): def __init__(self, in_features: int, num_classes: int, hidden_size: Optional[int]=None, pool_type: str='avg', drop_rate: float=0.0, norm_layer: Union[str, Callable]='layernorm', act_layer: Union[str, Callable]='gelu', input_fmt: str='NHWC'): super().__init__() self.in_features = in_features self.hidden_size = hidden_size self.num_features = in_features assert pool_type in ('', 'avg', 'max', 'avgmax') self.pool_type = pool_type assert input_fmt in ('NHWC', 'NLC') self.pool_dim = 1 if input_fmt == 'NLC' else (1, 2) norm_layer = get_norm_layer(norm_layer) act_layer = get_act_layer(act_layer) self.norm = norm_layer(in_features) if hidden_size: self.pre_logits = nn.Sequential(OrderedDict([('fc', nn.Linear(in_features, hidden_size)), ('act', act_layer())])) self.num_features = hidden_size else: self.pre_logits = nn.Identity() self.drop = nn.Dropout(drop_rate) self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def reset(self, num_classes: int, pool_type: Optional[str]=None, reset_other: bool=False): if pool_type is not None: self.pool_type = pool_type if reset_other: self.pre_logits = nn.Identity() self.norm = nn.Identity() self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def _global_pool(self, x): if self.pool_type: if self.pool_type == 'avg': x = x.mean(dim=self.pool_dim) elif self.pool_type == 'max': x = x.amax(dim=self.pool_dim) elif self.pool_type == 'avgmax': x = 0.5 * (x.amax(dim=self.pool_dim) + x.mean(dim=self.pool_dim)) return x def forward(self, x, pre_logits: bool=False): x = self._global_pool(x) x = self.norm(x) x = self.pre_logits(x) x = self.drop(x) if pre_logits: return x x = self.fc(x) return x # File: pytorch-image-models-main/timm/layers/cond_conv2d.py """""" import math from functools import partial import numpy as np import torch from torch import nn as nn from torch.nn import functional as F from .helpers import to_2tuple from .conv2d_same import conv2d_same from .padding import get_padding_value def get_condconv_initializer(initializer, num_experts, expert_shape): def condconv_initializer(weight): num_params = np.prod(expert_shape) if len(weight.shape) != 2 or weight.shape[0] != num_experts or weight.shape[1] != num_params: raise ValueError('CondConv variables must have shape [num_experts, num_params]') for i in range(num_experts): initializer(weight[i].view(expert_shape)) return condconv_initializer class CondConv2d(nn.Module): __constants__ = ['in_channels', 'out_channels', 'dynamic_padding'] def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): super(CondConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = 
to_2tuple(kernel_size) self.stride = to_2tuple(stride) (padding_val, is_padding_dynamic) = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) self.dynamic_padding = is_padding_dynamic self.padding = to_2tuple(padding_val) self.dilation = to_2tuple(dilation) self.groups = groups self.num_experts = num_experts self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size weight_num_param = 1 for wd in self.weight_shape: weight_num_param *= wd self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) if bias: self.bias_shape = (self.out_channels,) self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): init_weight = get_condconv_initializer(partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) init_weight(self.weight) if self.bias is not None: fan_in = np.prod(self.weight_shape[1:]) bound = 1 / math.sqrt(fan_in) init_bias = get_condconv_initializer(partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) init_bias(self.bias) def forward(self, x, routing_weights): (B, C, H, W) = x.shape weight = torch.matmul(routing_weights, self.weight) new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size weight = weight.view(new_weight_shape) bias = None if self.bias is not None: bias = torch.matmul(routing_weights, self.bias) bias = bias.view(B * self.out_channels) x = x.reshape(1, B * C, H, W) if self.dynamic_padding: out = conv2d_same(x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups * B) else: out = F.conv2d(x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups * B) out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) return out # File: pytorch-image-models-main/timm/layers/config.py """""" import os import warnings from typing import Any, Optional import torch __all__ = ['is_exportable', 'is_scriptable', 'is_no_jit', 'use_fused_attn', 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config', 'set_fused_attn'] _NO_JIT = False _NO_ACTIVATION_JIT = False _EXPORTABLE = False _SCRIPTABLE = False _HAS_FUSED_ATTN = hasattr(torch.nn.functional, 'scaled_dot_product_attention') if 'TIMM_FUSED_ATTN' in os.environ: _USE_FUSED_ATTN = int(os.environ['TIMM_FUSED_ATTN']) else: _USE_FUSED_ATTN = 1 def is_no_jit(): return _NO_JIT class set_no_jit: def __init__(self, mode: bool) -> None: global _NO_JIT self.prev = _NO_JIT _NO_JIT = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _NO_JIT _NO_JIT = self.prev return False def is_exportable(): return _EXPORTABLE class set_exportable: def __init__(self, mode: bool) -> None: global _EXPORTABLE self.prev = _EXPORTABLE _EXPORTABLE = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _EXPORTABLE _EXPORTABLE = self.prev return False def is_scriptable(): return _SCRIPTABLE class set_scriptable: def __init__(self, mode: bool) -> None: global _SCRIPTABLE self.prev = _SCRIPTABLE _SCRIPTABLE = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _SCRIPTABLE _SCRIPTABLE = self.prev return False class set_layer_config: def __init__(self, scriptable: Optional[bool]=None, exportable: Optional[bool]=None, no_jit: Optional[bool]=None, 
no_activation_jit: Optional[bool]=None): global _SCRIPTABLE global _EXPORTABLE global _NO_JIT global _NO_ACTIVATION_JIT self.prev = (_SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT) if scriptable is not None: _SCRIPTABLE = scriptable if exportable is not None: _EXPORTABLE = exportable if no_jit is not None: _NO_JIT = no_jit if no_activation_jit is not None: _NO_ACTIVATION_JIT = no_activation_jit def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _SCRIPTABLE global _EXPORTABLE global _NO_JIT global _NO_ACTIVATION_JIT (_SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT) = self.prev return False def use_fused_attn(experimental: bool=False) -> bool: if not _HAS_FUSED_ATTN or _EXPORTABLE: return False if experimental: return _USE_FUSED_ATTN > 1 return _USE_FUSED_ATTN > 0 def set_fused_attn(enable: bool=True, experimental: bool=False): global _USE_FUSED_ATTN if not _HAS_FUSED_ATTN: warnings.warn('This version of pytorch does not have F.scaled_dot_product_attention, fused_attn flag ignored.') return if experimental and enable: _USE_FUSED_ATTN = 2 elif enable: _USE_FUSED_ATTN = 1 else: _USE_FUSED_ATTN = 0 # File: pytorch-image-models-main/timm/layers/conv2d_same.py """""" import torch import torch.nn as nn import torch.nn.functional as F from typing import Tuple, Optional from .config import is_exportable, is_scriptable from .padding import pad_same, pad_same_arg, get_padding_value _USE_EXPORT_CONV = False def conv2d_same(x, weight: torch.Tensor, bias: Optional[torch.Tensor]=None, stride: Tuple[int, int]=(1, 1), padding: Tuple[int, int]=(0, 0), dilation: Tuple[int, int]=(1, 1), groups: int=1): x = pad_same(x, weight.shape[-2:], stride, dilation) return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) class Conv2dSame(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(Conv2dSame, self).__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) def forward(self, x): return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) class Conv2dSameExport(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(Conv2dSameExport, self).__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) self.pad = None self.pad_input_size = (0, 0) def forward(self, x): input_size = x.size()[-2:] if self.pad is None: pad_arg = pad_same_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation) self.pad = nn.ZeroPad2d(pad_arg) self.pad_input_size = input_size x = self.pad(x) return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): padding = kwargs.pop('padding', '') kwargs.setdefault('bias', False) (padding, is_dynamic) = get_padding_value(padding, kernel_size, **kwargs) if is_dynamic: if _USE_EXPORT_CONV and is_exportable(): assert not is_scriptable() return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs) else: return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) else: return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) # File: pytorch-image-models-main/timm/layers/conv_bn_act.py """""" from typing import Any, Dict, Optional, Type from torch import nn as nn from .typing import LayerType, PadType from .blur_pool import create_aa from .create_conv2d import create_conv2d from 
.create_norm_act import get_norm_act_layer class ConvNormAct(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size: int=1, stride: int=1, padding: PadType='', dilation: int=1, groups: int=1, bias: bool=False, apply_norm: bool=True, apply_act: bool=True, norm_layer: LayerType=nn.BatchNorm2d, act_layer: Optional[LayerType]=nn.ReLU, aa_layer: Optional[LayerType]=None, drop_layer: Optional[Type[nn.Module]]=None, conv_kwargs: Optional[Dict[str, Any]]=None, norm_kwargs: Optional[Dict[str, Any]]=None, act_kwargs: Optional[Dict[str, Any]]=None): super(ConvNormAct, self).__init__() conv_kwargs = conv_kwargs or {} norm_kwargs = norm_kwargs or {} act_kwargs = act_kwargs or {} use_aa = aa_layer is not None and stride > 1 self.conv = create_conv2d(in_channels, out_channels, kernel_size, stride=1 if use_aa else stride, padding=padding, dilation=dilation, groups=groups, bias=bias, **conv_kwargs) if apply_norm: norm_act_layer = get_norm_act_layer(norm_layer, act_layer) if drop_layer: norm_kwargs['drop_layer'] = drop_layer self.bn = norm_act_layer(out_channels, apply_act=apply_act, act_kwargs=act_kwargs, **norm_kwargs) else: self.bn = nn.Sequential() if drop_layer: norm_kwargs['drop_layer'] = drop_layer self.bn.add_module('drop', drop_layer()) self.aa = create_aa(aa_layer, out_channels, stride=stride, enable=use_aa, noop=None) @property def in_channels(self): return self.conv.in_channels @property def out_channels(self): return self.conv.out_channels def forward(self, x): x = self.conv(x) x = self.bn(x) if self.aa is not None: x = self.aa(x) return x ConvBnAct = ConvNormAct ConvNormActAa = ConvNormAct # File: pytorch-image-models-main/timm/layers/create_act.py """""" from typing import Union, Callable, Type from .activations import * from .activations_me import * from .config import is_exportable, is_scriptable _has_silu = 'silu' in dir(torch.nn.functional) _has_hardswish = 'hardswish' in dir(torch.nn.functional) _has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional) _has_mish = 'mish' in dir(torch.nn.functional) _ACT_FN_DEFAULT = dict(silu=F.silu if _has_silu else swish, swish=F.silu if _has_silu else swish, mish=F.mish if _has_mish else mish, relu=F.relu, relu6=F.relu6, leaky_relu=F.leaky_relu, elu=F.elu, celu=F.celu, selu=F.selu, gelu=gelu, gelu_tanh=gelu_tanh, quick_gelu=quick_gelu, sigmoid=sigmoid, tanh=tanh, hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid, hard_swish=F.hardswish if _has_hardswish else hard_swish, hard_mish=hard_mish) _ACT_FN_ME = dict(silu=F.silu if _has_silu else swish_me, swish=F.silu if _has_silu else swish_me, mish=F.mish if _has_mish else mish_me, hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me, hard_swish=F.hardswish if _has_hardswish else hard_swish_me, hard_mish=hard_mish_me) _ACT_FNS = (_ACT_FN_ME, _ACT_FN_DEFAULT) for a in _ACT_FNS: a.setdefault('hardsigmoid', a.get('hard_sigmoid')) a.setdefault('hardswish', a.get('hard_swish')) _ACT_LAYER_DEFAULT = dict(silu=nn.SiLU if _has_silu else Swish, swish=nn.SiLU if _has_silu else Swish, mish=nn.Mish if _has_mish else Mish, relu=nn.ReLU, relu6=nn.ReLU6, leaky_relu=nn.LeakyReLU, elu=nn.ELU, prelu=PReLU, celu=nn.CELU, selu=nn.SELU, gelu=GELU, gelu_tanh=GELUTanh, quick_gelu=QuickGELU, sigmoid=Sigmoid, tanh=Tanh, hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid, hard_swish=nn.Hardswish if _has_hardswish else HardSwish, hard_mish=HardMish, identity=nn.Identity) _ACT_LAYER_ME = dict(silu=nn.SiLU if _has_silu else SwishMe, swish=nn.SiLU if _has_silu else 
SwishMe, mish=nn.Mish if _has_mish else MishMe, hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe, hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe, hard_mish=HardMishMe) _ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_DEFAULT) for a in _ACT_LAYERS: a.setdefault('hardsigmoid', a.get('hard_sigmoid')) a.setdefault('hardswish', a.get('hard_swish')) def get_act_fn(name: Union[Callable, str]='relu'): if not name: return None if isinstance(name, Callable): return name name = name.lower() if not (is_exportable() or is_scriptable()): if name in _ACT_FN_ME: return _ACT_FN_ME[name] return _ACT_FN_DEFAULT[name] def get_act_layer(name: Union[Type[nn.Module], str]='relu'): if name is None: return None if not isinstance(name, str): return name if not name: return None name = name.lower() if not (is_exportable() or is_scriptable()): if name in _ACT_LAYER_ME: return _ACT_LAYER_ME[name] return _ACT_LAYER_DEFAULT[name] def create_act_layer(name: Union[Type[nn.Module], str], inplace=None, **kwargs): act_layer = get_act_layer(name) if act_layer is None: return None if inplace is None: return act_layer(**kwargs) try: return act_layer(inplace=inplace, **kwargs) except TypeError: return act_layer(**kwargs) # File: pytorch-image-models-main/timm/layers/create_attn.py """""" import torch from functools import partial from .bottleneck_attn import BottleneckAttn from .cbam import CbamModule, LightCbamModule from .eca import EcaModule, CecaModule from .gather_excite import GatherExcite from .global_context import GlobalContext from .halo_attn import HaloAttn from .lambda_layer import LambdaLayer from .non_local_attn import NonLocalAttn, BatNonLocalAttn from .selective_kernel import SelectiveKernel from .split_attn import SplitAttn from .squeeze_excite import SEModule, EffectiveSEModule def get_attn(attn_type): if isinstance(attn_type, torch.nn.Module): return attn_type module_cls = None if attn_type: if isinstance(attn_type, str): attn_type = attn_type.lower() if attn_type == 'se': module_cls = SEModule elif attn_type == 'ese': module_cls = EffectiveSEModule elif attn_type == 'eca': module_cls = EcaModule elif attn_type == 'ecam': module_cls = partial(EcaModule, use_mlp=True) elif attn_type == 'ceca': module_cls = CecaModule elif attn_type == 'ge': module_cls = GatherExcite elif attn_type == 'gc': module_cls = GlobalContext elif attn_type == 'gca': module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False) elif attn_type == 'cbam': module_cls = CbamModule elif attn_type == 'lcbam': module_cls = LightCbamModule elif attn_type == 'sk': module_cls = SelectiveKernel elif attn_type == 'splat': module_cls = SplitAttn elif attn_type == 'lambda': return LambdaLayer elif attn_type == 'bottleneck': return BottleneckAttn elif attn_type == 'halo': return HaloAttn elif attn_type == 'nl': module_cls = NonLocalAttn elif attn_type == 'bat': module_cls = BatNonLocalAttn else: assert False, 'Invalid attn module (%s)' % attn_type elif isinstance(attn_type, bool): if attn_type: module_cls = SEModule else: module_cls = attn_type return module_cls def create_attn(attn_type, channels, **kwargs): module_cls = get_attn(attn_type) if module_cls is not None: return module_cls(channels, **kwargs) return None # File: pytorch-image-models-main/timm/layers/create_conv2d.py """""" from .mixed_conv2d import MixedConv2d from .cond_conv2d import CondConv2d from .conv2d_same import create_conv2d_pad def create_conv2d(in_channels, out_channels, kernel_size, **kwargs): if isinstance(kernel_size, list): assert 'num_experts' 
not in kwargs if 'groups' in kwargs: groups = kwargs.pop('groups') if groups == in_channels: kwargs['depthwise'] = True else: assert groups == 1 m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs) else: depthwise = kwargs.pop('depthwise', False) groups = in_channels if depthwise else kwargs.pop('groups', 1) if 'num_experts' in kwargs and kwargs['num_experts'] > 0: m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs) else: m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs) return m # File: pytorch-image-models-main/timm/layers/create_norm.py """""" import functools import types from typing import Type import torch.nn as nn from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm from torchvision.ops.misc import FrozenBatchNorm2d _NORM_MAP = dict(batchnorm=nn.BatchNorm2d, batchnorm2d=nn.BatchNorm2d, batchnorm1d=nn.BatchNorm1d, groupnorm=GroupNorm, groupnorm1=GroupNorm1, layernorm=LayerNorm, layernorm2d=LayerNorm2d, rmsnorm=RmsNorm, frozenbatchnorm2d=FrozenBatchNorm2d) _NORM_TYPES = {m for (n, m) in _NORM_MAP.items()} def create_norm_layer(layer_name, num_features, **kwargs): layer = get_norm_layer(layer_name) layer_instance = layer(num_features, **kwargs) return layer_instance def get_norm_layer(norm_layer): if norm_layer is None: return None assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) norm_kwargs = {} if isinstance(norm_layer, functools.partial): norm_kwargs.update(norm_layer.keywords) norm_layer = norm_layer.func if isinstance(norm_layer, str): if not norm_layer: return None layer_name = norm_layer.replace('_', '').lower() norm_layer = _NORM_MAP[layer_name] else: norm_layer = norm_layer if norm_kwargs: norm_layer = functools.partial(norm_layer, **norm_kwargs) return norm_layer # File: pytorch-image-models-main/timm/layers/create_norm_act.py """""" import types import functools from .evo_norm import * from .filter_response_norm import FilterResponseNormAct2d, FilterResponseNormTlu2d from .norm_act import BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d from .inplace_abn import InplaceAbn _NORM_ACT_MAP = dict(batchnorm=BatchNormAct2d, batchnorm2d=BatchNormAct2d, groupnorm=GroupNormAct, groupnorm1=functools.partial(GroupNormAct, num_groups=1), layernorm=LayerNormAct, layernorm2d=LayerNormAct2d, evonormb0=EvoNorm2dB0, evonormb1=EvoNorm2dB1, evonormb2=EvoNorm2dB2, evonorms0=EvoNorm2dS0, evonorms0a=EvoNorm2dS0a, evonorms1=EvoNorm2dS1, evonorms1a=EvoNorm2dS1a, evonorms2=EvoNorm2dS2, evonorms2a=EvoNorm2dS2a, frn=FilterResponseNormAct2d, frntlu=FilterResponseNormTlu2d, inplaceabn=InplaceAbn, iabn=InplaceAbn) _NORM_ACT_TYPES = {m for (n, m) in _NORM_ACT_MAP.items()} _NORM_ACT_REQUIRES_ARG = {BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d, FilterResponseNormAct2d, InplaceAbn} def create_norm_act_layer(layer_name, num_features, act_layer=None, apply_act=True, jit=False, **kwargs): layer = get_norm_act_layer(layer_name, act_layer=act_layer) layer_instance = layer(num_features, apply_act=apply_act, **kwargs) if jit: layer_instance = torch.jit.script(layer_instance) return layer_instance def get_norm_act_layer(norm_layer, act_layer=None): if norm_layer is None: return None assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial)) norm_act_kwargs = {} if isinstance(norm_layer, functools.partial): 
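# --- Usage sketch for the create_conv2d() factory above (illustrative, not part of the timm source).
# A list kernel_size selects MixedConv2d, num_experts > 0 selects CondConv2d, and anything else
# goes through create_conv2d_pad (static padding, or a dynamic 'same'-padded Conv2dSame).
from timm.layers.create_conv2d import create_conv2d
dw = create_conv2d(32, 32, 3, groups=32)                   # depthwise: groups == in_channels
mixed = create_conv2d(32, 64, [3, 5])                      # MixedConv2d over two kernel sizes
same = create_conv2d(32, 64, 3, stride=2, padding='same')  # dynamic padding -> Conv2dSame
cond = create_conv2d(32, 64, 3, num_experts=4)             # CondConv2d with 4 experts
# ---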
norm_act_kwargs.update(norm_layer.keywords) norm_layer = norm_layer.func if isinstance(norm_layer, str): if not norm_layer: return None layer_name = norm_layer.replace('_', '').lower().split('-')[0] norm_act_layer = _NORM_ACT_MAP[layer_name] elif norm_layer in _NORM_ACT_TYPES: norm_act_layer = norm_layer elif isinstance(norm_layer, types.FunctionType): norm_act_layer = norm_layer else: type_name = norm_layer.__name__.lower() if type_name.startswith('batchnorm'): norm_act_layer = BatchNormAct2d elif type_name.startswith('groupnorm'): norm_act_layer = GroupNormAct elif type_name.startswith('groupnorm1'): norm_act_layer = functools.partial(GroupNormAct, num_groups=1) elif type_name.startswith('layernorm2d'): norm_act_layer = LayerNormAct2d elif type_name.startswith('layernorm'): norm_act_layer = LayerNormAct else: assert False, f'No equivalent norm_act layer for {type_name}' if norm_act_layer in _NORM_ACT_REQUIRES_ARG: norm_act_kwargs.setdefault('act_layer', act_layer) if norm_act_kwargs: norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) return norm_act_layer # File: pytorch-image-models-main/timm/layers/drop.py """""" import torch import torch.nn as nn import torch.nn.functional as F from .grid import ndgrid def drop_block_2d(x, drop_prob: float=0.1, block_size: int=7, gamma_scale: float=1.0, with_noise: bool=False, inplace: bool=False, batchwise: bool=False): (B, C, H, W) = x.shape total_size = W * H clipped_block_size = min(block_size, min(W, H)) gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ((W - block_size + 1) * (H - block_size + 1)) (w_i, h_i) = ndgrid(torch.arange(W, device=x.device), torch.arange(H, device=x.device)) valid_block = (w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2) & ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2)) valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype) if batchwise: uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) else: uniform_noise = torch.rand_like(x) block_mask = (2 - gamma - valid_block + uniform_noise >= 1).to(dtype=x.dtype) block_mask = -F.max_pool2d(-block_mask, kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2) if with_noise: normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) if inplace: x.mul_(block_mask).add_(normal_noise * (1 - block_mask)) else: x = x * block_mask + normal_noise * (1 - block_mask) else: normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-07)).to(x.dtype) if inplace: x.mul_(block_mask * normalize_scale) else: x = x * block_mask * normalize_scale return x def drop_block_fast_2d(x: torch.Tensor, drop_prob: float=0.1, block_size: int=7, gamma_scale: float=1.0, with_noise: bool=False, inplace: bool=False): (B, C, H, W) = x.shape total_size = W * H clipped_block_size = min(block_size, min(W, H)) gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ((W - block_size + 1) * (H - block_size + 1)) block_mask = torch.empty_like(x).bernoulli_(gamma) block_mask = F.max_pool2d(block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2) if with_noise: normal_noise = torch.empty_like(x).normal_() if inplace: x.mul_(1.0 - block_mask).add_(normal_noise * block_mask) else: x = x * (1.0 - block_mask) + normal_noise * block_mask else: block_mask = 1 - block_mask normalize_scale = (block_mask.numel() / 
block_mask.to(dtype=torch.float32).sum().add(1e-06)).to(dtype=x.dtype) if inplace: x.mul_(block_mask * normalize_scale) else: x = x * block_mask * normalize_scale return x class DropBlock2d(nn.Module): def __init__(self, drop_prob: float=0.1, block_size: int=7, gamma_scale: float=1.0, with_noise: bool=False, inplace: bool=False, batchwise: bool=False, fast: bool=True): super(DropBlock2d, self).__init__() self.drop_prob = drop_prob self.gamma_scale = gamma_scale self.block_size = block_size self.with_noise = with_noise self.inplace = inplace self.batchwise = batchwise self.fast = fast def forward(self, x): if not self.training or not self.drop_prob: return x if self.fast: return drop_block_fast_2d(x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace) else: return drop_block_2d(x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) def drop_path(x, drop_prob: float=0.0, training: bool=False, scale_by_keep: bool=True): if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = x.new_empty(shape).bernoulli_(keep_prob) if keep_prob > 0.0 and scale_by_keep: random_tensor.div_(keep_prob) return x * random_tensor class DropPath(nn.Module): def __init__(self, drop_prob: float=0.0, scale_by_keep: bool=True): super(DropPath, self).__init__() self.drop_prob = drop_prob self.scale_by_keep = scale_by_keep def forward(self, x): return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) def extra_repr(self): return f'drop_prob={round(self.drop_prob, 3):0.3f}' # File: pytorch-image-models-main/timm/layers/eca.py """""" import math from torch import nn import torch.nn.functional as F from .create_act import create_act_layer from .helpers import make_divisible class EcaModule(nn.Module): def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid', rd_ratio=1 / 8, rd_channels=None, rd_divisor=8, use_mlp=False): super(EcaModule, self).__init__() if channels is not None: t = int(abs(math.log(channels, 2) + beta) / gamma) kernel_size = max(t if t % 2 else t + 1, 3) assert kernel_size % 2 == 1 padding = (kernel_size - 1) // 2 if use_mlp: assert channels is not None if rd_channels is None: rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor) act_layer = act_layer or nn.ReLU self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True) self.act = create_act_layer(act_layer) self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True) else: self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False) self.act = None self.conv2 = None self.gate = create_act_layer(gate_layer) def forward(self, x): y = x.mean((2, 3)).view(x.shape[0], 1, -1) y = self.conv(y) if self.conv2 is not None: y = self.act(y) y = self.conv2(y) y = self.gate(y).view(x.shape[0], -1, 1, 1) return x * y.expand_as(x) EfficientChannelAttn = EcaModule class CecaModule(nn.Module): def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'): super(CecaModule, self).__init__() if channels is not None: t = int(abs(math.log(channels, 2) + beta) / gamma) kernel_size = max(t if t % 2 else t + 1, 3) has_act = act_layer is not None assert kernel_size % 2 == 1 self.padding = (kernel_size - 1) // 2 self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act) self.gate = create_act_layer(gate_layer) def forward(self, x): y 
= x.mean((2, 3)).view(x.shape[0], 1, -1) y = F.pad(y, (self.padding, self.padding), mode='circular') y = self.conv(y) y = self.gate(y).view(x.shape[0], -1, 1, 1) return x * y.expand_as(x) CircularEfficientChannelAttn = CecaModule # File: pytorch-image-models-main/timm/layers/evo_norm.py """""" from typing import Sequence, Union import torch import torch.nn as nn import torch.nn.functional as F from .create_act import create_act_layer from .trace_utils import _assert def instance_std(x, eps: float=1e-05): std = x.float().var(dim=(2, 3), unbiased=False, keepdim=True).add(eps).sqrt().to(x.dtype) return std.expand(x.shape) def instance_std_tpu(x, eps: float=1e-05): std = manual_var(x, dim=(2, 3)).add(eps).sqrt() return std.expand(x.shape) def instance_rms(x, eps: float=1e-05): rms = x.float().square().mean(dim=(2, 3), keepdim=True).add(eps).sqrt().to(x.dtype) return rms.expand(x.shape) def manual_var(x, dim: Union[int, Sequence[int]], diff_sqm: bool=False): xm = x.mean(dim=dim, keepdim=True) if diff_sqm: var = ((x * x).mean(dim=dim, keepdim=True) - xm * xm).clamp(0) else: var = ((x - xm) * (x - xm)).mean(dim=dim, keepdim=True) return var def group_std(x, groups: int=32, eps: float=1e-05, flatten: bool=False): (B, C, H, W) = x.shape x_dtype = x.dtype _assert(C % groups == 0, '') if flatten: x = x.reshape(B, groups, -1) std = x.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) else: x = x.reshape(B, groups, C // groups, H, W) std = x.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) return std.expand(x.shape).reshape(B, C, H, W) def group_std_tpu(x, groups: int=32, eps: float=1e-05, diff_sqm: bool=False, flatten: bool=False): (B, C, H, W) = x.shape _assert(C % groups == 0, '') if flatten: x = x.reshape(B, groups, -1) var = manual_var(x, dim=-1, diff_sqm=diff_sqm) else: x = x.reshape(B, groups, C // groups, H, W) var = manual_var(x, dim=(2, 3, 4), diff_sqm=diff_sqm) return var.add(eps).sqrt().expand(x.shape).reshape(B, C, H, W) def group_rms(x, groups: int=32, eps: float=1e-05): (B, C, H, W) = x.shape _assert(C % groups == 0, '') x_dtype = x.dtype x = x.reshape(B, groups, C // groups, H, W) rms = x.float().square().mean(dim=(2, 3, 4), keepdim=True).add(eps).sqrt_().to(x_dtype) return rms.expand(x.shape).reshape(B, C, H, W) class EvoNorm2dB0(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=0.001, **_): super().__init__() self.apply_act = apply_act self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.v is not None: nn.init.ones_(self.v) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.v is not None: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) n = x.numel() / x.shape[1] self.running_var.copy_(self.running_var * (1 - self.momentum) + var.detach() * self.momentum * (n / (n - 1))) else: var = self.running_var left = var.add(self.eps).sqrt_().to(x_dtype).view(v_shape).expand_as(x) v = self.v.to(x_dtype).view(v_shape) right = x * v + instance_std(x, self.eps) x = x / left.max(right) return x * self.weight.to(x_dtype).view(v_shape) + self.bias.to(x_dtype).view(v_shape) class 
EvoNorm2dB1(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-05, **_): super().__init__() self.apply_act = apply_act self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) n = x.numel() / x.shape[1] self.running_var.copy_(self.running_var * (1 - self.momentum) + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) else: var = self.running_var var = var.to(x_dtype).view(v_shape) left = var.add(self.eps).sqrt_() right = (x + 1) * instance_rms(x, self.eps) x = x / left.max(right) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dB2(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-05, **_): super().__init__() self.apply_act = apply_act self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) n = x.numel() / x.shape[1] self.running_var.copy_(self.running_var * (1 - self.momentum) + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) else: var = self.running_var var = var.to(x_dtype).view(v_shape) left = var.add(self.eps).sqrt_() right = instance_rms(x, self.eps) - x x = x / left.max(right) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS0(nn.Module): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-05, **_): super().__init__() self.apply_act = apply_act if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.v is not None: nn.init.ones_(self.v) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.v is not None: v = self.v.view(v_shape).to(x_dtype) x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS0a(EvoNorm2dS0): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=0.001, **_): super().__init__(num_features, groups=groups, group_size=group_size, apply_act=apply_act, eps=eps) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) d = group_std(x, self.groups, self.eps) if self.v is not None: v = self.v.view(v_shape).to(x_dtype) x = x * (x * 
v).sigmoid() x = x / d return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS1(nn.Module): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-05, **_): super().__init__() act_layer = act_layer or nn.SiLU self.apply_act = apply_act if act_layer is not None and apply_act: self.act = create_act_layer(act_layer) else: self.act = nn.Identity() if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.pre_act_norm = False self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: x = self.act(x) / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS1a(EvoNorm2dS1): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=0.001, **_): super().__init__(num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = self.act(x) / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS2(nn.Module): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-05, **_): super().__init__() act_layer = act_layer or nn.SiLU self.apply_act = apply_act if act_layer is not None and apply_act: self.act = create_act_layer(act_layer) else: self.act = nn.Identity() if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: x = self.act(x) / group_rms(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS2a(EvoNorm2dS2): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=0.001, **_): super().__init__(num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = self.act(x) / group_rms(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) # File: pytorch-image-models-main/timm/layers/fast_norm.py """""" from typing import List, Optional import torch from torch.nn import functional as F try: from apex.normalization.fused_layer_norm import fused_layer_norm_affine has_apex = True except ImportError: has_apex = False try: from apex.normalization.fused_layer_norm import fused_rms_norm_affine, fused_rms_norm has_apex_rmsnorm = True except ImportError: has_apex_rmsnorm = False _USE_FAST_NORM = False def is_fast_norm(): return _USE_FAST_NORM def 
set_fast_norm(enable=True): global _USE_FAST_NORM _USE_FAST_NORM = enable def fast_group_norm(x: torch.Tensor, num_groups: int, weight: Optional[torch.Tensor]=None, bias: Optional[torch.Tensor]=None, eps: float=1e-05) -> torch.Tensor: if torch.jit.is_scripting(): return F.group_norm(x, num_groups, weight, bias, eps) if torch.is_autocast_enabled(): dt = torch.get_autocast_gpu_dtype() (x, weight, bias) = (x.to(dt), weight.to(dt), bias.to(dt) if bias is not None else None) with torch.cuda.amp.autocast(enabled=False): return F.group_norm(x, num_groups, weight, bias, eps) def fast_layer_norm(x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor]=None, bias: Optional[torch.Tensor]=None, eps: float=1e-05) -> torch.Tensor: if torch.jit.is_scripting(): return F.layer_norm(x, normalized_shape, weight, bias, eps) if has_apex: return fused_layer_norm_affine(x, weight, bias, normalized_shape, eps) if torch.is_autocast_enabled(): dt = torch.get_autocast_gpu_dtype() (x, weight, bias) = (x.to(dt), weight.to(dt), bias.to(dt) if bias is not None else None) with torch.cuda.amp.autocast(enabled=False): return F.layer_norm(x, normalized_shape, weight, bias, eps) def rms_norm(x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor]=None, eps: float=1e-05): norm_ndim = len(normalized_shape) if torch.jit.is_scripting(): assert norm_ndim == 1 v = torch.var(x, dim=-1).unsqueeze(-1) else: dims = tuple(range(-1, -norm_ndim - 1, -1)) v = torch.var(x, dim=dims, keepdim=True) x = x * torch.rsqrt(v + eps) if weight is not None: x = x * weight return x def fast_rms_norm(x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor]=None, eps: float=1e-05) -> torch.Tensor: if torch.jit.is_scripting(): return rms_norm(x, normalized_shape, weight, eps) if has_apex_rmsnorm: if weight is None: return fused_rms_norm(x, normalized_shape, eps) else: return fused_rms_norm_affine(x, weight, normalized_shape, eps) return rms_norm(x, normalized_shape, weight, eps) # File: pytorch-image-models-main/timm/layers/filter_response_norm.py """""" import torch import torch.nn as nn from .create_act import create_act_layer from .trace_utils import _assert def inv_instance_rms(x, eps: float=1e-05): rms = x.square().float().mean(dim=(2, 3), keepdim=True).add(eps).rsqrt().to(x.dtype) return rms.expand(x.shape) class FilterResponseNormTlu2d(nn.Module): def __init__(self, num_features, apply_act=True, eps=1e-05, rms=True, **_): super(FilterResponseNormTlu2d, self).__init__() self.apply_act = apply_act self.rms = rms self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.tau = nn.Parameter(torch.zeros(num_features)) if apply_act else None self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.tau is not None: nn.init.zeros_(self.tau) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = x * inv_instance_rms(x, self.eps) x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) return torch.maximum(x, self.tau.reshape(v_shape).to(dtype=x_dtype)) if self.tau is not None else x class FilterResponseNormAct2d(nn.Module): def __init__(self, num_features, apply_act=True, act_layer=nn.ReLU, inplace=None, rms=True, eps=1e-05, **_): super(FilterResponseNormAct2d, self).__init__() if act_layer is not None and apply_act: self.act = create_act_layer(act_layer, inplace=inplace) else: 
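# --- Usage sketch for the fast_norm helpers above (illustrative, not part of the timm source).
# The fast_* wrappers fall back to the plain F.* ops under torchscript, use apex fused kernels
# when importable, and otherwise run the norm in autocast's target dtype rather than float32.
import torch
from timm.layers.fast_norm import rms_norm, set_fast_norm, is_fast_norm
x = torch.randn(2, 8, 768)
y = rms_norm(x, [768], weight=torch.ones(768))  # eager-mode reference path
set_fast_norm(True)  # opt timm's norm layers into the fast paths
assert is_fast_norm()
# ---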
self.act = nn.Identity() self.rms = rms self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = x * inv_instance_rms(x, self.eps) x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) return self.act(x) # File: pytorch-image-models-main/timm/layers/format.py from enum import Enum from typing import Union import torch class Format(str, Enum): NCHW = 'NCHW' NHWC = 'NHWC' NCL = 'NCL' NLC = 'NLC' FormatT = Union[str, Format] def get_spatial_dim(fmt: FormatT): fmt = Format(fmt) if fmt is Format.NLC: dim = (1,) elif fmt is Format.NCL: dim = (2,) elif fmt is Format.NHWC: dim = (1, 2) else: dim = (2, 3) return dim def get_channel_dim(fmt: FormatT): fmt = Format(fmt) if fmt is Format.NHWC: dim = 3 elif fmt is Format.NLC: dim = 2 else: dim = 1 return dim def nchw_to(x: torch.Tensor, fmt: Format): if fmt == Format.NHWC: x = x.permute(0, 2, 3, 1) elif fmt == Format.NLC: x = x.flatten(2).transpose(1, 2) elif fmt == Format.NCL: x = x.flatten(2) return x def nhwc_to(x: torch.Tensor, fmt: Format): if fmt == Format.NCHW: x = x.permute(0, 3, 1, 2) elif fmt == Format.NLC: x = x.flatten(1, 2) elif fmt == Format.NCL: x = x.flatten(1, 2).transpose(1, 2) return x # File: pytorch-image-models-main/timm/layers/gather_excite.py """""" import math from torch import nn as nn import torch.nn.functional as F from .create_act import create_act_layer, get_act_layer from .create_conv2d import create_conv2d from .helpers import make_divisible from .mlp import ConvMlp class GatherExcite(nn.Module): def __init__(self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True, rd_ratio=1.0 / 16, rd_channels=None, rd_divisor=1, add_maxpool=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'): super(GatherExcite, self).__init__() self.add_maxpool = add_maxpool act_layer = get_act_layer(act_layer) self.extent = extent if extra_params: self.gather = nn.Sequential() if extent == 0: assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params' self.gather.add_module('conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True)) if norm_layer: self.gather.add_module(f'norm1', nn.BatchNorm2d(channels)) else: assert extent % 2 == 0 num_conv = int(math.log2(extent)) for i in range(num_conv): self.gather.add_module(f'conv{i + 1}', create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True)) if norm_layer: self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels)) if i != num_conv - 1: self.gather.add_module(f'act{i + 1}', act_layer(inplace=True)) else: self.gather = None if self.extent == 0: self.gk = 0 self.gs = 0 else: assert extent % 2 == 0 self.gk = self.extent * 2 - 1 self.gs = self.extent if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.0) self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity() self.gate = create_act_layer(gate_layer) def forward(self, x): size = x.shape[-2:] if self.gather is not None: x_ge = self.gather(x) elif self.extent == 0: x_ge = x.mean(dim=(2, 3), keepdims=True) if self.add_maxpool: x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True) else: x_ge = F.avg_pool2d(x, kernel_size=self.gk, 
stride=self.gs, padding=self.gk // 2, count_include_pad=False) if self.add_maxpool: x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2) x_ge = self.mlp(x_ge) if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1: x_ge = F.interpolate(x_ge, size=size) return x * self.gate(x_ge) # File: pytorch-image-models-main/timm/layers/global_context.py """""" from torch import nn as nn import torch.nn.functional as F from .create_act import create_act_layer, get_act_layer from .helpers import make_divisible from .mlp import ConvMlp from .norm import LayerNorm2d class GlobalContext(nn.Module): def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, rd_ratio=1.0 / 8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): super(GlobalContext, self).__init__() act_layer = get_act_layer(act_layer) self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None if rd_channels is None: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.0) if fuse_add: self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) else: self.mlp_add = None if fuse_scale: self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) else: self.mlp_scale = None self.gate = create_act_layer(gate_layer) self.init_last_zero = init_last_zero self.reset_parameters() def reset_parameters(self): if self.conv_attn is not None: nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') if self.mlp_add is not None: nn.init.zeros_(self.mlp_add.fc2.weight) def forward(self, x): (B, C, H, W) = x.shape if self.conv_attn is not None: attn = self.conv_attn(x).reshape(B, 1, H * W) attn = F.softmax(attn, dim=-1).unsqueeze(3) context = x.reshape(B, C, H * W).unsqueeze(1) @ attn context = context.view(B, C, 1, 1) else: context = x.mean(dim=(2, 3), keepdim=True) if self.mlp_scale is not None: mlp_x = self.mlp_scale(context) x = x * self.gate(mlp_x) if self.mlp_add is not None: mlp_x = self.mlp_add(context) x = x + mlp_x return x # File: pytorch-image-models-main/timm/layers/grid.py from typing import Tuple import torch def ndgrid(*tensors) -> Tuple[torch.Tensor, ...]: try: return torch.meshgrid(*tensors, indexing='ij') except TypeError: return torch.meshgrid(*tensors) def meshgrid(*tensors) -> Tuple[torch.Tensor, ...]: return torch.meshgrid(*tensors, indexing='xy') # File: pytorch-image-models-main/timm/layers/grn.py """""" import torch from torch import nn as nn class GlobalResponseNorm(nn.Module): def __init__(self, dim, eps=1e-06, channels_last=True): super().__init__() self.eps = eps if channels_last: self.spatial_dim = (1, 2) self.channel_dim = -1 self.wb_shape = (1, 1, 1, -1) else: self.spatial_dim = (2, 3) self.channel_dim = 1 self.wb_shape = (1, -1, 1, 1) self.weight = nn.Parameter(torch.zeros(dim)) self.bias = nn.Parameter(torch.zeros(dim)) def forward(self, x): x_g = x.norm(p=2, dim=self.spatial_dim, keepdim=True) x_n = x_g / (x_g.mean(dim=self.channel_dim, keepdim=True) + self.eps) return x + torch.addcmul(self.bias.view(self.wb_shape), self.weight.view(self.wb_shape), x * x_n) # File: pytorch-image-models-main/timm/layers/halo_attn.py """""" from typing import List import torch from torch import nn import torch.nn.functional as F from .helpers import make_divisible from .weight_init import trunc_normal_ from .trace_utils import _assert def rel_logits_1d(q, rel_k, permute_mask: List[int]): (B, H, W, dim) = 
q.shape rel_size = rel_k.shape[0] win_size = (rel_size + 1) // 2 x = q @ rel_k.transpose(-1, -2) x = x.reshape(-1, W, rel_size) x_pad = F.pad(x, [0, 1]).flatten(1) x_pad = F.pad(x_pad, [0, rel_size - W]) x_pad = x_pad.reshape(-1, W + 1, rel_size) x = x_pad[:, :W, win_size - 1:] x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1) return x.permute(permute_mask) class PosEmbedRel(nn.Module): def __init__(self, block_size, win_size, dim_head, scale): super().__init__() self.block_size = block_size self.dim_head = dim_head self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) def forward(self, q): (B, BB, HW, _) = q.shape q = q.reshape(-1, self.block_size, self.block_size, self.dim_head) rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) q = q.transpose(1, 2) rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) rel_logits = rel_logits_h + rel_logits_w rel_logits = rel_logits.reshape(B, BB, HW, -1) return rel_logits class HaloAttn(nn.Module): def __init__(self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3, qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False): super().__init__() dim_out = dim_out or dim assert dim_out % num_heads == 0 assert stride in (1, 2) self.num_heads = num_heads self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.dim_head_v = dim_out // self.num_heads self.dim_out_qk = num_heads * self.dim_head_qk self.dim_out_v = num_heads * self.dim_head_v self.scale = self.dim_head_qk ** (-0.5) self.scale_pos_embed = scale_pos_embed self.block_size = self.block_size_ds = block_size self.halo_size = halo_size self.win_size = block_size + halo_size * 2 self.block_stride = 1 use_avg_pool = False if stride > 1: use_avg_pool = avg_down or block_size % stride != 0 self.block_stride = 1 if use_avg_pool else stride self.block_size_ds = self.block_size // self.block_stride self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias) self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias) self.pos_embed = PosEmbedRel(block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale) self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity() self.reset_parameters() def reset_parameters(self): std = self.q.weight.shape[1] ** (-0.5) trunc_normal_(self.q.weight, std=std) trunc_normal_(self.kv.weight, std=std) trunc_normal_(self.pos_embed.height_rel, std=self.scale) trunc_normal_(self.pos_embed.width_rel, std=self.scale) def forward(self, x): (B, C, H, W) = x.shape _assert(H % self.block_size == 0, '') _assert(W % self.block_size == 0, '') num_h_blocks = H // self.block_size num_w_blocks = W // self.block_size num_blocks = num_h_blocks * num_w_blocks q = self.q(x) q = q.reshape(-1, self.dim_head_qk, num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4) q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3) kv = self.kv(x) kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]) kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape(B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1) (k, v) = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1) if 
self.scale_pos_embed: attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale else: attn = q @ k.transpose(-1, -2) * self.scale + self.pos_embed(q) attn = attn.softmax(dim=-1) out = (attn @ v).transpose(1, 3) out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks) out = out.permute(0, 3, 1, 4, 2).contiguous().view(B, self.dim_out_v, H // self.block_stride, W // self.block_stride) out = self.pool(out) return out # File: pytorch-image-models-main/timm/layers/helpers.py """""" from itertools import repeat import collections.abc def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable) and (not isinstance(x, str)): return tuple(x) return tuple(repeat(x, n)) return parse to_1tuple = _ntuple(1) to_2tuple = _ntuple(2) to_3tuple = _ntuple(3) to_4tuple = _ntuple(4) to_ntuple = _ntuple def make_divisible(v, divisor=8, min_value=None, round_limit=0.9): min_value = min_value or divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) if new_v < round_limit * v: new_v += divisor return new_v def extend_tuple(x, n): if not isinstance(x, (tuple, list)): x = (x,) else: x = tuple(x) pad_n = n - len(x) if pad_n <= 0: return x[:n] return x + (x[-1],) * pad_n # File: pytorch-image-models-main/timm/layers/hybrid_embed.py """""" import logging import math from typing import List, Optional, Tuple, Union import torch from torch import nn as nn import torch.nn.functional as F from .format import Format, nchw_to from .helpers import to_2tuple from .patch_embed import resample_patch_embed _logger = logging.getLogger(__name__) class HybridEmbed(nn.Module): output_fmt: Format dynamic_img_pad: torch.jit.Final[bool] def __init__(self, backbone: nn.Module, img_size: Union[int, Tuple[int, int]]=224, patch_size: Union[int, Tuple[int, int]]=1, feature_size: Optional[Union[int, Tuple[int, int]]]=None, feature_ratio: Optional[Union[int, Tuple[int, int]]]=None, in_chans: int=3, embed_dim: int=768, bias: bool=True, proj: bool=True, flatten: bool=True, output_fmt: Optional[str]=None, strict_img_size: bool=True, dynamic_img_pad: bool=False): super().__init__() assert isinstance(backbone, nn.Module) self.backbone = backbone self.in_chans = in_chans (self.img_size, self.patch_size, self.feature_size, self.feature_ratio, self.feature_dim, self.grid_size, self.num_patches) = self._init_backbone(img_size=img_size, patch_size=patch_size, feature_size=feature_size, feature_ratio=feature_ratio) if output_fmt is not None: self.flatten = False self.output_fmt = Format(output_fmt) else: self.flatten = flatten self.output_fmt = Format.NCHW self.strict_img_size = strict_img_size self.dynamic_img_pad = dynamic_img_pad if not dynamic_img_pad: assert self.feature_size[0] % self.patch_size[0] == 0 and self.feature_size[1] % self.patch_size[1] == 0 if proj: self.proj = nn.Conv2d(self.feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) else: assert self.feature_dim == embed_dim, f'The feature dim ({self.feature_dim}) must match embed dim ({embed_dim}) when projection disabled.'
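# NOTE (editorial comment, added): in the proj=False branch above, the backbone features are
# used directly as the patch embedding, so feature_dim must already equal embed_dim
# (enforced by the assert) and the projection becomes an Identity pass-through below.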
self.proj = nn.Identity() def _init_backbone(self, img_size: Union[int, Tuple[int, int]]=224, patch_size: Union[int, Tuple[int, int]]=1, feature_size: Optional[Union[int, Tuple[int, int]]]=None, feature_ratio: Optional[Union[int, Tuple[int, int]]]=None, feature_dim: Optional[int]=None): img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) if feature_size is None: with torch.no_grad(): training = self.backbone.training if training: self.backbone.eval() o = self.backbone(torch.zeros(1, self.in_chans, img_size[0], img_size[1])) if isinstance(o, (list, tuple)): o = o[-1] feature_size = o.shape[-2:] feature_dim = o.shape[1] self.backbone.train(training) feature_ratio = tuple([s // f for (s, f) in zip(img_size, feature_size)]) else: feature_size = to_2tuple(feature_size) feature_ratio = to_2tuple(feature_ratio or 16) if feature_dim is None: if hasattr(self.backbone, 'feature_info'): feature_dim = self.backbone.feature_info.channels()[-1] else: feature_dim = self.backbone.num_features grid_size = tuple([f // p for (f, p) in zip(feature_size, patch_size)]) num_patches = grid_size[0] * grid_size[1] return (img_size, patch_size, feature_size, feature_ratio, feature_dim, grid_size, num_patches) def set_input_size(self, img_size: Optional[Union[int, Tuple[int, int]]]=None, patch_size: Optional[Union[int, Tuple[int, int]]]=None, feature_size: Optional[Union[int, Tuple[int, int]]]=None, feature_ratio: Optional[Union[int, Tuple[int, int]]]=None, feature_dim: Optional[int]=None): assert img_size is not None or patch_size is not None img_size = img_size or self.img_size new_patch_size = None if patch_size is not None: new_patch_size = to_2tuple(patch_size) if new_patch_size is not None and new_patch_size != self.patch_size: assert isinstance(self.proj, nn.Conv2d), 'HybridEmbed must have a projection layer to change patch size.' 
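# Aside (added): a minimal usage sketch for HybridEmbed, assuming timm is installed and
# HybridEmbed is re-exported from timm.layers (otherwise import from
# timm.layers.hybrid_embed). The tiny conv stack is a hypothetical stand-in for a
# real CNN backbone; when feature_size is None, HybridEmbed infers the feature map
# size and channel dim by running a zero tensor through the backbone.
import torch
import torch.nn as nn
from timm.layers import HybridEmbed

backbone = nn.Sequential(
    nn.Conv2d(3, 64, kernel_size=7, stride=16, padding=3),  # 224x224 input -> 14x14 feature map
    nn.ReLU(),
)
embed = HybridEmbed(backbone, img_size=224, patch_size=1, embed_dim=768)
tokens = embed(torch.randn(1, 3, 224, 224))
print(tokens.shape)  # torch.Size([1, 196, 768]) -- 14*14 patch tokens of dim 768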
with torch.no_grad(): new_proj = nn.Conv2d(self.proj.in_channels, self.proj.out_channels, kernel_size=new_patch_size, stride=new_patch_size, bias=self.proj.bias is not None) new_proj.weight.copy_(resample_patch_embed(self.proj.weight, new_patch_size, verbose=True)) if self.proj.bias is not None: new_proj.bias.copy_(self.proj.bias) self.proj = new_proj patch_size = new_patch_size patch_size = patch_size or self.patch_size if img_size != self.img_size or patch_size != self.patch_size: (self.img_size, self.patch_size, self.feature_size, self.feature_ratio, self.feature_dim, self.grid_size, self.num_patches) = self._init_backbone(img_size=img_size, patch_size=patch_size, feature_size=feature_size, feature_ratio=feature_ratio, feature_dim=feature_dim) def feat_ratio(self, as_scalar=True) -> Union[Tuple[int, int], int]: total_reduction = (self.feature_ratio[0] * self.patch_size[0], self.feature_ratio[1] * self.patch_size[1]) if as_scalar: return max(total_reduction) else: return total_reduction def dynamic_feat_size(self, img_size: Tuple[int, int]) -> Tuple[int, int]: feat_size = (img_size[0] // self.feature_ratio[0], img_size[1] // self.feature_ratio[1]) if self.dynamic_img_pad: return (math.ceil(feat_size[0] / self.patch_size[0]), math.ceil(feat_size[1] / self.patch_size[1])) else: return (feat_size[0] // self.patch_size[0], feat_size[1] // self.patch_size[1]) @torch.jit.ignore def set_grad_checkpointing(self, enable: bool=True): if hasattr(self.backbone, 'set_grad_checkpointing'): self.backbone.set_grad_checkpointing(enable=enable) elif hasattr(self.backbone, 'grad_checkpointing'): self.backbone.grad_checkpointing = enable def forward(self, x): x = self.backbone(x) if isinstance(x, (list, tuple)): x = x[-1] (_, _, H, W) = x.shape if self.dynamic_img_pad: pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0] pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1] x = F.pad(x, (0, pad_w, 0, pad_h)) x = self.proj(x) if self.flatten: x = x.flatten(2).transpose(1, 2) elif self.output_fmt != Format.NCHW: x = nchw_to(x, self.output_fmt) return x class HybridEmbedWithSize(HybridEmbed): def __init__(self, backbone: nn.Module, img_size: Union[int, Tuple[int, int]]=224, patch_size: Union[int, Tuple[int, int]]=1, feature_size: Optional[Union[int, Tuple[int, int]]]=None, feature_ratio: Optional[Union[int, Tuple[int, int]]]=None, in_chans: int=3, embed_dim: int=768, bias=True, proj=True): super().__init__(backbone=backbone, img_size=img_size, patch_size=patch_size, feature_size=feature_size, feature_ratio=feature_ratio, in_chans=in_chans, embed_dim=embed_dim, bias=bias, proj=proj) @torch.jit.ignore def set_grad_checkpointing(self, enable: bool=True): if hasattr(self.backbone, 'set_grad_checkpointing'): self.backbone.set_grad_checkpointing(enable=enable) elif hasattr(self.backbone, 'grad_checkpointing'): self.backbone.grad_checkpointing = enable def forward(self, x) -> Tuple[torch.Tensor, List[int]]: x = self.backbone(x) if isinstance(x, (list, tuple)): x = x[-1] x = self.proj(x) return (x.flatten(2).transpose(1, 2), x.shape[-2:]) # File: pytorch-image-models-main/timm/layers/inplace_abn.py import torch from torch import nn as nn try: from inplace_abn.functions import inplace_abn, inplace_abn_sync has_iabn = True except ImportError: has_iabn = False def inplace_abn(x, weight, bias, running_mean, running_var, training=True, momentum=0.1, eps=1e-05, activation='leaky_relu', activation_param=0.01): raise ImportError("Please install InplaceABN:'pip install 
git+https://github.com/mapillary/inplace_abn.git@v1.0.12'") def inplace_abn_sync(**kwargs): inplace_abn(**kwargs) class InplaceAbn(nn.Module): def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, apply_act=True, act_layer='leaky_relu', act_param=0.01, drop_layer=None): super(InplaceAbn, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps self.momentum = momentum if apply_act: if isinstance(act_layer, str): assert act_layer in ('leaky_relu', 'elu', 'identity', '') self.act_name = act_layer if act_layer else 'identity' elif act_layer == nn.ELU: self.act_name = 'elu' elif act_layer == nn.LeakyReLU: self.act_name = 'leaky_relu' elif act_layer is None or act_layer == nn.Identity: self.act_name = 'identity' else: assert False, f'Invalid act layer {act_layer.__name__} for IABN' else: self.act_name = 'identity' self.act_param = act_param if self.affine: self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.constant_(self.running_mean, 0) nn.init.constant_(self.running_var, 1) if self.affine: nn.init.constant_(self.weight, 1) nn.init.constant_(self.bias, 0) def forward(self, x): output = inplace_abn(x, self.weight, self.bias, self.running_mean, self.running_var, self.training, self.momentum, self.eps, self.act_name, self.act_param) if isinstance(output, tuple): output = output[0] return output # File: pytorch-image-models-main/timm/layers/interpolate.py """""" import torch from itertools import product class RegularGridInterpolator: def __init__(self, points, values): self.points = points self.values = values assert isinstance(self.points, tuple) or isinstance(self.points, list) assert isinstance(self.values, torch.Tensor) self.ms = list(self.values.shape) self.n = len(self.points) assert len(self.ms) == self.n for (i, p) in enumerate(self.points): assert isinstance(p, torch.Tensor) assert p.shape[0] == self.values.shape[i] def __call__(self, points_to_interp): assert self.points is not None assert self.values is not None assert len(points_to_interp) == len(self.points) K = points_to_interp[0].shape[0] for x in points_to_interp: assert x.shape[0] == K idxs = [] dists = [] overalls = [] for (p, x) in zip(self.points, points_to_interp): idx_right = torch.bucketize(x, p) idx_right[idx_right >= p.shape[0]] = p.shape[0] - 1 idx_left = (idx_right - 1).clamp(0, p.shape[0] - 1) dist_left = x - p[idx_left] dist_right = p[idx_right] - x dist_left[dist_left < 0] = 0.0 dist_right[dist_right < 0] = 0.0 both_zero = (dist_left == 0) & (dist_right == 0) dist_left[both_zero] = dist_right[both_zero] = 1.0 idxs.append((idx_left, idx_right)) dists.append((dist_left, dist_right)) overalls.append(dist_left + dist_right) numerator = 0.0 for indexer in product([0, 1], repeat=self.n): as_s = [idx[onoff] for (onoff, idx) in zip(indexer, idxs)] bs_s = [dist[1 - onoff] for (onoff, dist) in zip(indexer, dists)] numerator += self.values[as_s] * torch.prod(torch.stack(bs_s), dim=0) denominator = torch.prod(torch.stack(overalls), dim=0) return numerator / denominator # File: pytorch-image-models-main/timm/layers/lambda_layer.py """""" import torch from torch import nn import torch.nn.functional as F from .grid import ndgrid from .helpers import to_2tuple, 
make_divisible from .weight_init import trunc_normal_ def rel_pos_indices(size): size = to_2tuple(size) pos = torch.stack(ndgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1) rel_pos = pos[:, None, :] - pos[:, :, None] rel_pos[0] += size[0] - 1 rel_pos[1] += size[1] - 1 return rel_pos class LambdaLayer(nn.Module): def __init__(self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=9, qk_ratio=1.0, qkv_bias=False): super().__init__() dim_out = dim_out or dim assert dim_out % num_heads == 0, ' should be divided by num_heads' self.dim_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.num_heads = num_heads self.dim_v = dim_out // num_heads self.qkv = nn.Conv2d(dim, num_heads * self.dim_qk + self.dim_qk + self.dim_v, kernel_size=1, bias=qkv_bias) self.norm_q = nn.BatchNorm2d(num_heads * self.dim_qk) self.norm_v = nn.BatchNorm2d(self.dim_v) if r is not None: self.conv_lambda = nn.Conv3d(1, self.dim_qk, (r, r, 1), padding=(r // 2, r // 2, 0)) self.pos_emb = None self.rel_pos_indices = None else: assert feat_size is not None feat_size = to_2tuple(feat_size) rel_size = [2 * s - 1 for s in feat_size] self.conv_lambda = None self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_qk)) self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False) self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() self.reset_parameters() def reset_parameters(self): trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** (-0.5)) if self.conv_lambda is not None: trunc_normal_(self.conv_lambda.weight, std=self.dim_qk ** (-0.5)) if self.pos_emb is not None: trunc_normal_(self.pos_emb, std=0.02) def forward(self, x): (B, C, H, W) = x.shape M = H * W qkv = self.qkv(x) (q, k, v) = torch.split(qkv, [self.num_heads * self.dim_qk, self.dim_qk, self.dim_v], dim=1) q = self.norm_q(q).reshape(B, self.num_heads, self.dim_qk, M).transpose(-1, -2) v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) k = F.softmax(k.reshape(B, self.dim_qk, M), dim=-1) content_lam = k @ v content_out = q @ content_lam.unsqueeze(1) if self.pos_emb is None: position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) position_lam = position_lam.reshape(B, 1, self.dim_qk, H * W, self.dim_v).transpose(2, 3) else: pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1) position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) out = self.pool(out) return out # File: pytorch-image-models-main/timm/layers/layer_scale.py import torch from torch import nn class LayerScale(nn.Module): def __init__(self, dim: int, init_values: float=1e-05, inplace: bool=False) -> None: super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x: torch.Tensor) -> torch.Tensor: return x.mul_(self.gamma) if self.inplace else x * self.gamma class LayerScale2d(nn.Module): def __init__(self, dim: int, init_values: float=1e-05, inplace: bool=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma # File: pytorch-image-models-main/timm/layers/linear.py """""" import torch import torch.nn.functional as F from torch import nn as nn class 
Linear(nn.Linear): def forward(self, input: torch.Tensor) -> torch.Tensor: if torch.jit.is_scripting(): bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) else: return F.linear(input, self.weight, self.bias) # File: pytorch-image-models-main/timm/layers/median_pool.py """""" import torch.nn as nn import torch.nn.functional as F from .helpers import to_2tuple, to_4tuple class MedianPool2d(nn.Module): def __init__(self, kernel_size=3, stride=1, padding=0, same=False): super(MedianPool2d, self).__init__() self.k = to_2tuple(kernel_size) self.stride = to_2tuple(stride) self.padding = to_4tuple(padding) self.same = same def _padding(self, x): if self.same: (ih, iw) = x.size()[2:] if ih % self.stride[0] == 0: ph = max(self.k[0] - self.stride[0], 0) else: ph = max(self.k[0] - ih % self.stride[0], 0) if iw % self.stride[1] == 0: pw = max(self.k[1] - self.stride[1], 0) else: pw = max(self.k[1] - iw % self.stride[1], 0) pl = pw // 2 pr = pw - pl pt = ph // 2 pb = ph - pt padding = (pl, pr, pt, pb) else: padding = self.padding return padding def forward(self, x): x = F.pad(x, self._padding(x), mode='reflect') x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] return x # File: pytorch-image-models-main/timm/layers/mixed_conv2d.py """""" import torch from torch import nn as nn from .conv2d_same import create_conv2d_pad def _split_channels(num_chan, num_groups): split = [num_chan // num_groups for _ in range(num_groups)] split[0] += num_chan - sum(split) return split class MixedConv2d(nn.ModuleDict): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, depthwise=False, **kwargs): super(MixedConv2d, self).__init__() kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] num_groups = len(kernel_size) in_splits = _split_channels(in_channels, num_groups) out_splits = _split_channels(out_channels, num_groups) self.in_channels = sum(in_splits) self.out_channels = sum(out_splits) for (idx, (k, in_ch, out_ch)) in enumerate(zip(kernel_size, in_splits, out_splits)): conv_groups = in_ch if depthwise else 1 self.add_module(str(idx), create_conv2d_pad(in_ch, out_ch, k, stride=stride, padding=padding, dilation=dilation, groups=conv_groups, **kwargs)) self.splits = in_splits def forward(self, x): x_split = torch.split(x, self.splits, 1) x_out = [c(x_split[i]) for (i, c) in enumerate(self.values())] x = torch.cat(x_out, 1) return x # File: pytorch-image-models-main/timm/layers/ml_decoder.py from typing import Optional import torch from torch import nn from torch import nn, Tensor from torch.nn.modules.transformer import _get_activation_fn def add_ml_decoder_head(model): if hasattr(model, 'global_pool') and hasattr(model, 'fc'): model.global_pool = nn.Identity() del model.fc num_classes = model.num_classes num_features = model.num_features model.fc = MLDecoder(num_classes=num_classes, initial_num_features=num_features) elif hasattr(model, 'global_pool') and hasattr(model, 'classifier'): model.global_pool = nn.Identity() del model.classifier num_classes = model.num_classes num_features = model.num_features model.classifier = MLDecoder(num_classes=num_classes, initial_num_features=num_features) elif 'RegNet' in model._get_name() or 'TResNet' in model._get_name(): del model.head num_classes = model.num_classes num_features = model.num_features model.head = 
MLDecoder(num_classes=num_classes, initial_num_features=num_features) else: print('Model code-writing is not aligned currently with ml-decoder') exit(-1) if hasattr(model, 'drop_rate'): model.drop_rate = 0 return model class TransformerDecoderLayerOptimal(nn.Module): def __init__(self, d_model, nhead=8, dim_feedforward=2048, dropout=0.1, activation='relu', layer_norm_eps=1e-05) -> None: super(TransformerDecoderLayerOptimal, self).__init__() self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.dropout = nn.Dropout(dropout) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.activation = _get_activation_fn(activation) def __setstate__(self, state): if 'activation' not in state: state['activation'] = torch.nn.functional.relu super(TransformerDecoderLayerOptimal, self).__setstate__(state) def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None) -> Tensor: tgt = tgt + self.dropout1(tgt) tgt = self.norm1(tgt) tgt2 = self.multihead_attn(tgt, memory, memory)[0] tgt = tgt + self.dropout2(tgt2) tgt = self.norm2(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) tgt = tgt + self.dropout3(tgt2) tgt = self.norm3(tgt) return tgt class MLDecoder(nn.Module): def __init__(self, num_classes, num_of_groups=-1, decoder_embedding=768, initial_num_features=2048): super(MLDecoder, self).__init__() embed_len_decoder = 100 if num_of_groups < 0 else num_of_groups if embed_len_decoder > num_classes: embed_len_decoder = num_classes self.embed_len_decoder = embed_len_decoder decoder_embedding = 768 if decoder_embedding < 0 else decoder_embedding self.embed_standart = nn.Linear(initial_num_features, decoder_embedding) decoder_dropout = 0.1 num_layers_decoder = 1 dim_feedforward = 2048 layer_decode = TransformerDecoderLayerOptimal(d_model=decoder_embedding, dim_feedforward=dim_feedforward, dropout=decoder_dropout) self.decoder = nn.TransformerDecoder(layer_decode, num_layers=num_layers_decoder) self.query_embed = nn.Embedding(embed_len_decoder, decoder_embedding) self.query_embed.requires_grad_(False) self.num_classes = num_classes self.duplicate_factor = int(num_classes / embed_len_decoder + 0.999) self.duplicate_pooling = torch.nn.Parameter(torch.Tensor(embed_len_decoder, decoder_embedding, self.duplicate_factor)) self.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(num_classes)) torch.nn.init.xavier_normal_(self.duplicate_pooling) torch.nn.init.constant_(self.duplicate_pooling_bias, 0) def forward(self, x): if len(x.shape) == 4: embedding_spatial = x.flatten(2).transpose(1, 2) else: embedding_spatial = x embedding_spatial_786 = self.embed_standart(embedding_spatial) embedding_spatial_786 = torch.nn.functional.relu(embedding_spatial_786, inplace=True) bs = embedding_spatial_786.shape[0] query_embed = self.query_embed.weight tgt = query_embed.unsqueeze(1).expand(-1, bs, -1) h = self.decoder(tgt, embedding_spatial_786.transpose(0, 1)) h = h.transpose(0, 1) out_extrap = torch.zeros(h.shape[0], h.shape[1], self.duplicate_factor, device=h.device, dtype=h.dtype) for i in 
range(self.embed_len_decoder): h_i = h[:, i, :] w_i = self.duplicate_pooling[i, :, :] out_extrap[:, i, :] = torch.matmul(h_i, w_i) h_out = out_extrap.flatten(1)[:, :self.num_classes] h_out += self.duplicate_pooling_bias logits = h_out return logits # File: pytorch-image-models-main/timm/layers/mlp.py """""" from functools import partial from torch import nn as nn from .grn import GlobalResponseNorm from .helpers import to_2tuple class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=None, bias=True, drop=0.0, use_conv=False): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class GluMlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, norm_layer=None, bias=True, drop=0.0, use_conv=False, gate_last=True): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features assert hidden_features % 2 == 0 bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.chunk_dim = 1 if use_conv else -1 self.gate_last = gate_last self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features // 2) if norm_layer is not None else nn.Identity() self.fc2 = linear_layer(hidden_features // 2, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def init_weights(self): fc1_mid = self.fc1.bias.shape[0] // 2 nn.init.ones_(self.fc1.bias[fc1_mid:]) nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-06) def forward(self, x): x = self.fc1(x) (x1, x2) = x.chunk(2, dim=self.chunk_dim) x = x1 * self.act(x2) if self.gate_last else self.act(x1) * x2 x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x SwiGLUPacked = partial(GluMlp, act_layer=nn.SiLU, gate_last=False) class SwiGLU(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, norm_layer=None, bias=True, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) self.fc1_g = nn.Linear(in_features, hidden_features, bias=bias[0]) self.fc1_x = nn.Linear(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def init_weights(self): nn.init.ones_(self.fc1_g.bias) nn.init.normal_(self.fc1_g.weight, std=1e-06) def forward(self, x): x_gate = self.fc1_g(x) x = self.fc1_x(x) x = self.act(x_gate) * x x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = 
self.drop2(x) return x class GatedMlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=None, gate_layer=None, bias=True, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) if gate_layer is not None: assert hidden_features % 2 == 0 self.gate = gate_layer(hidden_features) hidden_features = hidden_features // 2 else: self.gate = nn.Identity() self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.gate(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class ConvMlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, bias=True, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0]) self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() self.act = act_layer() self.drop = nn.Dropout(drop) self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1]) def forward(self, x): x = self.fc1(x) x = self.norm(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) return x class GlobalResponseNormMlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, drop=0.0, use_conv=False): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.grn = GlobalResponseNorm(hidden_features, channels_last=not use_conv) self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.grn(x) x = self.fc2(x) x = self.drop2(x) return x # File: pytorch-image-models-main/timm/layers/non_local_attn.py """""" import torch from torch import nn from torch.nn import functional as F from .conv_bn_act import ConvNormAct from .helpers import make_divisible from .trace_utils import _assert class NonLocalAttn(nn.Module): def __init__(self, in_channels, use_scale=True, rd_ratio=1 / 8, rd_channels=None, rd_divisor=8, **kwargs): super(NonLocalAttn, self).__init__() if rd_channels is None: rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) self.scale = in_channels ** (-0.5) if use_scale else 1.0 self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True) self.norm = nn.BatchNorm2d(in_channels) self.reset_parameters() def forward(self, x): shortcut = x t = self.t(x) 
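# NOTE (editorial comment, added): t/p/g above are 1x1-conv query/key/value projections of
# embedded-Gaussian non-local attention; below they are flattened over the H*W spatial
# positions, the attention map is softmax(t @ p * scale), and the z projection plus
# BatchNorm map back to in_channels before the residual shortcut is added.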
p = self.p(x) g = self.g(x) (B, C, H, W) = t.size() t = t.view(B, C, -1).permute(0, 2, 1) p = p.view(B, C, -1) g = g.view(B, C, -1).permute(0, 2, 1) att = torch.bmm(t, p) * self.scale att = F.softmax(att, dim=2) x = torch.bmm(att, g) x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.z(x) x = self.norm(x) + shortcut return x def reset_parameters(self): for (name, m) in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if len(list(m.parameters())) > 1: nn.init.constant_(m.bias, 0.0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 0) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.GroupNorm): nn.init.constant_(m.weight, 0) nn.init.constant_(m.bias, 0) class BilinearAttnTransform(nn.Module): def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): super(BilinearAttnTransform, self).__init__() self.conv1 = ConvNormAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer) self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1)) self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size)) self.conv2 = ConvNormAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) self.block_size = block_size self.groups = groups self.in_channels = in_channels def resize_mat(self, x, t: int): (B, C, block_size, block_size1) = x.shape _assert(block_size == block_size1, '') if t <= 1: return x x = x.view(B * C, -1, 1, 1) x = x * torch.eye(t, t, dtype=x.dtype, device=x.device) x = x.view(B * C, block_size, block_size, t, t) x = torch.cat(torch.split(x, 1, dim=1), dim=3) x = torch.cat(torch.split(x, 1, dim=2), dim=4) x = x.view(B, C, block_size * t, block_size * t) return x def forward(self, x): _assert(x.shape[-1] % self.block_size == 0, '') _assert(x.shape[-2] % self.block_size == 0, '') (B, C, H, W) = x.shape out = self.conv1(x) rp = F.adaptive_max_pool2d(out, (self.block_size, 1)) cp = F.adaptive_max_pool2d(out, (1, self.block_size)) p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size).sigmoid() q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size).sigmoid() p = p / p.sum(dim=3, keepdim=True) q = q / q.sum(dim=2, keepdim=True) p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size(0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() p = p.view(B, C, self.block_size, self.block_size) q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size(0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() q = q.view(B, C, self.block_size, self.block_size) p = self.resize_mat(p, H // self.block_size) q = self.resize_mat(q, W // self.block_size) y = p.matmul(x) y = y.matmul(q) y = self.conv2(y) return y class BatNonLocalAttn(nn.Module): def __init__(self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_): super().__init__() if rd_channels is None: rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) self.conv1 = ConvNormAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer) self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer) self.conv2 = ConvNormAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) self.dropout = 
nn.Dropout2d(p=drop_rate) def forward(self, x): xl = self.conv1(x) y = self.ba(xl) y = self.conv2(y) y = self.dropout(y) return y + x # File: pytorch-image-models-main/timm/layers/norm.py """""" import numbers from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from .fast_norm import is_fast_norm, fast_group_norm, fast_layer_norm, fast_rms_norm class GroupNorm(nn.GroupNorm): def __init__(self, num_channels, num_groups=32, eps=1e-05, affine=True): super().__init__(num_groups, num_channels, eps=eps, affine=affine) self.fast_norm = is_fast_norm() def forward(self, x): if self.fast_norm: return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) class GroupNorm1(nn.GroupNorm): def __init__(self, num_channels, **kwargs): super().__init__(1, num_channels, **kwargs) self.fast_norm = is_fast_norm() def forward(self, x: torch.Tensor) -> torch.Tensor: if self.fast_norm: return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) class LayerNorm(nn.LayerNorm): def __init__(self, num_channels, eps=1e-06, affine=True): super().__init__(num_channels, eps=eps, elementwise_affine=affine) self._fast_norm = is_fast_norm() def forward(self, x: torch.Tensor) -> torch.Tensor: if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) return x class LayerNorm2d(nn.LayerNorm): def __init__(self, num_channels, eps=1e-06, affine=True): super().__init__(num_channels, eps=eps, elementwise_affine=affine) self._fast_norm = is_fast_norm() def forward(self, x: torch.Tensor) -> torch.Tensor: x = x.permute(0, 2, 3, 1) if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = x.permute(0, 3, 1, 2) return x def _is_contiguous(tensor: torch.Tensor) -> bool: if torch.jit.is_scripting(): return tensor.is_contiguous() else: return tensor.is_contiguous(memory_format=torch.contiguous_format) def _layer_norm_cf(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float): (s, u) = torch.var_mean(x, dim=1, unbiased=False, keepdim=True) x = (x - u) * torch.rsqrt(s + eps) x = x * weight[:, None, None] + bias[:, None, None] return x def _layer_norm_cf_sqm(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float): u = x.mean(dim=1, keepdim=True) s = ((x * x).mean(dim=1, keepdim=True) - u * u).clamp(0) x = (x - u) * torch.rsqrt(s + eps) x = x * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1) return x class LayerNormExp2d(nn.LayerNorm): def __init__(self, num_channels, eps=1e-06): super().__init__(num_channels, eps=eps) def forward(self, x) -> torch.Tensor: if _is_contiguous(x): x = F.layer_norm(x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) else: x = _layer_norm_cf(x, self.weight, self.bias, self.eps) return x class RmsNorm(nn.Module): __constants__ = ['normalized_shape', 'eps', 'elementwise_affine'] normalized_shape: Tuple[int, ...] 
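# NOTE (editorial comment, added): RmsNorm scales by the root-mean-square of the features,
# roughly x * rsqrt(mean(x * x) + eps) * weight -- no mean subtraction and no bias,
# unlike LayerNorm; fast_rms_norm covers both the fused and the fallback code paths.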
eps: float elementwise_affine: bool def __init__(self, channels, eps=1e-06, affine=True, device=None, dtype=None) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): normalized_shape = (normalized_shape,) self.normalized_shape = tuple(normalized_shape) self.eps = eps self.elementwise_affine = affine if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: x = fast_rms_norm(x, self.normalized_shape, self.weight, self.eps) return x # File: pytorch-image-models-main/timm/layers/norm_act.py """""" from typing import Union, List, Optional, Any import torch from torch import nn as nn from torch.nn import functional as F from torchvision.ops.misc import FrozenBatchNorm2d from .create_act import create_act_layer from .fast_norm import is_fast_norm, fast_group_norm, fast_layer_norm from .trace_utils import _assert def _create_act(act_layer, act_kwargs=None, inplace=False, apply_act=True): act_kwargs = act_kwargs or {} act_kwargs.setdefault('inplace', inplace) act = None if apply_act: act = create_act_layer(act_layer, **act_kwargs) return nn.Identity() if act is None else act class BatchNormAct2d(nn.BatchNorm2d): def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None, device=None, dtype=None): try: factory_kwargs = {'device': device, 'dtype': dtype} super(BatchNormAct2d, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats, **factory_kwargs) except TypeError: super(BatchNormAct2d, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) def forward(self, x): _assert(x.ndim == 4, f'expected 4D input (got {x.ndim}D input)') if self.momentum is None: exponential_average_factor = 0.0 else: exponential_average_factor = self.momentum if self.training and self.track_running_stats: if self.num_batches_tracked is not None: self.num_batches_tracked.add_(1) if self.momentum is None: exponential_average_factor = 1.0 / float(self.num_batches_tracked) else: exponential_average_factor = self.momentum if self.training: bn_training = True else: bn_training = self.running_mean is None and self.running_var is None x = F.batch_norm(x, self.running_mean if not self.training or self.track_running_stats else None, self.running_var if not self.training or self.track_running_stats else None, self.weight, self.bias, bn_training, exponential_average_factor, self.eps) x = self.drop(x) x = self.act(x) return x class SyncBatchNormAct(nn.SyncBatchNorm): def forward(self, x: torch.Tensor) -> torch.Tensor: x = super().forward(x) if hasattr(self, 'drop'): x = self.drop(x) if hasattr(self, 'act'): x = self.act(x) return x def convert_sync_batchnorm(module, process_group=None): module_output = module if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): if isinstance(module, BatchNormAct2d): module_output = SyncBatchNormAct(module.num_features, module.eps,
module.momentum, module.affine, module.track_running_stats, process_group=process_group) module_output.act = module.act module_output.drop = module.drop else: module_output = torch.nn.SyncBatchNorm(module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, process_group) if module.affine: with torch.no_grad(): module_output.weight = module.weight module_output.bias = module.bias module_output.running_mean = module.running_mean module_output.running_var = module.running_var module_output.num_batches_tracked = module.num_batches_tracked if hasattr(module, 'qconfig'): module_output.qconfig = module.qconfig for (name, child) in module.named_children(): module_output.add_module(name, convert_sync_batchnorm(child, process_group)) del module return module_output class FrozenBatchNormAct2d(torch.nn.Module): def __init__(self, num_features: int, eps: float=1e-05, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None): super().__init__() self.eps = eps self.register_buffer('weight', torch.ones(num_features)) self.register_buffer('bias', torch.zeros(num_features)) self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) def _load_from_state_dict(self, state_dict: dict, prefix: str, local_metadata: dict, strict: bool, missing_keys: List[str], unexpected_keys: List[str], error_msgs: List[str]): num_batches_tracked_key = prefix + 'num_batches_tracked' if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, x: torch.Tensor) -> torch.Tensor: w = self.weight.reshape(1, -1, 1, 1) b = self.bias.reshape(1, -1, 1, 1) rv = self.running_var.reshape(1, -1, 1, 1) rm = self.running_mean.reshape(1, -1, 1, 1) scale = w * (rv + self.eps).rsqrt() bias = b - rm * scale x = x * scale + bias x = self.act(self.drop(x)) return x def __repr__(self) -> str: return f'{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps}, act={self.act})' def freeze_batch_norm_2d(module): res = module if isinstance(module, (BatchNormAct2d, SyncBatchNormAct)): res = FrozenBatchNormAct2d(module.num_features) res.num_features = module.num_features res.affine = module.affine if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps res.drop = module.drop res.act = module.act elif isinstance(module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)): res = FrozenBatchNorm2d(module.num_features) res.num_features = module.num_features res.affine = module.affine if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps else: for (name, child) in module.named_children(): new_child = freeze_batch_norm_2d(child) if new_child is not child: res.add_module(name, new_child) return res def unfreeze_batch_norm_2d(module): res = module if isinstance(module, FrozenBatchNormAct2d): res = 
BatchNormAct2d(module.num_features) if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps res.drop = module.drop res.act = module.act elif isinstance(module, FrozenBatchNorm2d): res = torch.nn.BatchNorm2d(module.num_features) if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps else: for (name, child) in module.named_children(): new_child = unfreeze_batch_norm_2d(child) if new_child is not child: res.add_module(name, new_child) return res def _num_groups(num_channels, num_groups, group_size): if group_size: assert num_channels % group_size == 0 return num_channels // group_size return num_groups class GroupNormAct(nn.GroupNorm): def __init__(self, num_channels, num_groups=32, eps=1e-05, affine=True, group_size=None, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None): super(GroupNormAct, self).__init__(_num_groups(num_channels, num_groups, group_size), num_channels, eps=eps, affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) self._fast_norm = is_fast_norm() def forward(self, x): if self._fast_norm: x = fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) x = self.drop(x) x = self.act(x) return x class GroupNorm1Act(nn.GroupNorm): def __init__(self, num_channels, eps=1e-05, affine=True, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None): super(GroupNorm1Act, self).__init__(1, num_channels, eps=eps, affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) self._fast_norm = is_fast_norm() def forward(self, x): if self._fast_norm: x = fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) x = self.drop(x) x = self.act(x) return x class LayerNormAct(nn.LayerNorm): def __init__(self, normalization_shape: Union[int, List[int], torch.Size], eps=1e-05, affine=True, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None): super(LayerNormAct, self).__init__(normalization_shape, eps=eps, elementwise_affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) self._fast_norm = is_fast_norm() def forward(self, x): if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = self.drop(x) x = self.act(x) return x class LayerNormAct2d(nn.LayerNorm): def __init__(self, num_channels, eps=1e-05, affine=True, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None): super(LayerNormAct2d, self).__init__(num_channels, eps=eps, elementwise_affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, 
act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) self._fast_norm = is_fast_norm() def forward(self, x): x = x.permute(0, 2, 3, 1) if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = x.permute(0, 3, 1, 2) x = self.drop(x) x = self.act(x) return x # File: pytorch-image-models-main/timm/layers/padding.py """""" import math from typing import List, Tuple, Union import torch import torch.nn.functional as F from .helpers import to_2tuple def get_padding(kernel_size: int, stride: int=1, dilation: int=1, **_) -> Union[int, List[int]]: if any([isinstance(v, (tuple, list)) for v in [kernel_size, stride, dilation]]): (kernel_size, stride, dilation) = (to_2tuple(kernel_size), to_2tuple(stride), to_2tuple(dilation)) return [get_padding(*a) for a in zip(kernel_size, stride, dilation)] padding = (stride - 1 + dilation * (kernel_size - 1)) // 2 return padding def get_same_padding(x: int, kernel_size: int, stride: int, dilation: int): if isinstance(x, torch.Tensor): return torch.clamp(((x / stride).ceil() - 1) * stride + (kernel_size - 1) * dilation + 1 - x, min=0) else: return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0) def is_static_pad(kernel_size: int, stride: int=1, dilation: int=1, **_): if any([isinstance(v, (tuple, list)) for v in [kernel_size, stride, dilation]]): (kernel_size, stride, dilation) = (to_2tuple(kernel_size), to_2tuple(stride), to_2tuple(dilation)) return all([is_static_pad(*a) for a in zip(kernel_size, stride, dilation)]) return stride == 1 and dilation * (kernel_size - 1) % 2 == 0 def pad_same_arg(input_size: List[int], kernel_size: List[int], stride: List[int], dilation: List[int]=(1, 1)) -> List[int]: (ih, iw) = input_size (kh, kw) = kernel_size pad_h = get_same_padding(ih, kh, stride[0], dilation[0]) pad_w = get_same_padding(iw, kw, stride[1], dilation[1]) return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2] def pad_same(x, kernel_size: List[int], stride: List[int], dilation: List[int]=(1, 1), value: float=0): (ih, iw) = x.size()[-2:] pad_h = get_same_padding(ih, kernel_size[0], stride[0], dilation[0]) pad_w = get_same_padding(iw, kernel_size[1], stride[1], dilation[1]) x = F.pad(x, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2), value=value) return x def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: dynamic = False if isinstance(padding, str): padding = padding.lower() if padding == 'same': if is_static_pad(kernel_size, **kwargs): padding = get_padding(kernel_size, **kwargs) else: padding = 0 dynamic = True elif padding == 'valid': padding = 0 else: padding = get_padding(kernel_size, **kwargs) return (padding, dynamic) # File: pytorch-image-models-main/timm/layers/patch_dropout.py from typing import Optional, Tuple, Union import torch import torch.nn as nn class PatchDropout(nn.Module): return_indices: torch.jit.Final[bool] def __init__(self, prob: float=0.5, num_prefix_tokens: int=1, ordered: bool=False, return_indices: bool=False): super().__init__() assert 0 <= prob < 1.0 self.prob = prob self.num_prefix_tokens = num_prefix_tokens self.ordered = ordered self.return_indices = return_indices def forward(self, x) -> Union[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]]]: if not self.training or self.prob == 0.0: if self.return_indices: return (x, None) return x if self.num_prefix_tokens: (prefix_tokens, x) = (x[:, 
:self.num_prefix_tokens], x[:, self.num_prefix_tokens:]) else: prefix_tokens = None B = x.shape[0] L = x.shape[1] num_keep = max(1, int(L * (1.0 - self.prob))) keep_indices = torch.argsort(torch.randn(B, L, device=x.device), dim=-1)[:, :num_keep] if self.ordered: keep_indices = keep_indices.sort(dim=-1)[0] x = x.gather(1, keep_indices.unsqueeze(-1).expand((-1, -1) + x.shape[2:])) if prefix_tokens is not None: x = torch.cat((prefix_tokens, x), dim=1) if self.return_indices: return (x, keep_indices) return x # File: pytorch-image-models-main/timm/layers/patch_embed.py """""" import logging import math from typing import Callable, List, Optional, Tuple, Union import torch from torch import nn as nn import torch.nn.functional as F from .format import Format, nchw_to from .helpers import to_2tuple from .trace_utils import _assert _logger = logging.getLogger(__name__) class PatchEmbed(nn.Module): output_fmt: Format dynamic_img_pad: torch.jit.Final[bool] def __init__(self, img_size: Optional[int]=224, patch_size: int=16, in_chans: int=3, embed_dim: int=768, norm_layer: Optional[Callable]=None, flatten: bool=True, output_fmt: Optional[str]=None, bias: bool=True, strict_img_size: bool=True, dynamic_img_pad: bool=False): super().__init__() self.patch_size = to_2tuple(patch_size) (self.img_size, self.grid_size, self.num_patches) = self._init_img_size(img_size) if output_fmt is not None: self.flatten = False self.output_fmt = Format(output_fmt) else: self.flatten = flatten self.output_fmt = Format.NCHW self.strict_img_size = strict_img_size self.dynamic_img_pad = dynamic_img_pad self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def _init_img_size(self, img_size: Union[int, Tuple[int, int]]): assert self.patch_size if img_size is None: return (None, None, None) img_size = to_2tuple(img_size) grid_size = tuple([s // p for (s, p) in zip(img_size, self.patch_size)]) num_patches = grid_size[0] * grid_size[1] return (img_size, grid_size, num_patches) def set_input_size(self, img_size: Optional[Union[int, Tuple[int, int]]]=None, patch_size: Optional[Union[int, Tuple[int, int]]]=None): new_patch_size = None if patch_size is not None: new_patch_size = to_2tuple(patch_size) if new_patch_size is not None and new_patch_size != self.patch_size: with torch.no_grad(): new_proj = nn.Conv2d(self.proj.in_channels, self.proj.out_channels, kernel_size=new_patch_size, stride=new_patch_size, bias=self.proj.bias is not None) new_proj.weight.copy_(resample_patch_embed(self.proj.weight, new_patch_size, verbose=True)) if self.proj.bias is not None: new_proj.bias.copy_(self.proj.bias) self.proj = new_proj self.patch_size = new_patch_size img_size = img_size or self.img_size if img_size != self.img_size or new_patch_size is not None: (self.img_size, self.grid_size, self.num_patches) = self._init_img_size(img_size) def feat_ratio(self, as_scalar=True) -> Union[Tuple[int, int], int]: if as_scalar: return max(self.patch_size) else: return self.patch_size def dynamic_feat_size(self, img_size: Tuple[int, int]) -> Tuple[int, int]: if self.dynamic_img_pad: return (math.ceil(img_size[0] / self.patch_size[0]), math.ceil(img_size[1] / self.patch_size[1])) else: return (img_size[0] // self.patch_size[0], img_size[1] // self.patch_size[1]) def forward(self, x): (B, C, H, W) = x.shape if self.img_size is not None: if self.strict_img_size: _assert(H == self.img_size[0], f"Input height ({H}) doesn't match model 
({self.img_size[0]}).") _assert(W == self.img_size[1], f"Input width ({W}) doesn't match model ({self.img_size[1]}).") elif not self.dynamic_img_pad: _assert(H % self.patch_size[0] == 0, f'Input height ({H}) should be divisible by patch size ({self.patch_size[0]}).') _assert(W % self.patch_size[1] == 0, f'Input width ({W}) should be divisible by patch size ({self.patch_size[1]}).') if self.dynamic_img_pad: pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0] pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1] x = F.pad(x, (0, pad_w, 0, pad_h)) x = self.proj(x) if self.flatten: x = x.flatten(2).transpose(1, 2) elif self.output_fmt != Format.NCHW: x = nchw_to(x, self.output_fmt) x = self.norm(x) return x class PatchEmbedWithSize(PatchEmbed): output_fmt: Format def __init__(self, img_size: Optional[int]=224, patch_size: int=16, in_chans: int=3, embed_dim: int=768, norm_layer: Optional[Callable]=None, flatten: bool=True, output_fmt: Optional[str]=None, bias: bool=True): super().__init__(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer, flatten=flatten, output_fmt=output_fmt, bias=bias) def forward(self, x) -> Tuple[torch.Tensor, List[int]]: (B, C, H, W) = x.shape if self.img_size is not None: _assert(H % self.patch_size[0] == 0, f'Input image height ({H}) must be divisible by patch size ({self.patch_size[0]}).') _assert(W % self.patch_size[1] == 0, f'Input image width ({W}) must be divisible by patch size ({self.patch_size[1]}).') x = self.proj(x) feat_size = x.shape[-2:] if self.flatten: x = x.flatten(2).transpose(1, 2) elif self.output_fmt != Format.NCHW: x = nchw_to(x, self.output_fmt) x = self.norm(x) return (x, feat_size) def resample_patch_embed(patch_embed, new_size: List[int], interpolation: str='bicubic', antialias: bool=True, verbose: bool=False): import numpy as np try: from torch import vmap except ImportError: from functorch import vmap assert len(patch_embed.shape) == 4, 'Four dimensions expected' assert len(new_size) == 2, 'New shape should only be hw' old_size = patch_embed.shape[-2:] if tuple(old_size) == tuple(new_size): return patch_embed if verbose: _logger.info(f'Resize patch embedding {patch_embed.shape} to {new_size}, w/ {interpolation} interpolation.') def resize(x_np, _new_size): x_tf = torch.Tensor(x_np)[None, None, ...] 
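# NOTE (editorial comment, added): the resampling below follows the FlexiViT-style resize:
# get_resize_mat materializes the interpolation as an explicit linear map by resizing
# each basis vector of the old patch grid, and the pseudoinverse of that map is then
# applied to every kernel so the resampled embedding reproduces, in the least-squares
# sense, the original kernel's responses on resized inputs.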
x_upsampled = F.interpolate(x_tf, size=_new_size, mode=interpolation, antialias=antialias)[0, 0, ...].numpy() return x_upsampled def get_resize_mat(_old_size, _new_size): mat = [] for i in range(np.prod(_old_size)): basis_vec = np.zeros(_old_size) basis_vec[np.unravel_index(i, _old_size)] = 1.0 mat.append(resize(basis_vec, _new_size).reshape(-1)) return np.stack(mat).T resize_mat = get_resize_mat(old_size, new_size) resize_mat_pinv = torch.tensor(np.linalg.pinv(resize_mat.T), device=patch_embed.device) def resample_kernel(kernel): resampled_kernel = resize_mat_pinv @ kernel.reshape(-1) return resampled_kernel.reshape(new_size) v_resample_kernel = vmap(vmap(resample_kernel, 0, 0), 1, 1) orig_dtype = patch_embed.dtype patch_embed = patch_embed.float() patch_embed = v_resample_kernel(patch_embed) patch_embed = patch_embed.to(orig_dtype) return patch_embed # File: pytorch-image-models-main/timm/layers/pool2d_same.py """""" import torch import torch.nn as nn import torch.nn.functional as F from typing import List, Tuple, Optional from .helpers import to_2tuple from .padding import pad_same, get_padding_value def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int]=(0, 0), ceil_mode: bool=False, count_include_pad: bool=True): x = pad_same(x, kernel_size, stride) return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad) class AvgPool2dSame(nn.AvgPool2d): def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True): kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad) def forward(self, x): x = pad_same(x, self.kernel_size, self.stride) return F.avg_pool2d(x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad) def max_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int]=(0, 0), dilation: List[int]=(1, 1), ceil_mode: bool=False): x = pad_same(x, kernel_size, stride, value=-float('inf')) return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode) class MaxPool2dSame(nn.MaxPool2d): def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False): kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) dilation = to_2tuple(dilation) super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode) def forward(self, x): x = pad_same(x, self.kernel_size, self.stride, value=-float('inf')) return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode) def create_pool2d(pool_type, kernel_size, stride=None, **kwargs): stride = stride or kernel_size padding = kwargs.pop('padding', '') (padding, is_dynamic) = get_padding_value(padding, kernel_size, stride=stride, **kwargs) if is_dynamic: if pool_type == 'avg': return AvgPool2dSame(kernel_size, stride=stride, **kwargs) elif pool_type == 'max': return MaxPool2dSame(kernel_size, stride=stride, **kwargs) else: assert False, f'Unsupported pool type {pool_type}' elif pool_type == 'avg': return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs) elif pool_type == 'max': return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs) else: assert False, f'Unsupported pool type {pool_type}' # File: pytorch-image-models-main/timm/layers/pos_embed.py """""" import logging import math from typing import List, Tuple, Optional, Union import torch import torch.nn.functional as F from .helpers import 
to_2tuple _logger = logging.getLogger(__name__) def resample_abs_pos_embed(posemb: torch.Tensor, new_size: List[int], old_size: Optional[List[int]]=None, num_prefix_tokens: int=1, interpolation: str='bicubic', antialias: bool=True, verbose: bool=False): num_pos_tokens = posemb.shape[1] num_new_tokens = new_size[0] * new_size[1] + num_prefix_tokens if num_new_tokens == num_pos_tokens and new_size[0] == new_size[1]: return posemb if old_size is None: hw = int(math.sqrt(num_pos_tokens - num_prefix_tokens)) old_size = (hw, hw) if num_prefix_tokens: (posemb_prefix, posemb) = (posemb[:, :num_prefix_tokens], posemb[:, num_prefix_tokens:]) else: (posemb_prefix, posemb) = (None, posemb) embed_dim = posemb.shape[-1] orig_dtype = posemb.dtype posemb = posemb.float() posemb = posemb.reshape(1, old_size[0], old_size[1], -1).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=new_size, mode=interpolation, antialias=antialias) posemb = posemb.permute(0, 2, 3, 1).reshape(1, -1, embed_dim) posemb = posemb.to(orig_dtype) if posemb_prefix is not None: posemb = torch.cat([posemb_prefix, posemb], dim=1) if not torch.jit.is_scripting() and verbose: _logger.info(f'Resized position embedding: {old_size} to {new_size}.') return posemb def resample_abs_pos_embed_nhwc(posemb: torch.Tensor, new_size: List[int], interpolation: str='bicubic', antialias: bool=True, verbose: bool=False): if new_size[0] == posemb.shape[-3] and new_size[1] == posemb.shape[-2]: return posemb orig_dtype = posemb.dtype posemb = posemb.float() posemb = posemb.reshape(1, posemb.shape[-3], posemb.shape[-2], posemb.shape[-1]).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=new_size, mode=interpolation, antialias=antialias) posemb = posemb.permute(0, 2, 3, 1).to(orig_dtype) if not torch.jit.is_scripting() and verbose: _logger.info(f'Resized position embedding: {posemb.shape[-3:-1]} to {new_size}.') return posemb # File: pytorch-image-models-main/timm/layers/pos_embed_rel.py """""" import math import os from typing import Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from .grid import ndgrid from .interpolate import RegularGridInterpolator from .mlp import Mlp from .weight_init import trunc_normal_ _USE_SCIPY = int(os.environ.get('TIMM_USE_SCIPY_INTERP', 0)) > 0 def gen_relative_position_index(q_size: Tuple[int, int], k_size: Optional[Tuple[int, int]]=None, class_token: bool=False) -> torch.Tensor: assert k_size is None, 'Different q & k sizes not currently supported' coords = torch.stack(ndgrid(torch.arange(q_size[0]), torch.arange(q_size[1]))).flatten(1) relative_coords = coords[:, :, None] - coords[:, None, :] relative_coords = relative_coords.permute(1, 2, 0) relative_coords[:, :, 0] += q_size[0] - 1 relative_coords[:, :, 1] += q_size[1] - 1 relative_coords[:, :, 0] *= 2 * q_size[1] - 1 num_relative_distance = (2 * q_size[0] - 1) * (2 * q_size[1] - 1) relative_position_index = relative_coords.sum(-1) if class_token: relative_position_index = F.pad(relative_position_index, [1, 0, 1, 0]) relative_position_index[0, 0:] = num_relative_distance relative_position_index[0:, 0] = num_relative_distance + 1 relative_position_index[0, 0] = num_relative_distance + 2 return relative_position_index.contiguous() def resize_rel_pos_bias_table_simple(rel_pos_bias, new_window_size: Tuple[int, int], new_bias_shape: Tuple[int, ...]): dst_size = (new_window_size[0] * 2 - 1, new_window_size[1] * 2 - 1) if rel_pos_bias.ndim == 3: (_, dst_h, dst_w) = new_bias_shape (num_attn_heads, src_h, src_w) = rel_pos_bias.shape 
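# In the 3D case the bias table keeps its spatial layout (num_heads, 2*H-1, 2*W-1) and can be bicubic-interpolated directly; the flattened 2D case below must first split off any extra (e.g. class-token) rows, reshape to the source grid, interpolate, then re-append the extras.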
assert dst_h == dst_size[0] and dst_w == dst_size[1] if src_h != dst_h or src_w != dst_w: rel_pos_bias = torch.nn.functional.interpolate(rel_pos_bias.unsqueeze(0), size=dst_size, mode='bicubic', align_corners=False).squeeze(0) else: assert rel_pos_bias.ndim == 2 (dst_num_pos, _) = new_bias_shape (src_num_pos, num_attn_heads) = rel_pos_bias.shape num_extra_tokens = dst_num_pos - dst_size[0] * dst_size[1] src_size = int((src_num_pos - num_extra_tokens) ** 0.5) src_size = (src_size, src_size) if src_size[0] != dst_size[0] or src_size[1] != dst_size[1]: if num_extra_tokens: extra_tokens = rel_pos_bias[-num_extra_tokens:, :] rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] else: extra_tokens = None rel_pos_bias = torch.nn.functional.interpolate(rel_pos_bias.transpose(1, 0).reshape((1, -1, src_size[0], src_size[1])), size=dst_size, mode='bicubic', align_corners=False).view(-1, dst_num_pos - num_extra_tokens).transpose(0, 1) if extra_tokens is not None: rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0) return rel_pos_bias def resize_rel_pos_bias_table_levit(position_bias_table, new_size, interpolation: str='bicubic', antialias: bool=True): (L1, nH1) = position_bias_table.size() (L2, nH2) = new_size assert nH1 == nH2 if L1 != L2: orig_dtype = position_bias_table.dtype position_bias_table = position_bias_table.float() S1 = int(L1 ** 0.5) S2 = int(L2 ** 0.5) relative_position_bias_table_resized = F.interpolate(position_bias_table.permute(1, 0).view(1, nH1, S1, S1), size=(S2, S2), mode=interpolation, antialias=antialias) relative_position_bias_table_resized = relative_position_bias_table_resized.view(nH2, L2).permute(1, 0) relative_position_bias_table_resized = relative_position_bias_table_resized.to(orig_dtype) return relative_position_bias_table_resized else: return position_bias_table def resize_rel_pos_bias_table(rel_pos_bias, new_window_size: Tuple[int, int], new_bias_shape: Tuple[int, ...]): if _USE_SCIPY: from scipy import interpolate dst_size = (new_window_size[0] * 2 - 1, new_window_size[1] * 2 - 1) if rel_pos_bias.ndim == 3: num_extra_tokens = 0 (_, dst_h, dst_w) = new_bias_shape assert dst_h == dst_size[0] and dst_w == dst_size[1] (num_attn_heads, src_h, src_w) = rel_pos_bias.shape src_size = (src_h, src_w) has_flat_shape = False else: assert rel_pos_bias.ndim == 2 (dst_num_pos, _) = new_bias_shape (src_num_pos, num_attn_heads) = rel_pos_bias.shape num_extra_tokens = dst_num_pos - dst_size[0] * dst_size[1] src_size = int((src_num_pos - num_extra_tokens) ** 0.5) src_size = (src_size, src_size) has_flat_shape = True if src_size[0] != dst_size[0] or src_size[1] != dst_size[1]: if num_extra_tokens: extra_tokens = rel_pos_bias[-num_extra_tokens:, :] rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] else: extra_tokens = None def geometric_progression(a, r, n): return a * (1.0 - r ** n) / (1.0 - r) def _calc(src, dst): (left, right) = (1.01, 1.5) while right - left > 1e-06: q = (left + right) / 2.0 gp = geometric_progression(1, q, src // 2) if gp > dst // 2: right = q else: left = q dis = [] cur = 1 for i in range(src // 2): dis.append(cur) cur += q ** (i + 1) r_ids = [-_ for _ in reversed(dis)] return r_ids + [0] + dis y = _calc(src_size[0], dst_size[0]) x = _calc(src_size[1], dst_size[1]) yx = [torch.tensor(y), torch.tensor(x)] ty = dst_size[0] // 2.0 tx = dst_size[1] // 2.0 dy = torch.arange(-ty, ty + 0.1, 1.0) dx = torch.arange(-tx, tx + 0.1, 1.0) dyx = ndgrid(dy, dx) all_rel_pos_bias = [] for i in range(num_attn_heads): if has_flat_shape: z = rel_pos_bias[:, i].view(src_size[0], src_size[1]).float() else: z =
rel_pos_bias[i, :, :].float() if _USE_SCIPY: f = interpolate.interp2d(x, y, z.numpy(), kind='cubic') r = torch.Tensor(f(dx, dy)).contiguous().to(rel_pos_bias.device) else: f = RegularGridInterpolator(yx, z) r = f(dyx).contiguous().to(rel_pos_bias.device) if has_flat_shape: r = r.view(-1, 1) all_rel_pos_bias.append(r) if has_flat_shape: rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) else: rel_pos_bias = torch.cat(all_rel_pos_bias, dim=0) if extra_tokens is not None: assert has_flat_shape rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0) return rel_pos_bias class RelPosBias(nn.Module): def __init__(self, window_size, num_heads, prefix_tokens=0): super().__init__() assert prefix_tokens <= 1 self.window_size = window_size self.window_area = window_size[0] * window_size[1] self.bias_shape = (self.window_area + prefix_tokens,) * 2 + (num_heads,) num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 * prefix_tokens self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads)) self.register_buffer('relative_position_index', gen_relative_position_index(self.window_size, class_token=prefix_tokens > 0).view(-1), persistent=False) self.init_weights() def init_weights(self): trunc_normal_(self.relative_position_bias_table, std=0.02) def get_bias(self) -> torch.Tensor: relative_position_bias = self.relative_position_bias_table[self.relative_position_index] relative_position_bias = relative_position_bias.view(self.bias_shape).permute(2, 0, 1) return relative_position_bias.unsqueeze(0).contiguous() def forward(self, attn, shared_rel_pos: Optional[torch.Tensor]=None): return attn + self.get_bias() def gen_relative_log_coords(win_size: Tuple[int, int], pretrained_win_size: Tuple[int, int]=(0, 0), mode='swin'): assert mode in ('swin', 'cr') relative_coords_h = torch.arange(-(win_size[0] - 1), win_size[0]).to(torch.float32) relative_coords_w = torch.arange(-(win_size[1] - 1), win_size[1]).to(torch.float32) relative_coords_table = torch.stack(ndgrid(relative_coords_h, relative_coords_w)) relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous() if mode == 'swin': if pretrained_win_size[0] > 0: relative_coords_table[:, :, 0] /= pretrained_win_size[0] - 1 relative_coords_table[:, :, 1] /= pretrained_win_size[1] - 1 else: relative_coords_table[:, :, 0] /= win_size[0] - 1 relative_coords_table[:, :, 1] /= win_size[1] - 1 relative_coords_table *= 8 relative_coords_table = torch.sign(relative_coords_table) * torch.log2(1.0 + relative_coords_table.abs()) / math.log2(8) else: relative_coords_table = torch.sign(relative_coords_table) * torch.log(1.0 + relative_coords_table.abs()) return relative_coords_table class RelPosMlp(nn.Module): def __init__(self, window_size, num_heads=8, hidden_dim=128, prefix_tokens=0, mode='cr', pretrained_window_size=(0, 0)): super().__init__() self.window_size = window_size self.window_area = self.window_size[0] * self.window_size[1] self.prefix_tokens = prefix_tokens self.num_heads = num_heads self.bias_shape = (self.window_area,) * 2 + (num_heads,) if mode == 'swin': self.bias_act = nn.Sigmoid() self.bias_gain = 16 mlp_bias = (True, False) else: self.bias_act = nn.Identity() self.bias_gain = None mlp_bias = True self.mlp = Mlp(2, hidden_features=hidden_dim, out_features=num_heads, act_layer=nn.ReLU, bias=mlp_bias, drop=(0.125, 0.0)) self.register_buffer('relative_position_index', gen_relative_position_index(window_size).view(-1), persistent=False) self.register_buffer('rel_coords_log', 
gen_relative_log_coords(window_size, pretrained_window_size, mode=mode), persistent=False) def get_bias(self) -> torch.Tensor: relative_position_bias = self.mlp(self.rel_coords_log) if self.relative_position_index is not None: relative_position_bias = relative_position_bias.view(-1, self.num_heads)[self.relative_position_index] relative_position_bias = relative_position_bias.view(self.bias_shape) relative_position_bias = relative_position_bias.permute(2, 0, 1) relative_position_bias = self.bias_act(relative_position_bias) if self.bias_gain is not None: relative_position_bias = self.bias_gain * relative_position_bias if self.prefix_tokens: relative_position_bias = F.pad(relative_position_bias, [self.prefix_tokens, 0, self.prefix_tokens, 0]) return relative_position_bias.unsqueeze(0).contiguous() def forward(self, attn, shared_rel_pos: Optional[torch.Tensor]=None): return attn + self.get_bias() def generate_lookup_tensor(length: int, max_relative_position: Optional[int]=None): if max_relative_position is None: max_relative_position = length - 1 vocab_size = 2 * max_relative_position + 1 ret = torch.zeros(length, length, vocab_size) for i in range(length): for x in range(length): v = x - i + max_relative_position if abs(x - i) > max_relative_position: continue ret[i, x, v] = 1 return ret def reindex_2d_einsum_lookup(relative_position_tensor, height: int, width: int, height_lookup: torch.Tensor, width_lookup: torch.Tensor) -> torch.Tensor: reindexed_tensor = torch.einsum('nhw,ixh->nixw', relative_position_tensor, height_lookup) reindexed_tensor = torch.einsum('nixw,jyw->nijxy', reindexed_tensor, width_lookup) area = height * width return reindexed_tensor.reshape(relative_position_tensor.shape[0], area, area) class RelPosBiasTf(nn.Module): def __init__(self, window_size, num_heads, prefix_tokens=0): super().__init__() assert prefix_tokens <= 1 self.window_size = window_size self.window_area = window_size[0] * window_size[1] self.num_heads = num_heads vocab_height = 2 * window_size[0] - 1 vocab_width = 2 * window_size[1] - 1 self.bias_shape = (self.num_heads, vocab_height, vocab_width) self.relative_position_bias_table = nn.Parameter(torch.zeros(self.bias_shape)) self.register_buffer('height_lookup', generate_lookup_tensor(window_size[0]), persistent=False) self.register_buffer('width_lookup', generate_lookup_tensor(window_size[1]), persistent=False) self.init_weights() def init_weights(self): nn.init.normal_(self.relative_position_bias_table, std=0.02) def get_bias(self) -> torch.Tensor: return reindex_2d_einsum_lookup(self.relative_position_bias_table, self.window_size[0], self.window_size[1], self.height_lookup, self.width_lookup) def forward(self, attn, shared_rel_pos: Optional[torch.Tensor]=None): return attn + self.get_bias() # File: pytorch-image-models-main/timm/layers/pos_embed_sincos.py """""" import math from typing import List, Tuple, Optional, Union import torch from torch import nn as nn from .grid import ndgrid from .trace_utils import _assert def pixel_freq_bands(num_bands: int, max_freq: float=224.0, linear_bands: bool=True, device: Optional[torch.device]=None): if linear_bands: bands = torch.linspace(1.0, max_freq / 2, num_bands, dtype=torch.float32, device=device) else: bands = 2 ** torch.linspace(0, math.log(max_freq, 2) - 1, num_bands, dtype=torch.float32, device=device) return bands * torch.pi def freq_bands(num_bands: int, temperature: float=10000.0, step: int=2, device: Optional[torch.device]=None) -> torch.Tensor: exp = torch.arange(0, num_bands, step, dtype=torch.int64, 
device=device).to(torch.float32) / num_bands bands = 1.0 / temperature ** exp return bands def build_sincos2d_pos_embed(feat_shape: List[int], dim: int=64, temperature: float=10000.0, reverse_coord: bool=False, interleave_sin_cos: bool=False, dtype: torch.dtype=torch.float32, device: Optional[torch.device]=None) -> torch.Tensor: assert dim % 4 == 0, 'Embed dimension must be divisible by 4 for sin-cos 2D position embedding' pos_dim = dim // 4 bands = freq_bands(pos_dim, temperature=temperature, step=1, device=device) if reverse_coord: feat_shape = feat_shape[::-1] grid = torch.stack(ndgrid([torch.arange(s, device=device, dtype=torch.int64).to(torch.float32) for s in feat_shape])).flatten(1).transpose(0, 1) pos2 = grid.unsqueeze(-1) * bands.unsqueeze(0) stack_dim = 2 if interleave_sin_cos else 1 pos_emb = torch.stack([torch.sin(pos2), torch.cos(pos2)], dim=stack_dim).flatten(1) return pos_emb.to(dtype=dtype) def build_fourier_pos_embed(feat_shape: List[int], bands: Optional[torch.Tensor]=None, num_bands: int=64, max_res: int=224, temperature: float=10000.0, linear_bands: bool=False, include_grid: bool=False, in_pixels: bool=True, ref_feat_shape: Optional[List[int]]=None, dtype: torch.dtype=torch.float32, device: Optional[torch.device]=None) -> List[torch.Tensor]: if bands is None: if in_pixels: bands = pixel_freq_bands(num_bands, float(max_res), linear_bands=linear_bands, device=device) else: bands = freq_bands(num_bands, temperature=temperature, step=1, device=device) else: if device is None: device = bands.device if dtype is None: dtype = bands.dtype if in_pixels: t = [torch.linspace(-1.0, 1.0, steps=s, device=device, dtype=torch.float32) for s in feat_shape] else: t = [torch.arange(s, device=device, dtype=torch.int64).to(torch.float32) for s in feat_shape] if ref_feat_shape is not None: t = [x / f * r for (x, f, r) in zip(t, feat_shape, ref_feat_shape)] grid = torch.stack(ndgrid(t), dim=-1) grid = grid.unsqueeze(-1) pos = grid * bands (pos_sin, pos_cos) = (pos.sin().to(dtype=dtype), pos.cos().to(dtype)) out = [grid, pos_sin, pos_cos] if include_grid else [pos_sin, pos_cos] return out class FourierEmbed(nn.Module): def __init__(self, max_res: int=224, num_bands: int=64, concat_grid=True, keep_spatial=False): super().__init__() self.max_res = max_res self.num_bands = num_bands self.concat_grid = concat_grid self.keep_spatial = keep_spatial self.register_buffer('bands', pixel_freq_bands(num_bands, float(max_res)), persistent=False) def forward(self, x): (B, C) = x.shape[:2] feat_shape = x.shape[2:] emb = build_fourier_pos_embed(feat_shape, self.bands, include_grid=self.concat_grid, dtype=x.dtype, device=x.device) emb = torch.cat(emb, dim=-1) emb = emb.transpose(-1, -2).flatten(len(feat_shape)) batch_expand = (B,) + (-1,) * (x.ndim - 1) if self.keep_spatial: x = torch.cat([x, emb.unsqueeze(0).expand(batch_expand).permute(0, 3, 1, 2)], dim=1) else: x = torch.cat([x.permute(0, 2, 3, 1), emb.unsqueeze(0).expand(batch_expand)], dim=-1) x = x.reshape(B, feat_shape.numel(), -1) return x def rot(x): return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape) def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb): if sin_emb.ndim == 3: return x * cos_emb.unsqueeze(1).expand_as(x) + rot(x) * sin_emb.unsqueeze(1).expand_as(x) return x * cos_emb + rot(x) * sin_emb def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb): if isinstance(x, torch.Tensor): x = [x] return [t * cos_emb + rot(t) * sin_emb for t in x] def apply_rot_embed_cat(x: torch.Tensor, emb): (sin_emb, cos_emb) =
emb.tensor_split(2, -1) if sin_emb.ndim == 3: return x * cos_emb.unsqueeze(1).expand_as(x) + rot(x) * sin_emb.unsqueeze(1).expand_as(x) return x * cos_emb + rot(x) * sin_emb def apply_keep_indices_nlc(x, pos_embed, keep_indices): pos_embed = pos_embed.unsqueeze(0).expand(x.shape[0], -1, -1) pos_embed = pos_embed.gather(1, keep_indices.unsqueeze(-1).expand(-1, -1, pos_embed.shape[-1])) return pos_embed def build_rotary_pos_embed(feat_shape: List[int], bands: Optional[torch.Tensor]=None, dim: int=64, max_res: int=224, temperature: float=10000.0, linear_bands: bool=False, in_pixels: bool=True, ref_feat_shape: Optional[List[int]]=None, dtype: torch.dtype=torch.float32, device: Optional[torch.device]=None): (sin_emb, cos_emb) = build_fourier_pos_embed(feat_shape, bands=bands, num_bands=dim // 4, max_res=max_res, temperature=temperature, linear_bands=linear_bands, in_pixels=in_pixels, ref_feat_shape=ref_feat_shape, device=device, dtype=dtype) num_spatial_dim = 1 for x in feat_shape: num_spatial_dim *= x sin_emb = sin_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1) cos_emb = cos_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1) return (sin_emb, cos_emb) class RotaryEmbedding(nn.Module): def __init__(self, dim, max_res=224, temperature=10000, in_pixels=True, linear_bands: bool=False, feat_shape: Optional[List[int]]=None, ref_feat_shape: Optional[List[int]]=None): super().__init__() self.dim = dim self.max_res = max_res self.temperature = temperature self.in_pixels = in_pixels self.feat_shape = feat_shape self.ref_feat_shape = ref_feat_shape if feat_shape is None: if in_pixels: bands = pixel_freq_bands(dim // 4, float(max_res), linear_bands=linear_bands) else: bands = freq_bands(dim // 4, temperature=temperature, step=1) self.register_buffer('bands', bands, persistent=False) self.pos_embed_sin = None self.pos_embed_cos = None else: (emb_sin, emb_cos) = build_rotary_pos_embed(feat_shape=feat_shape, dim=dim, max_res=max_res, linear_bands=linear_bands, in_pixels=in_pixels, ref_feat_shape=self.ref_feat_shape) self.bands = None self.register_buffer('pos_embed_sin', emb_sin, persistent=False) self.register_buffer('pos_embed_cos', emb_cos, persistent=False) def get_embed(self, shape: Optional[List[int]]=None): if self.bands is not None: assert shape is not None return build_rotary_pos_embed(shape, self.bands, in_pixels=self.in_pixels) else: return (self.pos_embed_sin, self.pos_embed_cos) def forward(self, x): (sin_emb, cos_emb) = self.get_embed(x.shape[2:]) return apply_rot_embed(x, sin_emb, cos_emb) class RotaryEmbeddingCat(nn.Module): def __init__(self, dim, max_res=224, temperature=10000, in_pixels=True, linear_bands: bool=False, feat_shape: Optional[List[int]]=None, ref_feat_shape: Optional[List[int]]=None): super().__init__() self.dim = dim self.max_res = max_res self.temperature = temperature self.in_pixels = in_pixels self.feat_shape = feat_shape self.ref_feat_shape = ref_feat_shape if feat_shape is None: if in_pixels: bands = pixel_freq_bands(dim // 4, float(max_res), linear_bands=linear_bands) else: bands = freq_bands(dim // 4, temperature=temperature, step=1) self.register_buffer('bands', bands, persistent=False) self.pos_embed = None else: embeds = build_rotary_pos_embed(feat_shape=feat_shape, dim=dim, max_res=max_res, linear_bands=linear_bands, in_pixels=in_pixels, ref_feat_shape=self.ref_feat_shape) self.bands = None self.register_buffer('pos_embed', torch.cat(embeds, -1), persistent=False) def get_embed(self, shape: Optional[List[int]]=None): if self.bands is not None and 
shape is not None: embeds = build_rotary_pos_embed(shape, self.bands, in_pixels=self.in_pixels, ref_feat_shape=self.ref_feat_shape) return torch.cat(embeds, -1) elif self.pos_embed is not None: return self.pos_embed else: assert False, 'get_embed() requires pre-computed pos_embed or valid shape w/ pre-computed bands' def forward(self, x): pos_embed = self.get_embed(x.shape[2:]) return apply_rot_embed_cat(x, pos_embed) # File: pytorch-image-models-main/timm/layers/selective_kernel.py """""" import torch from torch import nn as nn from .conv_bn_act import ConvNormAct from .helpers import make_divisible from .trace_utils import _assert def _kernel_valid(k): if isinstance(k, (list, tuple)): for ki in k: return _kernel_valid(ki) assert k >= 3 and k % 2 class SelectiveKernelAttn(nn.Module): def __init__(self, channels, num_paths=2, attn_channels=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): super(SelectiveKernelAttn, self).__init__() self.num_paths = num_paths self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False) self.bn = norm_layer(attn_channels) self.act = act_layer(inplace=True) self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False) def forward(self, x): _assert(x.shape[1] == self.num_paths, '') x = x.sum(1).mean((2, 3), keepdim=True) x = self.fc_reduce(x) x = self.bn(x) x = self.act(x) x = self.fc_select(x) (B, C, H, W) = x.shape x = x.view(B, self.num_paths, C // self.num_paths, H, W) x = torch.softmax(x, dim=1) return x class SelectiveKernel(nn.Module): def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1, rd_ratio=1.0 / 16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_layer=None): super(SelectiveKernel, self).__init__() out_channels = out_channels or in_channels kernel_size = kernel_size or [3, 5] _kernel_valid(kernel_size) if not isinstance(kernel_size, list): kernel_size = [kernel_size] * 2 if keep_3x3: dilation = [dilation * (k - 1) // 2 for k in kernel_size] kernel_size = [3] * len(kernel_size) else: dilation = [dilation] * len(kernel_size) self.num_paths = len(kernel_size) self.in_channels = in_channels self.out_channels = out_channels self.split_input = split_input if self.split_input: assert in_channels % self.num_paths == 0 in_channels = in_channels // self.num_paths groups = min(out_channels, groups) conv_kwargs = dict(stride=stride, groups=groups, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_layer=drop_layer) self.paths = nn.ModuleList([ConvNormAct(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) for (k, d) in zip(kernel_size, dilation)]) attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor) self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) def forward(self, x): if self.split_input: x_split = torch.split(x, self.in_channels // self.num_paths, 1) x_paths = [op(x_split[i]) for (i, op) in enumerate(self.paths)] else: x_paths = [op(x) for op in self.paths] x = torch.stack(x_paths, dim=1) x_attn = self.attn(x) x = x * x_attn x = torch.sum(x, dim=1) return x # File: pytorch-image-models-main/timm/layers/separable_conv.py """""" from torch import nn as nn from .create_conv2d import create_conv2d from .create_norm_act import get_norm_act_layer class SeparableConvNormAct(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', 
bias=False, channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, apply_act=True, drop_layer=None): super(SeparableConvNormAct, self).__init__() self.conv_dw = create_conv2d(in_channels, int(in_channels * channel_multiplier), kernel_size, stride=stride, dilation=dilation, padding=padding, depthwise=True) self.conv_pw = create_conv2d(int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) norm_act_layer = get_norm_act_layer(norm_layer, act_layer) norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) @property def in_channels(self): return self.conv_dw.in_channels @property def out_channels(self): return self.conv_pw.out_channels def forward(self, x): x = self.conv_dw(x) x = self.conv_pw(x) x = self.bn(x) return x SeparableConvBnAct = SeparableConvNormAct class SeparableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, channel_multiplier=1.0, pw_kernel_size=1): super(SeparableConv2d, self).__init__() self.conv_dw = create_conv2d(in_channels, int(in_channels * channel_multiplier), kernel_size, stride=stride, dilation=dilation, padding=padding, depthwise=True) self.conv_pw = create_conv2d(int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) @property def in_channels(self): return self.conv_dw.in_channels @property def out_channels(self): return self.conv_pw.out_channels def forward(self, x): x = self.conv_dw(x) x = self.conv_pw(x) return x # File: pytorch-image-models-main/timm/layers/space_to_depth.py import torch import torch.nn as nn class SpaceToDepth(nn.Module): bs: torch.jit.Final[int] def __init__(self, block_size=4): super().__init__() assert block_size == 4 self.bs = block_size def forward(self, x): (N, C, H, W) = x.size() x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) x = x.permute(0, 3, 5, 1, 2, 4).contiguous() x = x.view(N, C * self.bs * self.bs, H // self.bs, W // self.bs) return x class DepthToSpace(nn.Module): def __init__(self, block_size): super().__init__() self.bs = block_size def forward(self, x): (N, C, H, W) = x.size() x = x.view(N, self.bs, self.bs, C // self.bs ** 2, H, W) x = x.permute(0, 3, 4, 1, 5, 2).contiguous() x = x.view(N, C // self.bs ** 2, H * self.bs, W * self.bs) return x # File: pytorch-image-models-main/timm/layers/split_attn.py """""" import torch import torch.nn.functional as F from torch import nn from .helpers import make_divisible class RadixSoftmax(nn.Module): def __init__(self, radix, cardinality): super(RadixSoftmax, self).__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplitAttn(nn.Module): def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None, dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, act_layer=nn.ReLU, norm_layer=None, drop_layer=None, **kwargs): super(SplitAttn, self).__init__() out_channels = out_channels or in_channels self.radix = radix mid_chs = out_channels * radix if rd_channels is None: attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor) else: attn_chs = rd_channels * radix padding = 
kernel_size // 2 if padding is None else padding self.conv = nn.Conv2d(in_channels, mid_chs, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, **kwargs) self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity() self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act0 = act_layer(inplace=True) self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups) self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity() self.act1 = act_layer(inplace=True) self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups) self.rsoftmax = RadixSoftmax(radix, groups) def forward(self, x): x = self.conv(x) x = self.bn0(x) x = self.drop(x) x = self.act0(x) (B, RC, H, W) = x.shape if self.radix > 1: x = x.reshape((B, self.radix, RC // self.radix, H, W)) x_gap = x.sum(dim=1) else: x_gap = x x_gap = x_gap.mean((2, 3), keepdim=True) x_gap = self.fc1(x_gap) x_gap = self.bn1(x_gap) x_gap = self.act1(x_gap) x_attn = self.fc2(x_gap) x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1) if self.radix > 1: out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1) else: out = x * x_attn return out.contiguous() # File: pytorch-image-models-main/timm/layers/split_batchnorm.py """""" import torch import torch.nn as nn class SplitBatchNorm2d(torch.nn.BatchNorm2d): def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, num_splits=2): super().__init__(num_features, eps, momentum, affine, track_running_stats) assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)' self.num_splits = num_splits self.aux_bn = nn.ModuleList([nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)]) def forward(self, input: torch.Tensor): if self.training: split_size = input.shape[0] // self.num_splits assert input.shape[0] == split_size * self.num_splits, 'batch size must be evenly divisible by num_splits' split_input = input.split(split_size) x = [super().forward(split_input[0])] for (i, a) in enumerate(self.aux_bn): x.append(a(split_input[i + 1])) return torch.cat(x, dim=0) else: return super().forward(input) def convert_splitbn_model(module, num_splits=2): mod = module if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): return module if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): mod = SplitBatchNorm2d(module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, num_splits=num_splits) mod.running_mean = module.running_mean mod.running_var = module.running_var mod.num_batches_tracked = module.num_batches_tracked if module.affine: mod.weight.data = module.weight.data.clone().detach() mod.bias.data = module.bias.data.clone().detach() for aux in mod.aux_bn: aux.running_mean = module.running_mean.clone() aux.running_var = module.running_var.clone() aux.num_batches_tracked = module.num_batches_tracked.clone() if module.affine: aux.weight.data = module.weight.data.clone().detach() aux.bias.data = module.bias.data.clone().detach() for (name, child) in module.named_children(): mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) del module return mod # File: pytorch-image-models-main/timm/layers/squeeze_excite.py """""" from torch import nn as nn from .create_act import create_act_layer from .helpers import make_divisible class SEModule(nn.Module): def __init__(self, channels, rd_ratio=1.0 / 16, rd_channels=None, rd_divisor=8, add_maxpool=False, bias=True, act_layer=nn.ReLU, 
norm_layer=None, gate_layer='sigmoid'): super(SEModule, self).__init__() self.add_maxpool = add_maxpool if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.0) self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=bias) self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity() self.act = create_act_layer(act_layer, inplace=True) self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=bias) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((2, 3), keepdim=True) if self.add_maxpool: x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) x_se = self.fc1(x_se) x_se = self.act(self.bn(x_se)) x_se = self.fc2(x_se) return x * self.gate(x_se) SqueezeExcite = SEModule class EffectiveSEModule(nn.Module): def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_): super(EffectiveSEModule, self).__init__() self.add_maxpool = add_maxpool self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((2, 3), keepdim=True) if self.add_maxpool: x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) x_se = self.fc(x_se) return x * self.gate(x_se) EffectiveSqueezeExcite = EffectiveSEModule class SqueezeExciteCl(nn.Module): def __init__(self, channels, rd_ratio=1.0 / 16, rd_channels=None, rd_divisor=8, bias=True, act_layer=nn.ReLU, gate_layer='sigmoid'): super().__init__() if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.0) self.fc1 = nn.Linear(channels, rd_channels, bias=bias) self.act = create_act_layer(act_layer, inplace=True) self.fc2 = nn.Linear(rd_channels, channels, bias=bias) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((1, 2), keepdims=True) x_se = self.fc1(x_se) x_se = self.act(x_se) x_se = self.fc2(x_se) return x * self.gate(x_se) # File: pytorch-image-models-main/timm/layers/std_conv.py """""" import torch import torch.nn as nn import torch.nn.functional as F from .padding import get_padding, get_padding_value, pad_same class StdConv2d(nn.Conv2d): def __init__(self, in_channel, out_channels, kernel_size, stride=1, padding=None, dilation=1, groups=1, bias=False, eps=1e-06): if padding is None: padding = get_padding(kernel_size, stride, dilation) super().__init__(in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.eps = eps def forward(self, x): weight = F.batch_norm(self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0.0, eps=self.eps).reshape_as(self.weight) x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) return x class StdConv2dSame(nn.Conv2d): def __init__(self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', dilation=1, groups=1, bias=False, eps=1e-06): (padding, is_dynamic) = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) super().__init__(in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.same_pad = is_dynamic self.eps = eps def forward(self, x): if self.same_pad: x = pad_same(x, self.kernel_size, self.stride, self.dilation) weight = F.batch_norm(self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0.0, eps=self.eps).reshape_as(self.weight) x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) return x 
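# Illustrative usage sketch (not part of the original module): StdConv2d/StdConv2dSame are drop-in replacements for nn.Conv2d that re-standardize the kernel weights on every forward pass, e.g.:
#
#   conv = StdConv2dSame(3, 64, kernel_size=3, stride=2)   # dynamic 'SAME' padding
#   y = conv(torch.randn(1, 3, 224, 224))                  # -> torch.Size([1, 64, 112, 112])
#
# ScaledStdConv2d below additionally applies a learnable per-channel gain and a fixed fan-in scale (gamma * weight[0].numel() ** -0.5), the NFNet flavor of weight standardization.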
class ScaledStdConv2d(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=None, dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-06, gain_init=1.0): if padding is None: padding = get_padding(kernel_size, stride, dilation) super().__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) self.scale = gamma * self.weight[0].numel() ** (-0.5) self.eps = eps def forward(self, x): weight = F.batch_norm(self.weight.reshape(1, self.out_channels, -1), None, None, weight=(self.gain * self.scale).view(-1), training=True, momentum=0.0, eps=self.eps).reshape_as(self.weight) return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) class ScaledStdConv2dSame(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding='SAME', dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-06, gain_init=1.0): (padding, is_dynamic) = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) super().__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) self.scale = gamma * self.weight[0].numel() ** (-0.5) self.same_pad = is_dynamic self.eps = eps def forward(self, x): if self.same_pad: x = pad_same(x, self.kernel_size, self.stride, self.dilation) weight = F.batch_norm(self.weight.reshape(1, self.out_channels, -1), None, None, weight=(self.gain * self.scale).view(-1), training=True, momentum=0.0, eps=self.eps).reshape_as(self.weight) return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) # File: pytorch-image-models-main/timm/layers/trace_utils.py try: from torch import _assert except ImportError: def _assert(condition: bool, message: str): assert condition, message def _float_to_int(x: float) -> int: return int(x) # File: pytorch-image-models-main/timm/layers/weight_init.py import torch import math import warnings from torch import nn from torch.nn.init import _calculate_fan_in_and_fan_out def _trunc_normal_(tensor, mean, std, a, b): def norm_cdf(x): return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 if mean < a - 2 * std or mean > b + 2 * std: warnings.warn('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. 
The distribution of values may be incorrect.', stacklevel=2) l = norm_cdf((a - mean) / std) u = norm_cdf((b - mean) / std) tensor.uniform_(2 * l - 1, 2 * u - 1) tensor.erfinv_() tensor.mul_(std * math.sqrt(2.0)) tensor.add_(mean) tensor.clamp_(min=a, max=b) return tensor def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): with torch.no_grad(): return _trunc_normal_(tensor, mean, std, a, b) def trunc_normal_tf_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): with torch.no_grad(): _trunc_normal_(tensor, 0, 1.0, a, b) tensor.mul_(std).add_(mean) return tensor def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'): (fan_in, fan_out) = _calculate_fan_in_and_fan_out(tensor) if mode == 'fan_in': denom = fan_in elif mode == 'fan_out': denom = fan_out elif mode == 'fan_avg': denom = (fan_in + fan_out) / 2 variance = scale / denom if distribution == 'truncated_normal': trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.8796256610342398) elif distribution == 'normal': with torch.no_grad(): tensor.normal_(std=math.sqrt(variance)) elif distribution == 'uniform': bound = math.sqrt(3 * variance) with torch.no_grad(): tensor.uniform_(-bound, bound) else: raise ValueError(f'invalid distribution {distribution}') def lecun_normal_(tensor): variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal') def init_weight_vit(module: nn.Module, name: str, init_bias: float=0.02, head_bias: float=0.0, classifier_name: str='head'): if isinstance(module, (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d)): if name.startswith(classifier_name): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) else: nn.init.trunc_normal_(module.weight, std=0.02) if isinstance(module, nn.Linear) and module.bias is not None: nn.init.constant_(module.bias, init_bias) elif hasattr(module, 'init_weights'): module.init_weights() def init_weight_jax(module: nn.Module, name: str, head_bias: float=0.0, classifier_name: str='head'): if isinstance(module, nn.Linear): if name.startswith(classifier_name): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.normal_(module.bias, std=1e-06) if 'mlp' in name else nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() # File: pytorch-image-models-main/timm/loss/asymmetric_loss.py import torch import torch.nn as nn class AsymmetricLossMultiLabel(nn.Module): def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-08, disable_torch_grad_focal_loss=False): super(AsymmetricLossMultiLabel, self).__init__() self.gamma_neg = gamma_neg self.gamma_pos = gamma_pos self.clip = clip self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss self.eps = eps def forward(self, x, y): x_sigmoid = torch.sigmoid(x) xs_pos = x_sigmoid xs_neg = 1 - x_sigmoid if self.clip is not None and self.clip > 0: xs_neg = (xs_neg + self.clip).clamp(max=1) los_pos = y * torch.log(xs_pos.clamp(min=self.eps)) los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps)) loss = los_pos + los_neg if self.gamma_neg > 0 or self.gamma_pos > 0: if self.disable_torch_grad_focal_loss: torch.set_grad_enabled(False) pt0 = xs_pos * y pt1 = xs_neg * (1 - y) pt = pt0 + pt1 one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y) one_sided_w = torch.pow(1 - pt, one_sided_gamma) if self.disable_torch_grad_focal_loss: 
torch.set_grad_enabled(True) loss *= one_sided_w return -loss.sum() class AsymmetricLossSingleLabel(nn.Module): def __init__(self, gamma_pos=1, gamma_neg=4, eps: float=0.1, reduction='mean'): super(AsymmetricLossSingleLabel, self).__init__() self.eps = eps self.logsoftmax = nn.LogSoftmax(dim=-1) self.targets_classes = [] self.gamma_pos = gamma_pos self.gamma_neg = gamma_neg self.reduction = reduction def forward(self, inputs, target, reduction=None): num_classes = inputs.size()[-1] log_preds = self.logsoftmax(inputs) self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1) targets = self.targets_classes anti_targets = 1 - targets xs_pos = torch.exp(log_preds) xs_neg = 1 - xs_pos xs_pos = xs_pos * targets xs_neg = xs_neg * anti_targets asymmetric_w = torch.pow(1 - xs_pos - xs_neg, self.gamma_pos * targets + self.gamma_neg * anti_targets) log_preds = log_preds * asymmetric_w if self.eps > 0: self.targets_classes = self.targets_classes.mul(1 - self.eps).add(self.eps / num_classes) loss = -self.targets_classes.mul(log_preds) loss = loss.sum(dim=-1) if self.reduction == 'mean': loss = loss.mean() return loss # File: pytorch-image-models-main/timm/loss/binary_cross_entropy.py """""" from typing import Optional, Union import torch import torch.nn as nn import torch.nn.functional as F class BinaryCrossEntropy(nn.Module): def __init__(self, smoothing=0.1, target_threshold: Optional[float]=None, weight: Optional[torch.Tensor]=None, reduction: str='mean', sum_classes: bool=False, pos_weight: Optional[Union[torch.Tensor, float]]=None): super(BinaryCrossEntropy, self).__init__() assert 0.0 <= smoothing < 1.0 if pos_weight is not None: if not isinstance(pos_weight, torch.Tensor): pos_weight = torch.tensor(pos_weight) self.smoothing = smoothing self.target_threshold = target_threshold self.reduction = 'none' if sum_classes else reduction self.sum_classes = sum_classes self.register_buffer('weight', weight) self.register_buffer('pos_weight', pos_weight) def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: batch_size = x.shape[0] assert batch_size == target.shape[0] if target.shape != x.shape: num_classes = x.shape[-1] off_value = self.smoothing / num_classes on_value = 1.0 - self.smoothing + off_value target = target.long().view(-1, 1) target = torch.full((batch_size, num_classes), off_value, device=x.device, dtype=x.dtype).scatter_(1, target, on_value) if self.target_threshold is not None: target = target.gt(self.target_threshold).to(dtype=target.dtype) loss = F.binary_cross_entropy_with_logits(x, target, self.weight, pos_weight=self.pos_weight, reduction=self.reduction) if self.sum_classes: loss = loss.sum(-1).mean() return loss # File: pytorch-image-models-main/timm/loss/cross_entropy.py """""" import torch import torch.nn as nn import torch.nn.functional as F class LabelSmoothingCrossEntropy(nn.Module): def __init__(self, smoothing=0.1): super(LabelSmoothingCrossEntropy, self).__init__() assert smoothing < 1.0 self.smoothing = smoothing self.confidence = 1.0 - smoothing def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: logprobs = F.log_softmax(x, dim=-1) nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) nll_loss = nll_loss.squeeze(1) smooth_loss = -logprobs.mean(dim=-1) loss = self.confidence * nll_loss + self.smoothing * smooth_loss return loss.mean() class SoftTargetCrossEntropy(nn.Module): def __init__(self): super(SoftTargetCrossEntropy, self).__init__() def forward(self, x: torch.Tensor, target: 
torch.Tensor) -> torch.Tensor: loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1) return loss.mean() # File: pytorch-image-models-main/timm/loss/jsd.py import torch import torch.nn as nn import torch.nn.functional as F from .cross_entropy import LabelSmoothingCrossEntropy class JsdCrossEntropy(nn.Module): def __init__(self, num_splits=3, alpha=12, smoothing=0.1): super().__init__() self.num_splits = num_splits self.alpha = alpha if smoothing is not None and smoothing > 0: self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing) else: self.cross_entropy_loss = torch.nn.CrossEntropyLoss() def __call__(self, output, target): split_size = output.shape[0] // self.num_splits assert split_size * self.num_splits == output.shape[0] logits_split = torch.split(output, split_size) loss = self.cross_entropy_loss(logits_split[0], target[:split_size]) probs = [F.softmax(logits, dim=1) for logits in logits_split] logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-07, 1).log() loss += self.alpha * sum([F.kl_div(logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs) return loss # File: pytorch-image-models-main/timm/models/__init__.py from .beit import * from .byoanet import * from .byobnet import * from .cait import * from .coat import * from .convit import * from .convmixer import * from .convnext import * from .crossvit import * from .cspnet import * from .davit import * from .deit import * from .densenet import * from .dla import * from .dpn import * from .edgenext import * from .efficientformer import * from .efficientformer_v2 import * from .efficientnet import * from .efficientvit_mit import * from .efficientvit_msra import * from .eva import * from .fastvit import * from .focalnet import * from .gcvit import * from .ghostnet import * from .hardcorenas import * from .hgnet import * from .hiera import * from .hieradet_sam2 import * from .hrnet import * from .inception_next import * from .inception_resnet_v2 import * from .inception_v3 import * from .inception_v4 import * from .levit import * from .maxxvit import * from .metaformer import * from .mlp_mixer import * from .mobilenetv3 import * from .mobilevit import * from .mvitv2 import * from .nasnet import * from .nest import * from .nextvit import * from .nfnet import * from .pit import * from .pnasnet import * from .pvt_v2 import * from .rdnet import * from .regnet import * from .repghost import * from .repvit import * from .res2net import * from .resnest import * from .resnet import * from .resnetv2 import * from .rexnet import * from .selecsls import * from .senet import * from .sequencer import * from .sknet import * from .swin_transformer import * from .swin_transformer_v2 import * from .swin_transformer_v2_cr import * from .tiny_vit import * from .tnt import * from .tresnet import * from .twins import * from .vgg import * from .visformer import * from .vision_transformer import * from .vision_transformer_hybrid import * from .vision_transformer_relpos import * from .vision_transformer_sam import * from .vitamin import * from .volo import * from .vovnet import * from .xception import * from .xception_aligned import * from .xcit import * from ._builder import build_model_with_cfg, load_pretrained, load_custom_pretrained, resolve_pretrained_cfg, set_pretrained_download_progress, set_pretrained_check_hash from ._factory import create_model, parse_model_name, safe_model_name from ._features import FeatureInfo, FeatureHooks, FeatureHookNet, FeatureListNet, FeatureDictNet from ._features_fx 
import FeatureGraphNet, GraphExtractNet, create_feature_extractor, get_graph_node_names, register_notrace_module, is_notrace_module, get_notrace_modules, register_notrace_function, is_notrace_function, get_notrace_functions from ._helpers import clean_state_dict, load_state_dict, load_checkpoint, remap_state_dict, resume_checkpoint from ._hub import load_model_config_from_hf, load_state_dict_from_hf, push_to_hf_hub from ._manipulate import model_parameters, named_apply, named_modules, named_modules_with_params, group_modules, group_parameters, checkpoint_seq, adapt_input_conv from ._pretrained import PretrainedCfg, DefaultCfg, filter_pretrained_cfg from ._prune import adapt_model_from_string from ._registry import split_model_name_tag, get_arch_name, generate_default_cfgs, register_model, register_model_deprecations, model_entrypoint, list_models, list_pretrained, get_deprecated_models, is_model, list_modules, is_model_in_modules, is_model_pretrained, get_pretrained_cfg, get_pretrained_cfg_value, get_arch_pretrained_cfgs # File: pytorch-image-models-main/timm/models/_builder.py import dataclasses import logging import os from copy import deepcopy from typing import Any, Callable, Dict, List, Optional, Tuple from torch import nn as nn from torch.hub import load_state_dict_from_url from timm.models._features import FeatureListNet, FeatureDictNet, FeatureHookNet, FeatureGetterNet from timm.models._features_fx import FeatureGraphNet from timm.models._helpers import load_state_dict from timm.models._hub import has_hf_hub, download_cached_file, check_cached_file, load_state_dict_from_hf, load_custom_from_hf from timm.models._manipulate import adapt_input_conv from timm.models._pretrained import PretrainedCfg from timm.models._prune import adapt_model_from_file from timm.models._registry import get_pretrained_cfg _logger = logging.getLogger(__name__) _DOWNLOAD_PROGRESS = False _CHECK_HASH = False _USE_OLD_CACHE = int(os.environ.get('TIMM_USE_OLD_CACHE', 0)) > 0 __all__ = ['set_pretrained_download_progress', 'set_pretrained_check_hash', 'load_custom_pretrained', 'load_pretrained', 'pretrained_cfg_for_features', 'resolve_pretrained_cfg', 'build_model_with_cfg'] def _resolve_pretrained_source(pretrained_cfg): cfg_source = pretrained_cfg.get('source', '') pretrained_url = pretrained_cfg.get('url', None) pretrained_file = pretrained_cfg.get('file', None) pretrained_sd = pretrained_cfg.get('state_dict', None) hf_hub_id = pretrained_cfg.get('hf_hub_id', None) load_from = '' pretrained_loc = '' if cfg_source == 'hf-hub' and has_hf_hub(necessary=True): load_from = 'hf-hub' assert hf_hub_id pretrained_loc = hf_hub_id elif pretrained_sd: load_from = 'state_dict' pretrained_loc = pretrained_sd assert isinstance(pretrained_loc, dict) elif pretrained_file: load_from = 'file' pretrained_loc = pretrained_file else: old_cache_valid = False if _USE_OLD_CACHE: old_cache_valid = check_cached_file(pretrained_url) if pretrained_url else False if not old_cache_valid and hf_hub_id and has_hf_hub(necessary=True): load_from = 'hf-hub' pretrained_loc = hf_hub_id elif pretrained_url: load_from = 'url' pretrained_loc = pretrained_url if load_from == 'hf-hub' and pretrained_cfg.get('hf_hub_filename', None): pretrained_loc = (pretrained_loc, pretrained_cfg['hf_hub_filename']) return (load_from, pretrained_loc) def set_pretrained_download_progress(enable=True): global _DOWNLOAD_PROGRESS _DOWNLOAD_PROGRESS = enable def set_pretrained_check_hash(enable=True): global _CHECK_HASH _CHECK_HASH = enable def 
load_custom_pretrained(model: nn.Module, pretrained_cfg: Optional[Dict]=None, load_fn: Optional[Callable]=None): pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None) if not pretrained_cfg: _logger.warning('Invalid pretrained config, cannot load weights.') return (load_from, pretrained_loc) = _resolve_pretrained_source(pretrained_cfg) if not load_from: _logger.warning('No pretrained weights exist for this model. Using random initialization.') return if load_from == 'hf-hub': _logger.warning('Hugging Face hub not currently supported for custom load pretrained models.') elif load_from == 'url': pretrained_loc = download_cached_file(pretrained_loc, check_hash=_CHECK_HASH, progress=_DOWNLOAD_PROGRESS) if load_fn is not None: load_fn(model, pretrained_loc) elif hasattr(model, 'load_pretrained'): model.load_pretrained(pretrained_loc) else: _logger.warning('Valid function to load pretrained weights is not available, using random initialization.') def load_pretrained(model: nn.Module, pretrained_cfg: Optional[Dict]=None, num_classes: int=1000, in_chans: int=3, filter_fn: Optional[Callable]=None, strict: bool=True): pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None) if not pretrained_cfg: raise RuntimeError('Invalid pretrained config, cannot load weights. Use `pretrained=False` for random init.') (load_from, pretrained_loc) = _resolve_pretrained_source(pretrained_cfg) if load_from == 'state_dict': _logger.info(f'Loading pretrained weights from state dict') state_dict = pretrained_loc elif load_from == 'file': _logger.info(f'Loading pretrained weights from file ({pretrained_loc})') if pretrained_cfg.get('custom_load', False): model.load_pretrained(pretrained_loc) return else: state_dict = load_state_dict(pretrained_loc) elif load_from == 'url': _logger.info(f'Loading pretrained weights from url ({pretrained_loc})') if pretrained_cfg.get('custom_load', False): pretrained_loc = download_cached_file(pretrained_loc, progress=_DOWNLOAD_PROGRESS, check_hash=_CHECK_HASH) model.load_pretrained(pretrained_loc) return else: try: state_dict = load_state_dict_from_url(pretrained_loc, map_location='cpu', progress=_DOWNLOAD_PROGRESS, check_hash=_CHECK_HASH, weights_only=True) except TypeError: state_dict = load_state_dict_from_url(pretrained_loc, map_location='cpu', progress=_DOWNLOAD_PROGRESS, check_hash=_CHECK_HASH) elif load_from == 'hf-hub': _logger.info(f'Loading pretrained weights from Hugging Face hub ({pretrained_loc})') if isinstance(pretrained_loc, (list, tuple)): custom_load = pretrained_cfg.get('custom_load', False) if isinstance(custom_load, str) and custom_load == 'hf': load_custom_from_hf(*pretrained_loc, model) return else: state_dict = load_state_dict_from_hf(*pretrained_loc) else: state_dict = load_state_dict_from_hf(pretrained_loc, weights_only=True) else: model_name = pretrained_cfg.get('architecture', 'this model') raise RuntimeError(f'No pretrained weights exist for {model_name}. 
Use `pretrained=False` for random init.') if filter_fn is not None: try: state_dict = filter_fn(state_dict, model) except TypeError as e: state_dict = filter_fn(state_dict) input_convs = pretrained_cfg.get('first_conv', None) if input_convs is not None and in_chans != 3: if isinstance(input_convs, str): input_convs = (input_convs,) for input_conv_name in input_convs: weight_name = input_conv_name + '.weight' try: state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name]) _logger.info(f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)') except NotImplementedError as e: del state_dict[weight_name] strict = False _logger.warning(f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.') classifiers = pretrained_cfg.get('classifier', None) label_offset = pretrained_cfg.get('label_offset', 0) if classifiers is not None: if isinstance(classifiers, str): classifiers = (classifiers,) if num_classes != pretrained_cfg['num_classes']: for classifier_name in classifiers: state_dict.pop(classifier_name + '.weight', None) state_dict.pop(classifier_name + '.bias', None) strict = False elif label_offset > 0: for classifier_name in classifiers: classifier_weight = state_dict[classifier_name + '.weight'] state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:] classifier_bias = state_dict[classifier_name + '.bias'] state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:] load_result = model.load_state_dict(state_dict, strict=strict) if load_result.missing_keys: _logger.info(f"Missing keys ({', '.join(load_result.missing_keys)}) discovered while loading pretrained weights. This is expected if model is being adapted.") if load_result.unexpected_keys: _logger.warning(f"Unexpected keys ({', '.join(load_result.unexpected_keys)}) found while loading pretrained weights. 
This may be expected if model is being adapted.") def pretrained_cfg_for_features(pretrained_cfg): pretrained_cfg = deepcopy(pretrained_cfg) to_remove = ('num_classes', 'classifier', 'global_pool') for tr in to_remove: pretrained_cfg.pop(tr, None) return pretrained_cfg def _filter_kwargs(kwargs, names): if not kwargs or not names: return for n in names: kwargs.pop(n, None) def _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter): default_kwarg_names = ('num_classes', 'global_pool', 'in_chans') if pretrained_cfg.get('fixed_input_size', False): default_kwarg_names += ('img_size',) for n in default_kwarg_names: if n == 'img_size': input_size = pretrained_cfg.get('input_size', None) if input_size is not None: assert len(input_size) == 3 kwargs.setdefault(n, input_size[-2:]) elif n == 'in_chans': input_size = pretrained_cfg.get('input_size', None) if input_size is not None: assert len(input_size) == 3 kwargs.setdefault(n, input_size[0]) elif n == 'num_classes': default_val = pretrained_cfg.get(n, None) if default_val is not None and default_val >= 0: kwargs.setdefault(n, pretrained_cfg[n]) else: default_val = pretrained_cfg.get(n, None) if default_val is not None: kwargs.setdefault(n, pretrained_cfg[n]) _filter_kwargs(kwargs, names=kwargs_filter) def resolve_pretrained_cfg(variant: str, pretrained_cfg=None, pretrained_cfg_overlay=None) -> PretrainedCfg: model_with_tag = variant pretrained_tag = None if pretrained_cfg: if isinstance(pretrained_cfg, dict): pretrained_cfg = PretrainedCfg(**pretrained_cfg) elif isinstance(pretrained_cfg, str): pretrained_tag = pretrained_cfg pretrained_cfg = None if not pretrained_cfg: if pretrained_tag: model_with_tag = '.'.join([variant, pretrained_tag]) pretrained_cfg = get_pretrained_cfg(model_with_tag) if not pretrained_cfg: _logger.warning(f'No pretrained configuration specified for {model_with_tag} model. Using a default. 
Please add a config to the model pretrained_cfg registry or pass explicitly.') pretrained_cfg = PretrainedCfg() pretrained_cfg_overlay = pretrained_cfg_overlay or {} if not pretrained_cfg.architecture: pretrained_cfg_overlay.setdefault('architecture', variant) pretrained_cfg = dataclasses.replace(pretrained_cfg, **pretrained_cfg_overlay) return pretrained_cfg def build_model_with_cfg(model_cls: Callable, variant: str, pretrained: bool, pretrained_cfg: Optional[Dict]=None, pretrained_cfg_overlay: Optional[Dict]=None, model_cfg: Optional[Any]=None, feature_cfg: Optional[Dict]=None, pretrained_strict: bool=True, pretrained_filter_fn: Optional[Callable]=None, kwargs_filter: Optional[Tuple[str]]=None, **kwargs): pruned = kwargs.pop('pruned', False) features = False feature_cfg = feature_cfg or {} pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=pretrained_cfg, pretrained_cfg_overlay=pretrained_cfg_overlay) pretrained_cfg = pretrained_cfg.to_dict() _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter) if kwargs.pop('features_only', False): features = True feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4)) if 'out_indices' in kwargs: feature_cfg['out_indices'] = kwargs.pop('out_indices') if 'feature_cls' in kwargs: feature_cfg['feature_cls'] = kwargs.pop('feature_cls') if model_cfg is None: model = model_cls(**kwargs) else: model = model_cls(cfg=model_cfg, **kwargs) model.pretrained_cfg = pretrained_cfg model.default_cfg = model.pretrained_cfg if pruned: model = adapt_model_from_file(model, variant) num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000)) if pretrained: load_pretrained(model, pretrained_cfg=pretrained_cfg, num_classes=num_classes_pretrained, in_chans=kwargs.get('in_chans', 3), filter_fn=pretrained_filter_fn, strict=pretrained_strict) if features: use_getter = False if 'feature_cls' in feature_cfg: feature_cls = feature_cfg.pop('feature_cls') if isinstance(feature_cls, str): feature_cls = feature_cls.lower() if feature_cls not in ('dict', 'list', 'hook'): feature_cfg.pop('flatten_sequential', None) if 'hook' in feature_cls: feature_cls = FeatureHookNet elif feature_cls == 'list': feature_cls = FeatureListNet elif feature_cls == 'dict': feature_cls = FeatureDictNet elif feature_cls == 'fx': feature_cls = FeatureGraphNet elif feature_cls == 'getter': use_getter = True feature_cls = FeatureGetterNet else: assert False, f'Unknown feature class {feature_cls}' else: feature_cls = FeatureListNet output_fmt = getattr(model, 'output_fmt', None) if output_fmt is not None and (not use_getter): feature_cfg.setdefault('output_fmt', output_fmt) model = feature_cls(model, **feature_cfg) model.pretrained_cfg = pretrained_cfg_for_features(pretrained_cfg) model.default_cfg = model.pretrained_cfg return model # File: pytorch-image-models-main/timm/models/_efficientnet_blocks.py """""" from typing import Callable, Dict, Optional, Type import torch import torch.nn as nn from torch.nn import functional as F from timm.layers import create_conv2d, DropPath, make_divisible, create_act_layer, create_aa, to_2tuple, LayerType, ConvNormAct, get_norm_act_layer, MultiQueryAttention2d, Attention2d __all__ = ['SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual', 'UniversalInvertedResidual', 'MobileAttention'] ModuleType = Type[nn.Module] def num_groups(group_size: Optional[int], channels: int): if not group_size: return 1 else: assert channels % group_size == 0 return 
channels // group_size class SqueezeExcite(nn.Module): def __init__(self, in_chs: int, rd_ratio: float=0.25, rd_channels: Optional[int]=None, act_layer: LayerType=nn.ReLU, gate_layer: LayerType=nn.Sigmoid, force_act_layer: Optional[LayerType]=None, rd_round_fn: Optional[Callable]=None): super(SqueezeExcite, self).__init__() if rd_channels is None: rd_round_fn = rd_round_fn or round rd_channels = rd_round_fn(in_chs * rd_ratio) act_layer = force_act_layer or act_layer self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True) self.act1 = create_act_layer(act_layer, inplace=True) self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((2, 3), keepdim=True) x_se = self.conv_reduce(x_se) x_se = self.act1(x_se) x_se = self.conv_expand(x_se) return x * self.gate(x_se) class ConvBnAct(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int, stride: int=1, dilation: int=1, group_size: int=0, pad_type: str='', skip: bool=False, act_layer: LayerType=nn.ReLU, norm_layer: LayerType=nn.BatchNorm2d, aa_layer: Optional[LayerType]=None, drop_path_rate: float=0.0): super(ConvBnAct, self).__init__() norm_act_layer = get_norm_act_layer(norm_layer, act_layer) groups = num_groups(group_size, in_chs) self.has_skip = skip and stride == 1 and (in_chs == out_chs) use_aa = aa_layer is not None and stride > 1 self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=1 if use_aa else stride, dilation=dilation, groups=groups, padding=pad_type) self.bn1 = norm_act_layer(out_chs, inplace=True) self.aa = create_aa(aa_layer, channels=out_chs, stride=stride, enable=use_aa) self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() def feature_info(self, location): if location == 'expansion': return dict(module='bn1', hook_type='forward', num_chs=self.conv.out_channels) else: return dict(module='', num_chs=self.conv.out_channels) def forward(self, x): shortcut = x x = self.conv(x) x = self.bn1(x) x = self.aa(x) if self.has_skip: x = self.drop_path(x) + shortcut return x class DepthwiseSeparableConv(nn.Module): def __init__(self, in_chs: int, out_chs: int, dw_kernel_size: int=3, stride: int=1, dilation: int=1, group_size: int=1, pad_type: str='', noskip: bool=False, pw_kernel_size: int=1, pw_act: bool=False, s2d: int=0, act_layer: LayerType=nn.ReLU, norm_layer: LayerType=nn.BatchNorm2d, aa_layer: Optional[LayerType]=None, se_layer: Optional[ModuleType]=None, drop_path_rate: float=0.0): super(DepthwiseSeparableConv, self).__init__() norm_act_layer = get_norm_act_layer(norm_layer, act_layer) self.has_skip = (stride == 1 and in_chs == out_chs) and (not noskip) self.has_pw_act = pw_act use_aa = aa_layer is not None and stride > 1 if s2d == 1: sd_chs = int(in_chs * 4) self.conv_s2d = create_conv2d(in_chs, sd_chs, kernel_size=2, stride=2, padding='same') self.bn_s2d = norm_act_layer(sd_chs, sd_chs) dw_kernel_size = (dw_kernel_size + 1) // 2 dw_pad_type = 'same' if dw_kernel_size == 2 else pad_type in_chs = sd_chs use_aa = False else: self.conv_s2d = None self.bn_s2d = None dw_pad_type = pad_type groups = num_groups(group_size, in_chs) self.conv_dw = create_conv2d(in_chs, in_chs, dw_kernel_size, stride=1 if use_aa else stride, dilation=dilation, padding=dw_pad_type, groups=groups) self.bn1 = norm_act_layer(in_chs, inplace=True) self.aa = create_aa(aa_layer, channels=out_chs, stride=stride, enable=use_aa) self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity() self.conv_pw = 
create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) self.bn2 = norm_act_layer(out_chs, inplace=True, apply_act=self.has_pw_act) self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() def feature_info(self, location): if location == 'expansion': return dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels) else: return dict(module='', num_chs=self.conv_pw.out_channels) def forward(self, x): shortcut = x if self.conv_s2d is not None: x = self.conv_s2d(x) x = self.bn_s2d(x) x = self.conv_dw(x) x = self.bn1(x) x = self.aa(x) x = self.se(x) x = self.conv_pw(x) x = self.bn2(x) if self.has_skip: x = self.drop_path(x) + shortcut return x class InvertedResidual(nn.Module): def __init__(self, in_chs: int, out_chs: int, dw_kernel_size: int=3, stride: int=1, dilation: int=1, group_size: int=1, pad_type: str='', noskip: bool=False, exp_ratio: float=1.0, exp_kernel_size: int=1, pw_kernel_size: int=1, s2d: int=0, act_layer: LayerType=nn.ReLU, norm_layer: LayerType=nn.BatchNorm2d, aa_layer: Optional[LayerType]=None, se_layer: Optional[ModuleType]=None, conv_kwargs: Optional[Dict]=None, drop_path_rate: float=0.0): super(InvertedResidual, self).__init__() norm_act_layer = get_norm_act_layer(norm_layer, act_layer) conv_kwargs = conv_kwargs or {} self.has_skip = (in_chs == out_chs and stride == 1) and (not noskip) use_aa = aa_layer is not None and stride > 1 if s2d == 1: sd_chs = int(in_chs * 4) self.conv_s2d = create_conv2d(in_chs, sd_chs, kernel_size=2, stride=2, padding='same') self.bn_s2d = norm_act_layer(sd_chs, sd_chs) dw_kernel_size = (dw_kernel_size + 1) // 2 dw_pad_type = 'same' if dw_kernel_size == 2 else pad_type in_chs = sd_chs use_aa = False else: self.conv_s2d = None self.bn_s2d = None dw_pad_type = pad_type mid_chs = make_divisible(in_chs * exp_ratio) groups = num_groups(group_size, mid_chs) self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) self.bn1 = norm_act_layer(mid_chs, inplace=True) self.conv_dw = create_conv2d(mid_chs, mid_chs, dw_kernel_size, stride=1 if use_aa else stride, dilation=dilation, groups=groups, padding=dw_pad_type, **conv_kwargs) self.bn2 = norm_act_layer(mid_chs, inplace=True) self.aa = create_aa(aa_layer, channels=mid_chs, stride=stride, enable=use_aa) self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) self.bn3 = norm_act_layer(out_chs, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() def feature_info(self, location): if location == 'expansion': return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) else: return dict(module='', num_chs=self.conv_pwl.out_channels) def forward(self, x): shortcut = x if self.conv_s2d is not None: x = self.conv_s2d(x) x = self.bn_s2d(x) x = self.conv_pw(x) x = self.bn1(x) x = self.conv_dw(x) x = self.bn2(x) x = self.aa(x) x = self.se(x) x = self.conv_pwl(x) x = self.bn3(x) if self.has_skip: x = self.drop_path(x) + shortcut return x class LayerScale2d(nn.Module): def __init__(self, dim: int, init_values: float=1e-05, inplace: bool=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class UniversalInvertedResidual(nn.Module): def __init__(self, in_chs: int, out_chs: 
int, dw_kernel_size_start: int=0, dw_kernel_size_mid: int=3, dw_kernel_size_end: int=0, stride: int=1, dilation: int=1, group_size: int=1, pad_type: str='', noskip: bool=False, exp_ratio: float=1.0, act_layer: LayerType=nn.ReLU, norm_layer: LayerType=nn.BatchNorm2d, aa_layer: Optional[LayerType]=None, se_layer: Optional[ModuleType]=None, conv_kwargs: Optional[Dict]=None, drop_path_rate: float=0.0, layer_scale_init_value: Optional[float]=1e-05): super(UniversalInvertedResidual, self).__init__() conv_kwargs = conv_kwargs or {} self.has_skip = (in_chs == out_chs and stride == 1) and (not noskip) if stride > 1: assert dw_kernel_size_start or dw_kernel_size_mid or dw_kernel_size_end if dw_kernel_size_start: dw_start_stride = stride if not dw_kernel_size_mid else 1 dw_start_groups = num_groups(group_size, in_chs) self.dw_start = ConvNormAct(in_chs, in_chs, dw_kernel_size_start, stride=dw_start_stride, dilation=dilation, groups=dw_start_groups, padding=pad_type, apply_act=False, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, **conv_kwargs) else: self.dw_start = nn.Identity() mid_chs = make_divisible(in_chs * exp_ratio) self.pw_exp = ConvNormAct(in_chs, mid_chs, 1, padding=pad_type, act_layer=act_layer, norm_layer=norm_layer, **conv_kwargs) if dw_kernel_size_mid: groups = num_groups(group_size, mid_chs) self.dw_mid = ConvNormAct(mid_chs, mid_chs, dw_kernel_size_mid, stride=stride, dilation=dilation, groups=groups, padding=pad_type, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, **conv_kwargs) else: self.dw_mid = nn.Identity() self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() self.pw_proj = ConvNormAct(mid_chs, out_chs, 1, padding=pad_type, apply_act=False, act_layer=act_layer, norm_layer=norm_layer, **conv_kwargs) if dw_kernel_size_end: dw_end_stride = stride if not dw_kernel_size_start and (not dw_kernel_size_mid) else 1 dw_end_groups = num_groups(group_size, out_chs) if dw_end_stride > 1: assert not aa_layer self.dw_end = ConvNormAct(out_chs, out_chs, dw_kernel_size_end, stride=dw_end_stride, dilation=dilation, groups=dw_end_groups, padding=pad_type, apply_act=False, act_layer=act_layer, norm_layer=norm_layer, **conv_kwargs) else: self.dw_end = nn.Identity() if layer_scale_init_value is not None: self.layer_scale = LayerScale2d(out_chs, layer_scale_init_value) else: self.layer_scale = nn.Identity() self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() def feature_info(self, location): if location == 'expansion': return dict(module='pw_proj.conv', hook_type='forward_pre', num_chs=self.pw_proj.conv.in_channels) else: return dict(module='', num_chs=self.pw_proj.conv.out_channels) def forward(self, x): shortcut = x x = self.dw_start(x) x = self.pw_exp(x) x = self.dw_mid(x) x = self.se(x) x = self.pw_proj(x) x = self.dw_end(x) x = self.layer_scale(x) if self.has_skip: x = self.drop_path(x) + shortcut return x class MobileAttention(nn.Module): def __init__(self, in_chs: int, out_chs: int, stride: int=1, dw_kernel_size: int=3, dilation: int=1, group_size: int=1, pad_type: str='', num_heads: int=8, key_dim: int=64, value_dim: int=64, use_multi_query: bool=False, query_strides: int=(1, 1), kv_stride: int=1, cpe_dw_kernel_size: int=3, noskip: bool=False, act_layer: LayerType=nn.ReLU, norm_layer: LayerType=nn.BatchNorm2d, aa_layer: Optional[LayerType]=None, drop_path_rate: float=0.0, attn_drop: float=0.0, proj_drop: float=0.0, layer_scale_init_value: Optional[float]=1e-05, use_bias: bool=False, use_cpe: bool=False): 
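# A minimal illustrative sketch (not from the timm codebase): shape checks for
# the LayerScale2d and UniversalInvertedResidual blocks defined above. Channel
# and spatial sizes are arbitrary example values; default norm/act layers and
# no SE layer are assumed.
import torch

_scale = LayerScale2d(dim=64, init_values=1e-5)
assert _scale(torch.randn(2, 64, 8, 8)).shape == (2, 64, 8, 8)  # per-channel scale, shape preserved

_blk = UniversalInvertedResidual(in_chs=32, out_chs=64, dw_kernel_size_mid=3, stride=2, exp_ratio=4.0)
_y = _blk(torch.randn(2, 32, 56, 56))
assert _y.shape == (2, 64, 28, 28)  # stride-2 middle depthwise halves H/W; no residual since in_chs != out_chs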
super(MobileAttention, self).__init__() norm_act_layer = get_norm_act_layer(norm_layer, act_layer) self.has_skip = (stride == 1 and in_chs == out_chs) and (not noskip) self.query_strides = to_2tuple(query_strides) self.kv_stride = kv_stride self.has_query_stride = any([s > 1 for s in self.query_strides]) if use_cpe: self.conv_cpe_dw = create_conv2d(in_chs, in_chs, kernel_size=cpe_dw_kernel_size, dilation=dilation, depthwise=True, bias=True) else: self.conv_cpe_dw = None self.norm = norm_act_layer(in_chs, apply_act=False) if num_heads is None: assert in_chs % key_dim == 0 num_heads = in_chs // key_dim if use_multi_query: self.attn = MultiQueryAttention2d(in_chs, dim_out=out_chs, num_heads=num_heads, key_dim=key_dim, value_dim=value_dim, query_strides=query_strides, kv_stride=kv_stride, dilation=dilation, padding=pad_type, dw_kernel_size=dw_kernel_size, attn_drop=attn_drop, proj_drop=proj_drop) else: self.attn = Attention2d(in_chs, dim_out=out_chs, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, bias=use_bias) if layer_scale_init_value is not None: self.layer_scale = LayerScale2d(out_chs, layer_scale_init_value) else: self.layer_scale = nn.Identity() self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() def feature_info(self, location): if location == 'expansion': return dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels) else: return dict(module='', num_chs=self.conv_pw.out_channels) def forward(self, x): if self.conv_cpe_dw is not None: x_cpe = self.conv_cpe_dw(x) x = x + x_cpe shortcut = x x = self.norm(x) x = self.attn(x) x = self.layer_scale(x) if self.has_skip: x = self.drop_path(x) + shortcut return x class CondConvResidual(InvertedResidual): def __init__(self, in_chs: int, out_chs: int, dw_kernel_size: int=3, stride: int=1, dilation: int=1, group_size: int=1, pad_type: str='', noskip: bool=False, exp_ratio: float=1.0, exp_kernel_size: int=1, pw_kernel_size: int=1, act_layer: LayerType=nn.ReLU, norm_layer: LayerType=nn.BatchNorm2d, aa_layer: Optional[LayerType]=None, se_layer: Optional[ModuleType]=None, num_experts: int=0, drop_path_rate: float=0.0): self.num_experts = num_experts conv_kwargs = dict(num_experts=self.num_experts) super(CondConvResidual, self).__init__(in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, group_size=group_size, pad_type=pad_type, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size, pw_kernel_size=pw_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, se_layer=se_layer, conv_kwargs=conv_kwargs, drop_path_rate=drop_path_rate) self.routing_fn = nn.Linear(in_chs, self.num_experts) def forward(self, x): shortcut = x pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs)) x = self.conv_pw(x, routing_weights) x = self.bn1(x) x = self.conv_dw(x, routing_weights) x = self.bn2(x) x = self.se(x) x = self.conv_pwl(x, routing_weights) x = self.bn3(x) if self.has_skip: x = self.drop_path(x) + shortcut return x class EdgeResidual(nn.Module): def __init__(self, in_chs: int, out_chs: int, exp_kernel_size: int=3, stride: int=1, dilation: int=1, group_size: int=0, pad_type: str='', force_in_chs: int=0, noskip: bool=False, exp_ratio: float=1.0, pw_kernel_size: int=1, act_layer: LayerType=nn.ReLU, norm_layer: LayerType=nn.BatchNorm2d, aa_layer: Optional[LayerType]=None, se_layer: Optional[ModuleType]=None, drop_path_rate: float=0.0): super(EdgeResidual, self).__init__() 
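# An illustrative sketch (not from the timm codebase) of the routing computed by
# the CondConvResidual defined above: per sample, globally pooled features are
# projected to num_experts logits and sigmoid-squashed, and CondConv2d mixes its
# expert kernels with those weights. Sizes are arbitrary example values.
import torch
import torch.nn as nn
import torch.nn.functional as F

_x = torch.randn(4, 32, 14, 14)                     # (batch, in_chs, H, W)
_routing_fn = nn.Linear(32, 3)                      # in_chs -> num_experts, mirroring __init__ above
_pooled = F.adaptive_avg_pool2d(_x, 1).flatten(1)   # (4, 32)
_routing_weights = torch.sigmoid(_routing_fn(_pooled))
assert _routing_weights.shape == (4, 3)             # one expert-mixing vector per sample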
norm_act_layer = get_norm_act_layer(norm_layer, act_layer) if force_in_chs > 0: mid_chs = make_divisible(force_in_chs * exp_ratio) else: mid_chs = make_divisible(in_chs * exp_ratio) groups = num_groups(group_size, mid_chs) self.has_skip = (in_chs == out_chs and stride == 1) and (not noskip) use_aa = aa_layer is not None and stride > 1 self.conv_exp = create_conv2d(in_chs, mid_chs, exp_kernel_size, stride=1 if use_aa else stride, dilation=dilation, groups=groups, padding=pad_type) self.bn1 = norm_act_layer(mid_chs, inplace=True) self.aa = create_aa(aa_layer, channels=mid_chs, stride=stride, enable=use_aa) self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type) self.bn2 = norm_act_layer(out_chs, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() def feature_info(self, location): if location == 'expansion': return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) else: return dict(module='', num_chs=self.conv_pwl.out_channels) def forward(self, x): shortcut = x x = self.conv_exp(x) x = self.bn1(x) x = self.aa(x) x = self.se(x) x = self.conv_pwl(x) x = self.bn2(x) if self.has_skip: x = self.drop_path(x) + shortcut return x # File: pytorch-image-models-main/timm/models/_efficientnet_builder.py """""" from typing import Callable, Optional import logging import math import re from copy import deepcopy from functools import partial from typing import Any, Dict, List import torch.nn as nn from timm.layers import CondConv2d, get_condconv_initializer, get_act_layer, get_attn, make_divisible, LayerType from ._efficientnet_blocks import * from ._manipulate import named_modules __all__ = ['EfficientNetBuilder', 'decode_arch_def', 'efficientnet_init_weights', 'resolve_bn_args', 'resolve_act_layer', 'round_channels', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT'] _logger = logging.getLogger(__name__) _DEBUG_BUILDER = False BN_MOMENTUM_TF_DEFAULT = 1 - 0.99 BN_EPS_TF_DEFAULT = 0.001 _BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT) BlockArgs = List[List[Dict[str, Any]]] def get_bn_args_tf(): return _BN_ARGS_TF.copy() def resolve_bn_args(kwargs): bn_args = {} bn_momentum = kwargs.pop('bn_momentum', None) if bn_momentum is not None: bn_args['momentum'] = bn_momentum bn_eps = kwargs.pop('bn_eps', None) if bn_eps is not None: bn_args['eps'] = bn_eps return bn_args def resolve_act_layer(kwargs, default='relu'): return get_act_layer(kwargs.pop('act_layer', default)) def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None, round_limit=0.9): if not multiplier: return channels return make_divisible(channels * multiplier, divisor, channel_min, round_limit=round_limit) def _log_info_if(msg, condition): if condition: _logger.info(msg) def _parse_ksize(ss): if ss.isdigit(): return int(ss) else: return [int(k) for k in ss.split('.')] def _decode_block_str(block_str): assert isinstance(block_str, str) ops = block_str.split('_') block_type = ops[0] ops = ops[1:] options = {} skip = None for op in ops: if op == 'noskip': skip = False elif op == 'skip': skip = True elif op.startswith('n'): key = op[0] v = op[1:] if v == 're': value = get_act_layer('relu') elif v == 'r6': value = get_act_layer('relu6') elif v == 'hs': value = get_act_layer('hard_swish') elif v == 'sw': value = get_act_layer('swish') elif v == 'mi': value = get_act_layer('mish') else: continue options[key] = value else: splits = 
re.split('(\\d.*)', op) if len(splits) >= 2: (key, value) = splits[:2] options[key] = value act_layer = options['n'] if 'n' in options else None start_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 end_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 force_in_chs = int(options['fc']) if 'fc' in options else 0 num_repeat = int(options['r']) block_args = dict(block_type=block_type, out_chs=int(options['c']), stride=int(options['s']), act_layer=act_layer) if block_type == 'ir': block_args.update(dict(dw_kernel_size=_parse_ksize(options['k']), exp_kernel_size=start_kernel_size, pw_kernel_size=end_kernel_size, exp_ratio=float(options['e']), se_ratio=float(options.get('se', 0.0)), noskip=skip is False, s2d=int(options.get('d', 0)) > 0)) if 'cc' in options: block_args['num_experts'] = int(options['cc']) elif block_type == 'ds' or block_type == 'dsa': block_args.update(dict(dw_kernel_size=_parse_ksize(options['k']), pw_kernel_size=end_kernel_size, se_ratio=float(options.get('se', 0.0)), pw_act=block_type == 'dsa', noskip=block_type == 'dsa' or skip is False, s2d=int(options.get('d', 0)) > 0)) elif block_type == 'er': block_args.update(dict(exp_kernel_size=_parse_ksize(options['k']), pw_kernel_size=end_kernel_size, exp_ratio=float(options['e']), force_in_chs=force_in_chs, se_ratio=float(options.get('se', 0.0)), noskip=skip is False)) elif block_type == 'cn': block_args.update(dict(kernel_size=int(options['k']), skip=skip is True)) elif block_type == 'uir': start_kernel_size = _parse_ksize(options['a']) if 'a' in options else 0 end_kernel_size = _parse_ksize(options['p']) if 'p' in options else 0 block_args.update(dict(dw_kernel_size_start=start_kernel_size, dw_kernel_size_mid=_parse_ksize(options['k']), dw_kernel_size_end=end_kernel_size, exp_ratio=float(options['e']), se_ratio=float(options.get('se', 0.0)), noskip=skip is False)) elif block_type == 'mha': kv_dim = int(options['d']) block_args.update(dict(dw_kernel_size=_parse_ksize(options['k']), num_heads=int(options['h']), key_dim=kv_dim, value_dim=kv_dim, kv_stride=int(options.get('v', 1)), noskip=skip is False)) elif block_type == 'mqa': kv_dim = int(options['d']) block_args.update(dict(dw_kernel_size=_parse_ksize(options['k']), num_heads=int(options['h']), key_dim=kv_dim, value_dim=kv_dim, kv_stride=int(options.get('v', 1)), noskip=skip is False)) else: assert False, 'Unknown block type (%s)' % block_type if 'gs' in options: block_args['group_size'] = int(options['gs']) return (block_args, num_repeat) def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): num_repeat = sum(repeats) if depth_trunc == 'round': num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) else: num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) repeats_scaled = [] for r in repeats[::-1]: rs = max(1, round(r / num_repeat * num_repeat_scaled)) repeats_scaled.append(rs) num_repeat -= r num_repeat_scaled -= rs repeats_scaled = repeats_scaled[::-1] sa_scaled = [] for (ba, rep) in zip(stack_args, repeats_scaled): sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) return sa_scaled def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False, group_size=None): arch_args = [] if isinstance(depth_multiplier, tuple): assert len(depth_multiplier) == len(arch_def) else: depth_multiplier = (depth_multiplier,) * len(arch_def) for (stack_idx, (block_strings, multiplier)) in enumerate(zip(arch_def, depth_multiplier)): assert 
isinstance(block_strings, list) stack_args = [] repeats = [] for block_str in block_strings: assert isinstance(block_str, str) (ba, rep) = _decode_block_str(block_str) if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: ba['num_experts'] *= experts_multiplier if group_size is not None: ba.setdefault('group_size', group_size) stack_args.append(ba) repeats.append(rep) if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1): arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc)) else: arch_args.append(_scale_stage_depth(stack_args, repeats, multiplier, depth_trunc)) return arch_args class EfficientNetBuilder: def __init__(self, output_stride: int=32, pad_type: str='', round_chs_fn: Callable=round_channels, se_from_exp: bool=False, act_layer: Optional[LayerType]=None, norm_layer: Optional[LayerType]=None, aa_layer: Optional[LayerType]=None, se_layer: Optional[LayerType]=None, drop_path_rate: float=0.0, layer_scale_init_value: Optional[float]=None, feature_location: str=''): self.output_stride = output_stride self.pad_type = pad_type self.round_chs_fn = round_chs_fn self.se_from_exp = se_from_exp self.act_layer = act_layer self.norm_layer = norm_layer self.aa_layer = aa_layer self.se_layer = get_attn(se_layer) try: self.se_layer(8, rd_ratio=1.0) self.se_has_ratio = True except TypeError: self.se_has_ratio = False self.drop_path_rate = drop_path_rate self.layer_scale_init_value = layer_scale_init_value if feature_location == 'depthwise': _logger.warning("feature_location=='depthwise' is deprecated, using 'expansion'") feature_location = 'expansion' self.feature_location = feature_location assert feature_location in ('bottleneck', 'expansion', '') self.verbose = _DEBUG_BUILDER self.in_chs = None self.features = [] def _make_block(self, ba, block_idx, block_count): drop_path_rate = self.drop_path_rate * block_idx / block_count bt = ba.pop('block_type') ba['in_chs'] = self.in_chs ba['out_chs'] = self.round_chs_fn(ba['out_chs']) s2d = ba.get('s2d', 0) if s2d > 0: ba['out_chs'] *= 4 if 'force_in_chs' in ba and ba['force_in_chs']: ba['force_in_chs'] = self.round_chs_fn(ba['force_in_chs']) ba['pad_type'] = self.pad_type ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer assert ba['act_layer'] is not None ba['norm_layer'] = self.norm_layer ba['drop_path_rate'] = drop_path_rate if self.aa_layer is not None: ba['aa_layer'] = self.aa_layer se_ratio = ba.pop('se_ratio', None) if se_ratio and self.se_layer is not None: if not self.se_from_exp: se_ratio /= ba.get('exp_ratio', 1.0) if s2d == 1: se_ratio /= 4 if self.se_has_ratio: ba['se_layer'] = partial(self.se_layer, rd_ratio=se_ratio) else: ba['se_layer'] = self.se_layer if bt == 'ir': _log_info_if(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = CondConvResidual(**ba) if ba.get('num_experts', 0) else InvertedResidual(**ba) elif bt == 'ds' or bt == 'dsa': _log_info_if(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = DepthwiseSeparableConv(**ba) elif bt == 'er': _log_info_if(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = EdgeResidual(**ba) elif bt == 'cn': _log_info_if(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = ConvBnAct(**ba) elif bt == 'uir': _log_info_if(' UniversalInvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = UniversalInvertedResidual(**ba, layer_scale_init_value=self.layer_scale_init_value) elif bt == 'mqa': 
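# Note: the block strings decoded by _decode_block_str above use a compact
# grammar, e.g. 'ir_r2_k3_s2_e6_c64_se0.25' = InvertedResidual, repeat 2,
# dw kernel 3, stride 2, expansion 6, 64 out channels, SE ratio 0.25.
# An illustrative decoding sketch (arbitrary example strings):
#   arch = [['ds_r1_k3_s1_e1_c16'], ['ir_r2_k3_s2_e6_c24']]
#   stages = decode_arch_def(arch)  # per-stage lists of per-block arg dicts
#   assert stages[1][0]['block_type'] == 'ir' and len(stages[1]) == 2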
_log_info_if(' MobileMultiQueryAttention {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = MobileAttention(**ba, use_multi_query=True, layer_scale_init_value=self.layer_scale_init_value) elif bt == 'mha': _log_info_if(' MobileMultiHeadAttention {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = MobileAttention(**ba, layer_scale_init_value=self.layer_scale_init_value) else: assert False, 'Unknown block type (%s) while building model.' % bt self.in_chs = ba['out_chs'] return block def __call__(self, in_chs, model_block_args): _log_info_if('Building model trunk with %d stages...' % len(model_block_args), self.verbose) self.in_chs = in_chs total_block_count = sum([len(x) for x in model_block_args]) total_block_idx = 0 current_stride = 2 current_dilation = 1 stages = [] if model_block_args[0][0]['stride'] > 1: feature_info = dict(module='bn1', num_chs=in_chs, stage=0, reduction=current_stride) self.features.append(feature_info) space2depth = 0 for (stack_idx, stack_args) in enumerate(model_block_args): last_stack = stack_idx + 1 == len(model_block_args) _log_info_if('Stack: {}'.format(stack_idx), self.verbose) assert isinstance(stack_args, list) blocks = [] for (block_idx, block_args) in enumerate(stack_args): last_block = block_idx + 1 == len(stack_args) _log_info_if(' Block: {}'.format(block_idx), self.verbose) assert block_args['stride'] in (1, 2) if block_idx >= 1: block_args['stride'] = 1 if not space2depth and block_args.pop('s2d', False): assert block_args['stride'] == 1 space2depth = 1 if space2depth > 0: if space2depth == 2 and block_args['stride'] == 2: block_args['stride'] = 1 block_args['exp_ratio'] /= 4 space2depth = 0 else: block_args['s2d'] = space2depth extract_features = False if last_block: next_stack_idx = stack_idx + 1 extract_features = next_stack_idx >= len(model_block_args) or model_block_args[next_stack_idx][0]['stride'] > 1 next_dilation = current_dilation if block_args['stride'] > 1: next_output_stride = current_stride * block_args['stride'] if next_output_stride > self.output_stride: next_dilation = current_dilation * block_args['stride'] block_args['stride'] = 1 _log_info_if(' Converting stride to dilation to maintain output_stride=={}'.format(self.output_stride), self.verbose) else: current_stride = next_output_stride block_args['dilation'] = current_dilation if next_dilation != current_dilation: current_dilation = next_dilation block = self._make_block(block_args, total_block_idx, total_block_count) blocks.append(block) if space2depth == 1: space2depth = 2 if extract_features: feature_info = dict(stage=stack_idx + 1, reduction=current_stride, **block.feature_info(self.feature_location)) leaf_name = feature_info.get('module', '') if leaf_name: feature_info['module'] = '.'.join([f'blocks.{stack_idx}.{block_idx}', leaf_name]) else: assert last_block feature_info['module'] = f'blocks.{stack_idx}' self.features.append(feature_info) total_block_idx += 1 stages.append(nn.Sequential(*blocks)) return stages def _init_weight_goog(m, n='', fix_group_fanout=True): if isinstance(m, CondConv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels if fix_group_fanout: fan_out //= m.groups init_weight_fn = get_condconv_initializer(lambda w: nn.init.normal_(w, 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) init_weight_fn(m.weight) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels if fix_group_fanout: fan_out //= m.groups 
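# Note: the conv branch below is He/Kaiming normal init with fan-out accounting,
# std = sqrt(2 / fan_out) where fan_out = k_h * k_w * out_channels (divided by
# groups when fix_group_fanout). An equivalence sketch for groups == 1:
import math
import torch.nn as nn

_conv = nn.Conv2d(16, 32, kernel_size=3)
_fan_out = _conv.kernel_size[0] * _conv.kernel_size[1] * _conv.out_channels  # 3 * 3 * 32 = 288
nn.init.normal_(_conv.weight, 0, math.sqrt(2.0 / _fan_out))
nn.init.kaiming_normal_(_conv.weight, mode='fan_out', nonlinearity='relu')  # same distribution when groups == 1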
nn.init.normal_(m.weight, 0, math.sqrt(2.0 / fan_out)) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): fan_out = m.weight.size(0) fan_in = 0 if 'routing_fn' in n: fan_in = m.weight.size(1) init_range = 1.0 / math.sqrt(fan_in + fan_out) nn.init.uniform_(m.weight, -init_range, init_range) nn.init.zeros_(m.bias) def efficientnet_init_weights(model: nn.Module, init_fn=None): init_fn = init_fn or _init_weight_goog for (n, m) in model.named_modules(): init_fn(m, n) for (n, m) in named_modules(model): if hasattr(m, 'init_weights'): m.init_weights() # File: pytorch-image-models-main/timm/models/_factory.py import os from typing import Any, Dict, Optional, Union from urllib.parse import urlsplit from timm.layers import set_layer_config from ._helpers import load_checkpoint from ._hub import load_model_config_from_hf from ._pretrained import PretrainedCfg from ._registry import is_model, model_entrypoint, split_model_name_tag __all__ = ['parse_model_name', 'safe_model_name', 'create_model'] def parse_model_name(model_name: str): if model_name.startswith('hf_hub'): model_name = model_name.replace('hf_hub', 'hf-hub') parsed = urlsplit(model_name) assert parsed.scheme in ('', 'timm', 'hf-hub') if parsed.scheme == 'hf-hub': return (parsed.scheme, parsed.path) else: model_name = os.path.split(parsed.path)[-1] return ('timm', model_name) def safe_model_name(model_name: str, remove_source: bool=True): def make_safe(name): return ''.join((c if c.isalnum() else '_' for c in name)).rstrip('_') if remove_source: model_name = parse_model_name(model_name)[-1] return make_safe(model_name) def create_model(model_name: str, pretrained: bool=False, pretrained_cfg: Optional[Union[str, Dict[str, Any], PretrainedCfg]]=None, pretrained_cfg_overlay: Optional[Dict[str, Any]]=None, checkpoint_path: str='', scriptable: Optional[bool]=None, exportable: Optional[bool]=None, no_jit: Optional[bool]=None, **kwargs): kwargs = {k: v for (k, v) in kwargs.items() if v is not None} (model_source, model_name) = parse_model_name(model_name) if model_source == 'hf-hub': assert not pretrained_cfg, 'pretrained_cfg should not be set when sourcing model from Hugging Face Hub.' 
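# An illustrative usage sketch (not from the timm codebase) of the model-name
# forms create_model accepts; 'resnet50' is a registered timm architecture and
# the hf-hub repo id below is a placeholder, so that call is left commented out.
import torch
import timm

_m = timm.create_model('resnet50', pretrained=False, num_classes=10)
_feats = timm.create_model('resnet50', pretrained=False, features_only=True, out_indices=(1, 2, 3))
assert _feats.feature_info.channels() == [256, 512, 1024]  # feature strides 4, 8, 16
_maps = _feats(torch.randn(1, 3, 224, 224))                # list of 3 feature maps
# _hub = timm.create_model('hf-hub:some-org/some-model', pretrained=True)  # placeholder repo id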
(pretrained_cfg, model_name, model_args) = load_model_config_from_hf(model_name) if model_args: for (k, v) in model_args.items(): kwargs.setdefault(k, v) else: (model_name, pretrained_tag) = split_model_name_tag(model_name) if pretrained_tag and (not pretrained_cfg): pretrained_cfg = pretrained_tag if not is_model(model_name): raise RuntimeError('Unknown model (%s)' % model_name) create_fn = model_entrypoint(model_name) with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit): model = create_fn(pretrained=pretrained, pretrained_cfg=pretrained_cfg, pretrained_cfg_overlay=pretrained_cfg_overlay, **kwargs) if checkpoint_path: load_checkpoint(model, checkpoint_path) return model # File: pytorch-image-models-main/timm/models/_features.py """""" from collections import OrderedDict, defaultdict from copy import deepcopy from functools import partial from typing import Dict, List, Optional, Sequence, Tuple, Union import torch import torch.nn as nn from torch.utils.checkpoint import checkpoint from timm.layers import Format, _assert __all__ = ['FeatureInfo', 'FeatureHooks', 'FeatureDictNet', 'FeatureListNet', 'FeatureHookNet', 'FeatureGetterNet', 'feature_take_indices'] def feature_take_indices(num_features: int, indices: Optional[Union[int, List[int]]]=None, as_set: bool=False) -> Tuple[List[int], int]: if indices is None: indices = num_features if isinstance(indices, int): _assert(0 < indices <= num_features, f'last-n ({indices}) is out of range (1 to {num_features})') take_indices = [num_features - indices + i for i in range(indices)] else: take_indices: List[int] = [] for i in indices: idx = num_features + i if i < 0 else i _assert(0 <= idx < num_features, f'feature index {idx} is out of range (0 to {num_features - 1})') take_indices.append(idx) if not torch.jit.is_scripting() and as_set: return (set(take_indices), max(take_indices)) return (take_indices, max(take_indices)) def _out_indices_as_tuple(x: Union[int, Tuple[int, ...]]) -> Tuple[int, ...]: if isinstance(x, int): return tuple(range(-x, 0)) return tuple(x) OutIndicesT = Union[int, Tuple[int, ...]] class FeatureInfo: def __init__(self, feature_info: List[Dict], out_indices: OutIndicesT): out_indices = _out_indices_as_tuple(out_indices) prev_reduction = 1 for (i, fi) in enumerate(feature_info): assert 'num_chs' in fi and fi['num_chs'] > 0 assert 'reduction' in fi and fi['reduction'] >= prev_reduction prev_reduction = fi['reduction'] assert 'module' in fi fi.setdefault('index', i) self.out_indices = out_indices self.info = feature_info def from_other(self, out_indices: OutIndicesT): out_indices = _out_indices_as_tuple(out_indices) return FeatureInfo(deepcopy(self.info), out_indices) def get(self, key: str, idx: Optional[Union[int, List[int]]]=None): if idx is None: return [self.info[i][key] for i in self.out_indices] if isinstance(idx, (tuple, list)): return [self.info[i][key] for i in idx] else: return self.info[idx][key] def get_dicts(self, keys: Optional[List[str]]=None, idx: Optional[Union[int, List[int]]]=None): if idx is None: if keys is None: return [self.info[i] for i in self.out_indices] else: return [{k: self.info[i][k] for k in keys} for i in self.out_indices] if isinstance(idx, (tuple, list)): return [self.info[i] if keys is None else {k: self.info[i][k] for k in keys} for i in idx] else: return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys} def channels(self, idx: Optional[Union[int, List[int]]]=None): return self.get('num_chs', idx) def reduction(self, idx: 
Optional[Union[int, List[int]]]=None): return self.get('reduction', idx) def module_name(self, idx: Optional[Union[int, List[int]]]=None): return self.get('module', idx) def __getitem__(self, item): return self.info[item] def __len__(self): return len(self.info) class FeatureHooks: def __init__(self, hooks: Sequence[Union[str, Dict]], named_modules: dict, out_map: Sequence[Union[int, str]]=None, default_hook_type: str='forward'): self._feature_outputs = defaultdict(OrderedDict) self._handles = [] modules = {k: v for (k, v) in named_modules} for (i, h) in enumerate(hooks): hook_name = h if isinstance(h, str) else h['module'] m = modules[hook_name] hook_id = out_map[i] if out_map else hook_name hook_fn = partial(self._collect_output_hook, hook_id) hook_type = default_hook_type if isinstance(h, dict): hook_type = h.get('hook_type', default_hook_type) if hook_type == 'forward_pre': handle = m.register_forward_pre_hook(hook_fn) elif hook_type == 'forward': handle = m.register_forward_hook(hook_fn) else: assert False, 'Unsupported hook type' self._handles.append(handle) def _collect_output_hook(self, hook_id, *args): x = args[-1] if isinstance(x, tuple): x = x[0] self._feature_outputs[x.device][hook_id] = x def get_output(self, device) -> Dict[str, torch.tensor]: output = self._feature_outputs[device] self._feature_outputs[device] = OrderedDict() return output def _module_list(module, flatten_sequential=False): ml = [] for (name, module) in module.named_children(): if flatten_sequential and isinstance(module, nn.Sequential): for (child_name, child_module) in module.named_children(): combined = [name, child_name] ml.append(('_'.join(combined), '.'.join(combined), child_module)) else: ml.append((name, name, module)) return ml def _get_feature_info(net, out_indices: OutIndicesT): feature_info = getattr(net, 'feature_info') if isinstance(feature_info, FeatureInfo): return feature_info.from_other(out_indices) elif isinstance(feature_info, (list, tuple)): return FeatureInfo(net.feature_info, out_indices) else: assert False, 'Provided feature_info is not valid' def _get_return_layers(feature_info, out_map): module_names = feature_info.module_name() return_layers = {} for (i, name) in enumerate(module_names): return_layers[name] = out_map[i] if out_map is not None else feature_info.out_indices[i] return return_layers class FeatureDictNet(nn.ModuleDict): def __init__(self, model: nn.Module, out_indices: OutIndicesT=(0, 1, 2, 3, 4), out_map: Sequence[Union[int, str]]=None, output_fmt: str='NCHW', feature_concat: bool=False, flatten_sequential: bool=False): super(FeatureDictNet, self).__init__() self.feature_info = _get_feature_info(model, out_indices) self.output_fmt = Format(output_fmt) self.concat = feature_concat self.grad_checkpointing = False self.return_layers = {} return_layers = _get_return_layers(self.feature_info, out_map) modules = _module_list(model, flatten_sequential=flatten_sequential) remaining = set(return_layers.keys()) layers = OrderedDict() for (new_name, old_name, module) in modules: layers[new_name] = module if old_name in remaining: self.return_layers[new_name] = str(return_layers[old_name]) remaining.remove(old_name) if not remaining: break assert not remaining and len(self.return_layers) == len(return_layers), f'Return layers ({remaining}) are not present in model' self.update(layers) def set_grad_checkpointing(self, enable: bool=True): self.grad_checkpointing = enable def _collect(self, x) -> Dict[str, torch.Tensor]: out = OrderedDict() for (i, (name, module)) in 
enumerate(self.items()): if self.grad_checkpointing and (not torch.jit.is_scripting()): first_or_last_module = i == 0 or i == max(len(self) - 1, 0) x = module(x) if first_or_last_module else checkpoint(module, x) else: x = module(x) if name in self.return_layers: out_id = self.return_layers[name] if isinstance(x, (tuple, list)): out[out_id] = torch.cat(x, 1) if self.concat else x[0] else: out[out_id] = x return out def forward(self, x) -> Dict[str, torch.Tensor]: return self._collect(x) class FeatureListNet(FeatureDictNet): def __init__(self, model: nn.Module, out_indices: OutIndicesT=(0, 1, 2, 3, 4), output_fmt: str='NCHW', feature_concat: bool=False, flatten_sequential: bool=False): super().__init__(model, out_indices=out_indices, output_fmt=output_fmt, feature_concat=feature_concat, flatten_sequential=flatten_sequential) def forward(self, x) -> List[torch.Tensor]: return list(self._collect(x).values()) class FeatureHookNet(nn.ModuleDict): def __init__(self, model: nn.Module, out_indices: OutIndicesT=(0, 1, 2, 3, 4), out_map: Optional[Sequence[Union[int, str]]]=None, return_dict: bool=False, output_fmt: str='NCHW', no_rewrite: Optional[bool]=None, flatten_sequential: bool=False, default_hook_type: str='forward'): super().__init__() assert not torch.jit.is_scripting() self.feature_info = _get_feature_info(model, out_indices) self.return_dict = return_dict self.output_fmt = Format(output_fmt) self.grad_checkpointing = False if no_rewrite is None: no_rewrite = not flatten_sequential layers = OrderedDict() hooks = [] if no_rewrite: assert not flatten_sequential if hasattr(model, 'reset_classifier'): model.reset_classifier(0) layers['body'] = model hooks.extend(self.feature_info.get_dicts()) else: modules = _module_list(model, flatten_sequential=flatten_sequential) remaining = {f['module']: f['hook_type'] if 'hook_type' in f else default_hook_type for f in self.feature_info.get_dicts()} for (new_name, old_name, module) in modules: layers[new_name] = module for (fn, fm) in module.named_modules(prefix=old_name): if fn in remaining: hooks.append(dict(module=fn, hook_type=remaining[fn])) del remaining[fn] if not remaining: break assert not remaining, f'Return layers ({remaining}) are not present in model' self.update(layers) self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map) def set_grad_checkpointing(self, enable: bool=True): self.grad_checkpointing = enable def forward(self, x): for (i, (name, module)) in enumerate(self.items()): if self.grad_checkpointing and (not torch.jit.is_scripting()): first_or_last_module = i == 0 or i == max(len(self) - 1, 0) x = module(x) if first_or_last_module else checkpoint(module, x) else: x = module(x) out = self.hooks.get_output(x.device) return out if self.return_dict else list(out.values()) class FeatureGetterNet(nn.ModuleDict): def __init__(self, model: nn.Module, out_indices: OutIndicesT=4, out_map: Optional[Sequence[Union[int, str]]]=None, return_dict: bool=False, output_fmt: str='NCHW', norm: bool=False, prune: bool=True): super().__init__() if prune and hasattr(model, 'prune_intermediate_layers'): out_indices = model.prune_intermediate_layers(out_indices, prune_norm=not norm) self.feature_info = _get_feature_info(model, out_indices) self.model = model self.out_indices = out_indices self.out_map = out_map self.return_dict = return_dict self.output_fmt = Format(output_fmt) self.norm = norm def forward(self, x): features = self.model.forward_intermediates(x, indices=self.out_indices, norm=self.norm, output_fmt=self.output_fmt, 
intermediates_only=True) return features # File: pytorch-image-models-main/timm/models/_features_fx.py """""" from typing import Callable, Dict, List, Optional, Union, Tuple, Type import torch from torch import nn from ._features import _get_feature_info, _get_return_layers try: from torchvision.models.feature_extraction import create_feature_extractor as _create_feature_extractor from torchvision.models.feature_extraction import get_graph_node_names as _get_graph_node_names has_fx_feature_extraction = True except ImportError: has_fx_feature_extraction = False from timm.layers import Conv2dSame, ScaledStdConv2dSame, CondConv2d, StdConv2dSame, Format from timm.layers import resample_abs_pos_embed, resample_abs_pos_embed_nhwc from timm.layers.non_local_attn import BilinearAttnTransform from timm.layers.pool2d_same import MaxPool2dSame, AvgPool2dSame from timm.layers.norm_act import BatchNormAct2d, SyncBatchNormAct, FrozenBatchNormAct2d, GroupNormAct, GroupNorm1Act, LayerNormAct, LayerNormAct2d __all__ = ['register_notrace_module', 'is_notrace_module', 'get_notrace_modules', 'register_notrace_function', 'is_notrace_function', 'get_notrace_functions', 'create_feature_extractor', 'get_graph_node_names', 'FeatureGraphNet', 'GraphExtractNet'] _leaf_modules = {BilinearAttnTransform, Conv2dSame, MaxPool2dSame, ScaledStdConv2dSame, StdConv2dSame, AvgPool2dSame, CondConv2d, BatchNormAct2d, SyncBatchNormAct, FrozenBatchNormAct2d, GroupNormAct, GroupNorm1Act, LayerNormAct, LayerNormAct2d} try: from timm.layers import InplaceAbn _leaf_modules.add(InplaceAbn) except ImportError: pass def register_notrace_module(module: Type[nn.Module]): _leaf_modules.add(module) return module def is_notrace_module(module: Type[nn.Module]): return module in _leaf_modules def get_notrace_modules(): return list(_leaf_modules) _autowrap_functions = {resample_abs_pos_embed, resample_abs_pos_embed_nhwc} def register_notrace_function(func: Callable): _autowrap_functions.add(func) return func def is_notrace_function(func: Callable): return func in _autowrap_functions def get_notrace_functions(): return list(_autowrap_functions) def get_graph_node_names(model: nn.Module) -> Tuple[List[str], List[str]]: return _get_graph_node_names(model, tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)}) def create_feature_extractor(model: nn.Module, return_nodes: Union[Dict[str, str], List[str]]): assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction' return _create_feature_extractor(model, return_nodes, tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)}) class FeatureGraphNet(nn.Module): return_dict: torch.jit.Final[bool] def __init__(self, model: nn.Module, out_indices: Tuple[int, ...], out_map: Optional[Dict]=None, output_fmt: str='NCHW', return_dict: bool=False): super().__init__() assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction' self.feature_info = _get_feature_info(model, out_indices) if out_map is not None: assert len(out_map) == len(out_indices) self.output_fmt = Format(output_fmt) return_nodes = _get_return_layers(self.feature_info, out_map) self.graph_module = create_feature_extractor(model, return_nodes) self.return_dict = return_dict def forward(self, x): out = self.graph_module(x) if self.return_dict: return out return list(out.values()) class GraphExtractNet(nn.Module): return_dict: torch.jit.Final[bool] def 
__init__(self, model: nn.Module, return_nodes: Union[Dict[str, str], List[str]], squeeze_out: bool=True, return_dict: bool=False): super().__init__() self.squeeze_out = squeeze_out self.graph_module = create_feature_extractor(model, return_nodes) self.return_dict = return_dict def forward(self, x) -> Union[List[torch.Tensor], torch.Tensor]: out = self.graph_module(x) if self.return_dict: return out out = list(out.values()) return out[0] if self.squeeze_out and len(out) == 1 else out # File: pytorch-image-models-main/timm/models/_helpers.py """""" import logging import os from collections import OrderedDict from typing import Any, Callable, Dict, Optional, Union import torch try: import safetensors.torch _has_safetensors = True except ImportError: _has_safetensors = False _logger = logging.getLogger(__name__) __all__ = ['clean_state_dict', 'load_state_dict', 'load_checkpoint', 'remap_state_dict', 'resume_checkpoint'] def _remove_prefix(text, prefix): if text.startswith(prefix): return text[len(prefix):] return text def clean_state_dict(state_dict: Dict[str, Any]) -> Dict[str, Any]: cleaned_state_dict = {} to_remove = ('module.', '_orig_mod.') for (k, v) in state_dict.items(): for r in to_remove: k = _remove_prefix(k, r) cleaned_state_dict[k] = v return cleaned_state_dict def load_state_dict(checkpoint_path: str, use_ema: bool=True, device: Union[str, torch.device]='cpu', weights_only: bool=False) -> Dict[str, Any]: if checkpoint_path and os.path.isfile(checkpoint_path): if str(checkpoint_path).endswith('.safetensors'): assert _has_safetensors, '`pip install safetensors` to use .safetensors' checkpoint = safetensors.torch.load_file(checkpoint_path, device=device) else: try: checkpoint = torch.load(checkpoint_path, map_location=device, weights_only=weights_only) except TypeError: checkpoint = torch.load(checkpoint_path, map_location=device) state_dict_key = '' if isinstance(checkpoint, dict): if use_ema and checkpoint.get('state_dict_ema', None) is not None: state_dict_key = 'state_dict_ema' elif use_ema and checkpoint.get('model_ema', None) is not None: state_dict_key = 'model_ema' elif 'state_dict' in checkpoint: state_dict_key = 'state_dict' elif 'model' in checkpoint: state_dict_key = 'model' state_dict = clean_state_dict(checkpoint[state_dict_key] if state_dict_key else checkpoint) _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)) return state_dict else: _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) raise FileNotFoundError() def load_checkpoint(model: torch.nn.Module, checkpoint_path: str, use_ema: bool=True, device: Union[str, torch.device]='cpu', strict: bool=True, remap: bool=False, filter_fn: Optional[Callable]=None, weights_only: bool=False): if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'): if hasattr(model, 'load_pretrained'): model.load_pretrained(checkpoint_path) else: raise NotImplementedError('Model cannot load numpy checkpoint') return state_dict = load_state_dict(checkpoint_path, use_ema, device=device, weights_only=weights_only) if remap: state_dict = remap_state_dict(state_dict, model) elif filter_fn: state_dict = filter_fn(state_dict, model) incompatible_keys = model.load_state_dict(state_dict, strict=strict) return incompatible_keys def remap_state_dict(state_dict: Dict[str, Any], model: torch.nn.Module, allow_reshape: bool=True): out_dict = {} for ((ka, va), (kb, vb)) in zip(model.state_dict().items(), state_dict.items()): assert va.numel() == vb.numel(), f'Tensor size mismatch {ka}: 
{va.shape} vs {kb}: {vb.shape}. Remap failed.' if va.shape != vb.shape: if allow_reshape: vb = vb.reshape(va.shape) else: assert False, f'Tensor shape mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.' out_dict[ka] = vb return out_dict def resume_checkpoint(model: torch.nn.Module, checkpoint_path: str, optimizer: torch.optim.Optimizer=None, loss_scaler: Any=None, log_info: bool=True): resume_epoch = None if os.path.isfile(checkpoint_path): checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=False) if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: if log_info: _logger.info('Restoring model state from checkpoint...') state_dict = clean_state_dict(checkpoint['state_dict']) model.load_state_dict(state_dict) if optimizer is not None and 'optimizer' in checkpoint: if log_info: _logger.info('Restoring optimizer state from checkpoint...') optimizer.load_state_dict(checkpoint['optimizer']) if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint: if log_info: _logger.info('Restoring AMP loss scaler state from checkpoint...') loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key]) if 'epoch' in checkpoint: resume_epoch = checkpoint['epoch'] if 'version' in checkpoint and checkpoint['version'] > 1: resume_epoch += 1 if log_info: _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch'])) else: model.load_state_dict(checkpoint) if log_info: _logger.info("Loaded checkpoint '{}'".format(checkpoint_path)) return resume_epoch else: _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) raise FileNotFoundError() # File: pytorch-image-models-main/timm/models/_hub.py import hashlib import json import logging import os from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import Iterable, Optional, Union import torch from torch.hub import HASH_REGEX, download_url_to_file, urlparse try: from torch.hub import get_dir except ImportError: from torch.hub import _get_torch_home as get_dir try: import safetensors.torch _has_safetensors = True except ImportError: _has_safetensors = False try: from typing import Literal except ImportError: from typing_extensions import Literal from timm import __version__ from timm.models._pretrained import filter_pretrained_cfg try: from huggingface_hub import create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, repo_type_and_id_from_hf_id, upload_folder from huggingface_hub.utils import EntryNotFoundError hf_hub_download = partial(hf_hub_download, library_name='timm', library_version=__version__) _has_hf_hub = True except ImportError: hf_hub_download = None _has_hf_hub = False _logger = logging.getLogger(__name__) __all__ = ['get_cache_dir', 'download_cached_file', 'has_hf_hub', 'hf_split', 'load_model_config_from_hf', 'load_state_dict_from_hf', 'save_for_hf', 'push_to_hf_hub'] HF_WEIGHTS_NAME = 'pytorch_model.bin' HF_SAFE_WEIGHTS_NAME = 'model.safetensors' HF_OPEN_CLIP_WEIGHTS_NAME = 'open_clip_pytorch_model.bin' HF_OPEN_CLIP_SAFE_WEIGHTS_NAME = 'open_clip_model.safetensors' def get_cache_dir(child_dir=''): if os.getenv('TORCH_MODEL_ZOO'): _logger.warning('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead') hub_dir = get_dir() child_dir = () if not child_dir else (child_dir,) model_dir = os.path.join(hub_dir, 'checkpoints', *child_dir) os.makedirs(model_dir, exist_ok=True) return model_dir def download_cached_file(url, check_hash=True, progress=False): if isinstance(url, (list, 
# File: pytorch-image-models-main/timm/models/_hub.py
import hashlib
import json
import logging
import os
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Iterable, Optional, Union
import torch
from torch.hub import HASH_REGEX, download_url_to_file, urlparse
try:
    from torch.hub import get_dir
except ImportError:
    from torch.hub import _get_torch_home as get_dir
try:
    import safetensors.torch
    _has_safetensors = True
except ImportError:
    _has_safetensors = False
try:
    from typing import Literal
except ImportError:
    from typing_extensions import Literal
from timm import __version__
from timm.models._pretrained import filter_pretrained_cfg
try:
    from huggingface_hub import create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, repo_type_and_id_from_hf_id, upload_folder
    from huggingface_hub.utils import EntryNotFoundError
    hf_hub_download = partial(hf_hub_download, library_name='timm', library_version=__version__)
    _has_hf_hub = True
except ImportError:
    hf_hub_download = None
    _has_hf_hub = False
_logger = logging.getLogger(__name__)
__all__ = ['get_cache_dir', 'download_cached_file', 'has_hf_hub', 'hf_split', 'load_model_config_from_hf', 'load_state_dict_from_hf', 'save_for_hf', 'push_to_hf_hub']
HF_WEIGHTS_NAME = 'pytorch_model.bin'
HF_SAFE_WEIGHTS_NAME = 'model.safetensors'
HF_OPEN_CLIP_WEIGHTS_NAME = 'open_clip_pytorch_model.bin'
HF_OPEN_CLIP_SAFE_WEIGHTS_NAME = 'open_clip_model.safetensors'

def get_cache_dir(child_dir=''):
    if os.getenv('TORCH_MODEL_ZOO'):
        _logger.warning('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead')
    hub_dir = get_dir()
    child_dir = () if not child_dir else (child_dir,)
    model_dir = os.path.join(hub_dir, 'checkpoints', *child_dir)
    os.makedirs(model_dir, exist_ok=True)
    return model_dir

def download_cached_file(url, check_hash=True, progress=False):
    if isinstance(url, (list, tuple)):
        (url, filename) = url
    else:
        parts = urlparse(url)
        filename = os.path.basename(parts.path)
    cached_file = os.path.join(get_cache_dir(), filename)
    if not os.path.exists(cached_file):
        _logger.info('Downloading: "{}" to {}\n'.format(url, cached_file))
        hash_prefix = None
        if check_hash:
            r = HASH_REGEX.search(filename)
            hash_prefix = r.group(1) if r else None
        download_url_to_file(url, cached_file, hash_prefix, progress=progress)
    return cached_file

def check_cached_file(url, check_hash=True):
    if isinstance(url, (list, tuple)):
        (url, filename) = url
    else:
        parts = urlparse(url)
        filename = os.path.basename(parts.path)
    cached_file = os.path.join(get_cache_dir(), filename)
    if os.path.exists(cached_file):
        if check_hash:
            r = HASH_REGEX.search(filename)
            hash_prefix = r.group(1) if r else None
            if hash_prefix:
                with open(cached_file, 'rb') as f:
                    hd = hashlib.sha256(f.read()).hexdigest()
                if hd[:len(hash_prefix)] != hash_prefix:
                    return False
        return True
    return False

def has_hf_hub(necessary=False):
    if not _has_hf_hub and necessary:
        raise RuntimeError('Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.')
    return _has_hf_hub

def hf_split(hf_id: str):
    rev_split = hf_id.split('@')
    assert 0 < len(rev_split) <= 2, 'hf_hub id should only contain one @ character to identify revision.'
    hf_model_id = rev_split[0]
    hf_revision = rev_split[-1] if len(rev_split) > 1 else None
    return (hf_model_id, hf_revision)

def load_cfg_from_json(json_file: Union[str, os.PathLike]):
    with open(json_file, 'r', encoding='utf-8') as reader:
        text = reader.read()
    return json.loads(text)

def download_from_hf(model_id: str, filename: str):
    (hf_model_id, hf_revision) = hf_split(model_id)
    return hf_hub_download(hf_model_id, filename, revision=hf_revision)

def load_model_config_from_hf(model_id: str):
    assert has_hf_hub(True)
    cached_file = download_from_hf(model_id, 'config.json')
    hf_config = load_cfg_from_json(cached_file)
    if 'pretrained_cfg' not in hf_config:
        pretrained_cfg = hf_config
        hf_config = {}
        hf_config['architecture'] = pretrained_cfg.pop('architecture')
        hf_config['num_features'] = pretrained_cfg.pop('num_features', None)
        if 'labels' in pretrained_cfg:
            pretrained_cfg['label_names'] = pretrained_cfg.pop('labels')
        hf_config['pretrained_cfg'] = pretrained_cfg
    pretrained_cfg = hf_config['pretrained_cfg']
    pretrained_cfg['hf_hub_id'] = model_id
    pretrained_cfg['source'] = 'hf-hub'
    if 'num_classes' in hf_config:
        pretrained_cfg['num_classes'] = hf_config['num_classes']
    if 'label_names' in hf_config:
        pretrained_cfg['label_names'] = hf_config.pop('label_names')
    if 'label_descriptions' in hf_config:
        pretrained_cfg['label_descriptions'] = hf_config.pop('label_descriptions')
    model_args = hf_config.get('model_args', {})
    model_name = hf_config['architecture']
    return (pretrained_cfg, model_name, model_args)

def load_state_dict_from_hf(model_id: str, filename: str=HF_WEIGHTS_NAME, weights_only: bool=False): assert has_hf_hub(True) (hf_model_id, hf_revision) = hf_split(model_id) if _has_safetensors: for safe_filename in _get_safe_alternatives(filename): try: cached_safe_file = hf_hub_download(repo_id=hf_model_id, filename=safe_filename, revision=hf_revision) _logger.info(f"[{model_id}] Safe alternative available for '{filename}' (as '{safe_filename}'). 
Loading weights using safetensors.") return safetensors.torch.load_file(cached_safe_file, device='cpu') except EntryNotFoundError: pass cached_file = hf_hub_download(hf_model_id, filename=filename, revision=hf_revision) _logger.debug(f"[{model_id}] Safe alternative not found for '{filename}'. Loading weights using default pytorch.") try: state_dict = torch.load(cached_file, map_location='cpu', weights_only=weights_only) except TypeError: state_dict = torch.load(cached_file, map_location='cpu') return state_dict def load_custom_from_hf(model_id: str, filename: str, model: torch.nn.Module): assert has_hf_hub(True) (hf_model_id, hf_revision) = hf_split(model_id) cached_file = hf_hub_download(hf_model_id, filename=filename, revision=hf_revision) return model.load_pretrained(cached_file) def save_config_for_hf(model, config_path: str, model_config: Optional[dict]=None, model_args: Optional[dict]=None): model_config = model_config or {} hf_config = {} pretrained_cfg = filter_pretrained_cfg(model.pretrained_cfg, remove_source=True, remove_null=True) hf_config['architecture'] = pretrained_cfg.pop('architecture') hf_config['num_classes'] = model_config.pop('num_classes', model.num_classes) hf_config['num_features'] = model_config.pop('num_features', model.num_features) global_pool_type = model_config.pop('global_pool', getattr(model, 'global_pool', None)) if isinstance(global_pool_type, str) and global_pool_type: hf_config['global_pool'] = global_pool_type if 'labels' in model_config: _logger.warning("'labels' as a config field for is deprecated. Please use 'label_names' and 'label_descriptions'. Renaming provided 'labels' field to 'label_names'.") model_config.setdefault('label_names', model_config.pop('labels')) label_names = model_config.pop('label_names', None) if label_names: assert isinstance(label_names, (dict, list, tuple)) hf_config['label_names'] = label_names label_descriptions = model_config.pop('label_descriptions', None) if label_descriptions: assert isinstance(label_descriptions, dict) hf_config['label_descriptions'] = label_descriptions if model_args: hf_config['model_args'] = model_args hf_config['pretrained_cfg'] = pretrained_cfg hf_config.update(model_config) with config_path.open('w') as f: json.dump(hf_config, f, indent=2) def save_for_hf(model, save_directory: str, model_config: Optional[dict]=None, model_args: Optional[dict]=None, safe_serialization: Union[bool, Literal['both']]=False): assert has_hf_hub(True) save_directory = Path(save_directory) save_directory.mkdir(exist_ok=True, parents=True) tensors = model.state_dict() if safe_serialization is True or safe_serialization == 'both': assert _has_safetensors, '`pip install safetensors` to use .safetensors' safetensors.torch.save_file(tensors, save_directory / HF_SAFE_WEIGHTS_NAME) if safe_serialization is False or safe_serialization == 'both': torch.save(tensors, save_directory / HF_WEIGHTS_NAME) config_path = save_directory / 'config.json' save_config_for_hf(model, config_path, model_config=model_config, model_args=model_args) def push_to_hf_hub(model: torch.nn.Module, repo_id: str, commit_message: str='Add model', token: Optional[str]=None, revision: Optional[str]=None, private: bool=False, create_pr: bool=False, model_config: Optional[dict]=None, model_card: Optional[dict]=None, model_args: Optional[dict]=None, safe_serialization: Union[bool, Literal['both']]='both'): repo_url = create_repo(repo_id, token=token, private=private, exist_ok=True) (_, repo_owner, repo_name) = repo_type_and_id_from_hf_id(repo_url) repo_id = 
    f'{repo_owner}/{repo_name}'
    try:
        get_hf_file_metadata(hf_hub_url(repo_id=repo_id, filename='README.md', revision=revision))
        has_readme = True
    except EntryNotFoundError:
        has_readme = False
    with TemporaryDirectory() as tmpdir:
        save_for_hf(model, tmpdir, model_config=model_config, model_args=model_args, safe_serialization=safe_serialization)
        if not has_readme:
            model_card = model_card or {}
            model_name = repo_id.split('/')[-1]
            readme_path = Path(tmpdir) / 'README.md'
            readme_text = generate_readme(model_card, model_name)
            readme_path.write_text(readme_text)
        return upload_folder(repo_id=repo_id, folder_path=tmpdir, revision=revision, create_pr=create_pr, commit_message=commit_message)
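# Usage sketch (not part of the original file): pushing a model to the Hugging
# Face Hub via push_to_hf_hub() above. The repo id is a placeholder and a valid
# token is assumed (e.g. via `huggingface-cli login`).
def _example_push():
    import timm
    model = timm.create_model('resnet18', pretrained=True)
    push_to_hf_hub(model, 'my-user/resnet18-demo', model_card={'license': 'apache-2.0'})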
def generate_readme(model_card: dict, model_name: str):
    readme_text = '---\n'
    readme_text += 'tags:\n- image-classification\n- timm\n'
    readme_text += 'library_name: timm\n'
    readme_text += f"license: {model_card.get('license', 'apache-2.0')}\n"
    if 'details' in model_card and 'Dataset' in model_card['details']:
        readme_text += 'datasets:\n'
        if isinstance(model_card['details']['Dataset'], (tuple, list)):
            for d in model_card['details']['Dataset']:
                readme_text += f'- {d.lower()}\n'
        else:
            readme_text += f"- {model_card['details']['Dataset'].lower()}\n"
        if 'Pretrain Dataset' in model_card['details']:
            if isinstance(model_card['details']['Pretrain Dataset'], (tuple, list)):
                for d in model_card['details']['Pretrain Dataset']:
                    readme_text += f'- {d.lower()}\n'
            else:
                readme_text += f"- {model_card['details']['Pretrain Dataset'].lower()}\n"
    readme_text += '---\n'
    readme_text += f'# Model card for {model_name}\n'
    if 'description' in model_card:
        readme_text += f"\n{model_card['description']}\n"
    if 'details' in model_card:
        readme_text += f'\n## Model Details\n'
        for (k, v) in model_card['details'].items():
            if isinstance(v, (list, tuple)):
                readme_text += f'- **{k}:**\n'
                for vi in v:
                    readme_text += f' - {vi}\n'
            elif isinstance(v, dict):
                readme_text += f'- **{k}:**\n'
                for (ki, vi) in v.items():
                    readme_text += f' - {ki}: {vi}\n'
            else:
                readme_text += f'- **{k}:** {v}\n'
    if 'usage' in model_card:
        readme_text += f'\n## Model Usage\n'
        readme_text += model_card['usage']
        readme_text += '\n'
    if 'comparison' in model_card:
        readme_text += f'\n## Model Comparison\n'
        readme_text += model_card['comparison']
        readme_text += '\n'
    if 'citation' in model_card:
        readme_text += f'\n## Citation\n'
        if not isinstance(model_card['citation'], (list, tuple)):
            citations = [model_card['citation']]
        else:
            citations = model_card['citation']
        for c in citations:
            readme_text += f'```bibtex\n{c}\n```\n'
    return readme_text

def _get_safe_alternatives(filename: str) -> Iterable[str]:
    if filename == HF_WEIGHTS_NAME:
        yield HF_SAFE_WEIGHTS_NAME
    if filename == HF_OPEN_CLIP_WEIGHTS_NAME:
        yield HF_OPEN_CLIP_SAFE_WEIGHTS_NAME
    if filename not in (HF_WEIGHTS_NAME, HF_OPEN_CLIP_WEIGHTS_NAME) and filename.endswith('.bin'):
        yield (filename[:-4] + '.safetensors')

# File: pytorch-image-models-main/timm/models/_manipulate.py
import collections.abc
import math
import re
from collections import defaultdict
from itertools import chain
from typing import Any, Callable, Dict, Iterator, Tuple, Type, Union
import torch
from torch import nn as nn
from torch.utils.checkpoint import checkpoint
__all__ = ['model_parameters', 'named_apply', 'named_modules', 'named_modules_with_params', 'adapt_input_conv', 'group_with_matcher', 'group_modules', 'group_parameters', 'flatten_modules', 'checkpoint_seq']

def model_parameters(model: nn.Module, exclude_head: bool=False):
    if exclude_head:
        return [p for p in model.parameters()][:-2]
    else:
        return model.parameters()

def named_apply(fn: Callable, module: nn.Module, name='', depth_first: bool=True, include_root: bool=False) -> nn.Module:
    if not depth_first and include_root:
        fn(module=module, name=name)
    for (child_name, child_module) in module.named_children():
        child_name = '.'.join((name, child_name)) if name else child_name
        named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
    if depth_first and include_root:
        fn(module=module, name=name)
    return module

def named_modules(module: nn.Module, name: str='', depth_first: bool=True, include_root: bool=False):
    if not depth_first and include_root:
        yield (name, module)
    for (child_name, child_module) in module.named_children():
        child_name = '.'.join((name, child_name)) if name else child_name
        yield from named_modules(module=child_module, name=child_name, depth_first=depth_first, include_root=True)
    if depth_first and include_root:
        yield (name, module)

def named_modules_with_params(module: nn.Module, name: str='', depth_first: bool=True, include_root: bool=False):
    if module._parameters and (not depth_first) and include_root:
        yield (name, module)
    for (child_name, child_module) in module.named_children():
        child_name = '.'.join((name, child_name)) if name else child_name
        yield from named_modules_with_params(module=child_module, name=child_name, depth_first=depth_first, include_root=True)
    if module._parameters and depth_first and include_root:
        yield (name, module)

MATCH_PREV_GROUP = (99999,)

def group_with_matcher(named_objects: Iterator[Tuple[str, Any]], group_matcher: Union[Dict, Callable], return_values: bool=False, reverse: bool=False):
    if isinstance(group_matcher, dict):
        compiled = []
        for (group_ordinal, (group_name, mspec)) in enumerate(group_matcher.items()):
            if mspec is None:
                continue
            if isinstance(mspec, (tuple, list)):
                for sspec in mspec:
                    compiled += [(re.compile(sspec[0]), (group_ordinal,), sspec[1])]
            else:
                compiled += [(re.compile(mspec), (group_ordinal,), None)]
        group_matcher = compiled

    def _get_grouping(name):
        if isinstance(group_matcher, (list, tuple)):
            for (match_fn, prefix, suffix) in group_matcher:
                r = match_fn.match(name)
                if r:
                    parts = (prefix, r.groups(), suffix)
                    return tuple(map(float, chain.from_iterable(filter(None, parts))))
            return (float('inf'),)
        else:
            ord = group_matcher(name)
            if not isinstance(ord, collections.abc.Iterable):
                return (ord,)
            return tuple(ord)
    grouping = defaultdict(list)
    for (k, v) in named_objects:
        grouping[_get_grouping(k)].append(v if return_values else k)
    layer_id_to_param = defaultdict(list)
    lid = -1
    for k in sorted(filter(lambda x: x is not None, grouping.keys())):
        if lid < 0 or k[-1] != MATCH_PREV_GROUP[0]:
            lid += 1
        layer_id_to_param[lid].extend(grouping[k])
    if reverse:
        assert not return_values, 'reverse mapping only sensible for name output'
        param_to_layer_id = {}
        for (lid, lm) in layer_id_to_param.items():
            for n in lm:
                param_to_layer_id[n] = lid
        return param_to_layer_id
    return layer_id_to_param

def group_parameters(module: nn.Module, group_matcher, return_values: bool=False, reverse: bool=False):
    return group_with_matcher(module.named_parameters(), group_matcher, return_values=return_values, reverse=reverse)

def group_modules(module: nn.Module, group_matcher, return_values: bool=False, reverse: bool=False):
    return group_with_matcher(named_modules_with_params(module), group_matcher, return_values=return_values, reverse=reverse)
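# Usage sketch (not part of the original file): grouping a model's parameters
# into layer-wise buckets (e.g. for layer-wise LR decay) with group_parameters()
# and the group_matcher that timm models expose; the model name is illustrative.
def _example_group_parameters():
    import timm
    model = timm.create_model('resnet50')
    matcher = model.group_matcher(coarse=True)
    # Maps group index -> list of parameter names.
    return group_parameters(model, matcher)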
def flatten_modules(named_modules: Iterator[Tuple[str, nn.Module]], depth: int=1, prefix: Union[str, Tuple[str, ...]]='', module_types: Union[str, Tuple[Type[nn.Module]]]='sequential'):
    prefix_is_tuple = isinstance(prefix, tuple)
    if isinstance(module_types, str):
        if module_types == 'container':
            module_types = (nn.Sequential, nn.ModuleList, nn.ModuleDict)
        else:
            module_types = (nn.Sequential,)
    for (name, module) in named_modules:
        if depth and isinstance(module, module_types):
            yield from flatten_modules(module.named_children(), depth - 1, prefix=(name,) if prefix_is_tuple else name, module_types=module_types)
        elif prefix_is_tuple:
            name = prefix + (name,)
            yield (name, module)
        else:
            if prefix:
                name = '.'.join([prefix, name])
            yield (name, module)

def checkpoint_seq(functions, x, every=1, flatten=False, skip_last=False, preserve_rng_state=True):

    def run_function(start, end, functions):
        def forward(_x):
            for j in range(start, end + 1):
                _x = functions[j](_x)
            return _x
        return forward
    if isinstance(functions, torch.nn.Sequential):
        functions = functions.children()
    if flatten:
        functions = chain.from_iterable(functions)
    if not isinstance(functions, (tuple, list)):
        functions = tuple(functions)
    num_checkpointed = len(functions)
    if skip_last:
        num_checkpointed -= 1
    end = -1
    for start in range(0, num_checkpointed, every):
        end = min(start + every - 1, num_checkpointed - 1)
        x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state)
    if skip_last:
        return run_function(end + 1, len(functions) - 1, functions)(x)
    return x
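# Usage sketch (not part of the original file): applying checkpoint_seq() above
# to a stack of blocks in groups of two, trading recompute for activation memory
# during training.
def _example_checkpoint_seq(blocks: nn.Sequential, x: torch.Tensor) -> torch.Tensor:
    # Each checkpointed segment spans `every` consecutive blocks.
    return checkpoint_seq(blocks, x, every=2)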
def adapt_input_conv(in_chans, conv_weight):
    conv_type = conv_weight.dtype
    conv_weight = conv_weight.float()
    (O, I, J, K) = conv_weight.shape
    if in_chans == 1:
        if I > 3:
            assert conv_weight.shape[1] % 3 == 0
            # for depth-separable style weights, fold the RGB groups first
            conv_weight = conv_weight.reshape(O, I // 3, 3, J, K)
            conv_weight = conv_weight.sum(dim=2, keepdim=False)
        else:
            conv_weight = conv_weight.sum(dim=1, keepdim=True)
    elif in_chans != 3:
        if I != 3:
            raise NotImplementedError('Weight format not supported by conversion.')
        else:
            # tile RGB weights across the new channel count, rescaling to keep magnitudes
            repeat = int(math.ceil(in_chans / 3))
            conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
            conv_weight *= 3 / float(in_chans)
    conv_weight = conv_weight.to(conv_type)
    return conv_weight

# File: pytorch-image-models-main/timm/models/_pretrained.py
import copy
from collections import deque, defaultdict
from dataclasses import dataclass, field, replace, asdict
from typing import Any, Deque, Dict, Tuple, Optional, Union
__all__ = ['PretrainedCfg', 'filter_pretrained_cfg', 'DefaultCfg']

@dataclass
class PretrainedCfg:
    url: Optional[Union[str, Tuple[str, str]]] = None
    file: Optional[str] = None
    state_dict: Optional[Dict[str, Any]] = None
    hf_hub_id: Optional[str] = None
    hf_hub_filename: Optional[str] = None
    source: Optional[str] = None
    architecture: Optional[str] = None
    tag: Optional[str] = None
    custom_load: bool = False
    input_size: Tuple[int, int, int] = (3, 224, 224)
    test_input_size: Optional[Tuple[int, int, int]] = None
    min_input_size: Optional[Tuple[int, int, int]] = None
    fixed_input_size: bool = False
    interpolation: str = 'bicubic'
    crop_pct: float = 0.875
    test_crop_pct: Optional[float] = None
    crop_mode: str = 'center'
    mean: Tuple[float, ...] = (0.485, 0.456, 0.406)
    std: Tuple[float, ...] = (0.229, 0.224, 0.225)
    num_classes: int = 1000
    label_offset: Optional[int] = None
    label_names: Optional[Tuple[str]] = None
    label_descriptions: Optional[Dict[str, str]] = None
    pool_size: Optional[Tuple[int, ...]] = None
    test_pool_size: Optional[Tuple[int, ...]] = None
    first_conv: Optional[str] = None
    classifier: Optional[str] = None
    license: Optional[str] = None
    description: Optional[str] = None
    origin_url: Optional[str] = None
    paper_name: Optional[str] = None
    paper_ids: Optional[Union[str, Tuple[str]]] = None
    notes: Optional[Tuple[str]] = None

    @property
    def has_weights(self):
        return self.url or self.file or self.hf_hub_id

    def to_dict(self, remove_source=False, remove_null=True):
        return filter_pretrained_cfg(asdict(self), remove_source=remove_source, remove_null=remove_null)

def filter_pretrained_cfg(cfg, remove_source=False, remove_null=True):
    filtered_cfg = {}
    keep_null = {'pool_size', 'first_conv', 'classifier'}
    for (k, v) in cfg.items():
        if remove_source and k in {'url', 'file', 'hf_hub_id', 'hf_hub_filename', 'source'}:
            continue
        if remove_null and v is None and (k not in keep_null):
            continue
        filtered_cfg[k] = v
    return filtered_cfg

@dataclass
class DefaultCfg:
    tags: Deque[str] = field(default_factory=deque)
    cfgs: Dict[str, PretrainedCfg] = field(default_factory=dict)
    is_pretrained: bool = False

    @property
    def default(self):
        return self.cfgs[self.tags[0]]

    @property
    def default_with_tag(self):
        tag = self.tags[0]
        return (tag, self.cfgs[tag])

# File: pytorch-image-models-main/timm/models/_prune.py
import os
import pkgutil
from copy import deepcopy
from torch import nn as nn
from timm.layers import Conv2dSame, BatchNormAct2d, Linear
__all__ = ['extract_layer', 'set_layer', 'adapt_model_from_string', 'adapt_model_from_file']

def extract_layer(model, layer):
    layer = layer.split('.')
    module = model
    if hasattr(model, 'module') and layer[0] != 'module':
        module = model.module
    if not hasattr(model, 'module') and layer[0] == 'module':
        layer = layer[1:]
    for l in layer:
        if hasattr(module, l):
            if not l.isdigit():
                module = getattr(module, l)
            else:
                module = module[int(l)]
        else:
            return module
    return module

def set_layer(model, layer, val):
    layer = layer.split('.')
    module = model
    if hasattr(model, 'module') and layer[0] != 'module':
        module = model.module
    lst_index = 0
    module2 = module
    for l in layer:
        if hasattr(module2, l):
            if not l.isdigit():
                module2 = getattr(module2, l)
            else:
                module2 = module2[int(l)]
            lst_index += 1
    lst_index -= 1
    for l in layer[:lst_index]:
        if not l.isdigit():
            module = getattr(module, l)
        else:
            module = module[int(l)]
    l = layer[lst_index]
    setattr(module, l, val)

def adapt_model_from_string(parent_module, model_string): separator = '***' state_dict = {} lst_shape = model_string.split(separator) for k in lst_shape: k = k.split(':') key = k[0] shape = k[1][1:-1].split(',') if shape[0] != '': state_dict[key] = [int(i) for i in shape] new_module = deepcopy(parent_module) for (n, m) in parent_module.named_modules(): old_module = extract_layer(parent_module, n) if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame): if isinstance(old_module, Conv2dSame): conv = Conv2dSame else: conv = nn.Conv2d s = state_dict[n + '.weight'] in_channels = s[1] out_channels = s[0] g = 1 if old_module.groups > 1: in_channels = out_channels g = in_channels new_conv = conv(in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size, bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation, groups=g, 
stride=old_module.stride) set_layer(new_module, n, new_conv) elif isinstance(old_module, BatchNormAct2d): new_bn = BatchNormAct2d(state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum, affine=old_module.affine, track_running_stats=True) new_bn.drop = old_module.drop new_bn.act = old_module.act set_layer(new_module, n, new_bn) elif isinstance(old_module, nn.BatchNorm2d): new_bn = nn.BatchNorm2d(num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum, affine=old_module.affine, track_running_stats=True) set_layer(new_module, n, new_bn) elif isinstance(old_module, nn.Linear): num_features = state_dict[n + '.weight'][1] new_fc = Linear(in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None) set_layer(new_module, n, new_fc) if hasattr(new_module, 'num_features'): if getattr(new_module, 'head_hidden_size', 0) == new_module.num_features: new_module.head_hidden_size = num_features new_module.num_features = num_features new_module.eval() parent_module.eval() return new_module def adapt_model_from_file(parent_module, model_variant): adapt_data = pkgutil.get_data(__name__, os.path.join('_pruned', model_variant + '.txt')) return adapt_model_from_string(parent_module, adapt_data.decode('utf-8').strip()) # File: pytorch-image-models-main/timm/models/_registry.py """""" import fnmatch import re import sys import warnings from collections import defaultdict, deque from copy import deepcopy from dataclasses import replace from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Sequence, Union, Tuple from ._pretrained import PretrainedCfg, DefaultCfg __all__ = ['split_model_name_tag', 'get_arch_name', 'register_model', 'generate_default_cfgs', 'list_models', 'list_pretrained', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules', 'get_pretrained_cfg_value', 'is_model_pretrained', 'get_pretrained_cfgs_for_arch'] _module_to_models: Dict[str, Set[str]] = defaultdict(set) _model_to_module: Dict[str, str] = {} _model_entrypoints: Dict[str, Callable[..., Any]] = {} _model_has_pretrained: Set[str] = set() _model_default_cfgs: Dict[str, PretrainedCfg] = {} _model_pretrained_cfgs: Dict[str, PretrainedCfg] = {} _model_with_tags: Dict[str, List[str]] = defaultdict(list) _module_to_deprecated_models: Dict[str, Dict[str, Optional[str]]] = defaultdict(dict) _deprecated_models: Dict[str, Optional[str]] = {} def split_model_name_tag(model_name: str, no_tag: str='') -> Tuple[str, str]: (model_name, *tag_list) = model_name.split('.', 1) tag = tag_list[0] if tag_list else no_tag return (model_name, tag) def get_arch_name(model_name: str) -> str: return split_model_name_tag(model_name)[0] def generate_default_cfgs(cfgs: Dict[str, Union[Dict[str, Any], PretrainedCfg]]): out = defaultdict(DefaultCfg) default_set = set() for (k, v) in cfgs.items(): if isinstance(v, dict): v = PretrainedCfg(**v) has_weights = v.has_weights (model, tag) = split_model_name_tag(k) is_default_set = model in default_set priority = has_weights and (not tag) or (tag.endswith('*') and (not is_default_set)) tag = tag.strip('*') default_cfg = out[model] if priority: default_cfg.tags.appendleft(tag) default_set.add(model) elif has_weights and (not default_cfg.is_pretrained): default_cfg.tags.appendleft(tag) else: default_cfg.tags.append(tag) if has_weights: default_cfg.is_pretrained = True default_cfg.cfgs[tag] = v return out def register_model(fn: Callable[..., Any]) -> Callable[..., Any]: mod = sys.modules[fn.__module__] 
    module_name_split = fn.__module__.split('.')
    module_name = module_name_split[-1] if len(module_name_split) else ''
    model_name = fn.__name__
    if hasattr(mod, '__all__'):
        mod.__all__.append(model_name)
    else:
        mod.__all__ = [model_name]
    if model_name in _model_entrypoints:
        warnings.warn(f'Overwriting {model_name} in registry with {fn.__module__}.{model_name}. The name being registered conflicts with an existing name. Please check whether this is intended.', stacklevel=2)
    _model_entrypoints[model_name] = fn
    _model_to_module[model_name] = module_name
    _module_to_models[module_name].add(model_name)
    if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:
        default_cfg = mod.default_cfgs[model_name]
        if not isinstance(default_cfg, DefaultCfg):
            assert isinstance(default_cfg, dict)
            pretrained_cfg = PretrainedCfg(**default_cfg)
            default_cfg = DefaultCfg(tags=deque(['']), cfgs={'': pretrained_cfg})
        for (tag_idx, tag) in enumerate(default_cfg.tags):
            is_default = tag_idx == 0
            pretrained_cfg = default_cfg.cfgs[tag]
            model_name_tag = '.'.join([model_name, tag]) if tag else model_name
            replace_items = dict(architecture=model_name, tag=tag if tag else None)
            if pretrained_cfg.hf_hub_id and pretrained_cfg.hf_hub_id == 'timm/':
                replace_items['hf_hub_id'] = pretrained_cfg.hf_hub_id + model_name_tag
            pretrained_cfg = replace(pretrained_cfg, **replace_items)
            if is_default:
                _model_pretrained_cfgs[model_name] = pretrained_cfg
                if pretrained_cfg.has_weights:
                    _model_has_pretrained.add(model_name)
            if tag:
                _model_pretrained_cfgs[model_name_tag] = pretrained_cfg
                if pretrained_cfg.has_weights:
                    _model_has_pretrained.add(model_name_tag)
                _model_with_tags[model_name].append(model_name_tag)
            else:
                _model_with_tags[model_name].append(model_name)
        _model_default_cfgs[model_name] = default_cfg
    return fn
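# Usage sketch (not part of the original file): registering a toy entrypoint so
# it becomes resolvable via model_entrypoint()/create_model(). The name and
# layer sizes are illustrative placeholders.
@register_model
def _example_toy_net(pretrained=False, **kwargs):
    import torch.nn as nn
    # Any callable returning an nn.Module works as an entrypoint.
    return nn.Sequential(nn.Conv2d(3, 8, 3), nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(8, kwargs.get('num_classes', 10)))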
def _deprecated_model_shim(deprecated_name: str, current_fn: Callable=None, current_tag: str=''):
    def _fn(pretrained=False, **kwargs):
        assert current_fn is not None, f'Model {deprecated_name} has been removed with no replacement.'
        current_name = '.'.join([current_fn.__name__, current_tag]) if current_tag else current_fn.__name__
        warnings.warn(f'Mapping deprecated model name {deprecated_name} to current {current_name}.', stacklevel=2)
        pretrained_cfg = kwargs.pop('pretrained_cfg', None)
        return current_fn(pretrained=pretrained, pretrained_cfg=pretrained_cfg or current_tag, **kwargs)
    return _fn

def register_model_deprecations(module_name: str, deprecation_map: Dict[str, Optional[str]]):
    mod = sys.modules[module_name]
    module_name_split = module_name.split('.')
    module_name = module_name_split[-1] if len(module_name_split) else ''
    for (deprecated, current) in deprecation_map.items():
        if hasattr(mod, '__all__'):
            mod.__all__.append(deprecated)
        current_fn = None
        current_tag = ''
        if current:
            (current_name, current_tag) = split_model_name_tag(current)
            current_fn = getattr(mod, current_name)
        deprecated_entrypoint_fn = _deprecated_model_shim(deprecated, current_fn, current_tag)
        setattr(mod, deprecated, deprecated_entrypoint_fn)
        _model_entrypoints[deprecated] = deprecated_entrypoint_fn
        _model_to_module[deprecated] = module_name
        _module_to_models[module_name].add(deprecated)
        _deprecated_models[deprecated] = current
        _module_to_deprecated_models[module_name][deprecated] = current

def _natural_key(string_: str) -> List[Union[int, str]]:
    return [int(s) if s.isdigit() else s for s in re.split('(\\d+)', string_.lower())]

def _expand_filter(filter: str):
    (filter_base, filter_tag) = split_model_name_tag(filter)
    if not filter_tag:
        return ['.'.join([filter_base, '*']), filter]
    else:
        return [filter]

def list_models(filter: Union[str, List[str]]='', module: Union[str, List[str]]='', pretrained: bool=False, exclude_filters: Union[str, List[str]]='', name_matches_cfg: bool=False, include_tags: Optional[bool]=None) -> List[str]:
    if filter:
        include_filters = filter if isinstance(filter, (tuple, list)) else [filter]
    else:
        include_filters = []
    if include_tags is None:
        include_tags = pretrained
    if not module:
        all_models: Set[str] = set(_model_entrypoints.keys())
    elif isinstance(module, str):
        all_models: Set[str] = _module_to_models[module]
    else:
        assert isinstance(module, Sequence)
        all_models: Set[str] = set()
        for m in module:
            all_models.update(_module_to_models[m])
    all_models = all_models - _deprecated_models.keys()
    if include_tags:
        models_with_tags: Set[str] = set()
        for m in all_models:
            models_with_tags.update(_model_with_tags[m])
        all_models = models_with_tags
    include_filters = [ef for f in include_filters for ef in _expand_filter(f)]
    exclude_filters = [ef for f in exclude_filters for ef in _expand_filter(f)]
    if include_filters:
        models: Set[str] = set()
        for f in include_filters:
            include_models = fnmatch.filter(all_models, f)
            if len(include_models):
                models = models.union(include_models)
    else:
        models = all_models
    if exclude_filters:
        if not isinstance(exclude_filters, (tuple, list)):
            exclude_filters = [exclude_filters]
        for xf in exclude_filters:
            exclude_models = fnmatch.filter(models, xf)
            if len(exclude_models):
                models = models.difference(exclude_models)
    if pretrained:
        models = _model_has_pretrained.intersection(models)
    if name_matches_cfg:
        models = set(_model_pretrained_cfgs).intersection(models)
    return sorted(models, key=_natural_key)

def list_pretrained(filter: Union[str, List[str]]='', exclude_filters: str='') -> List[str]:
    return list_models(filter=filter, pretrained=True, exclude_filters=exclude_filters, include_tags=True)
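# Usage sketch (not part of the original file): wildcard queries against the
# registry populated by register_model() above; the filters are illustrative.
def _example_list_models():
    all_vits = list_models('vit_*')
    with_weights = list_pretrained('convnext_*')
    return all_vits, with_weights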
def get_deprecated_models(module: str='') -> Dict[str, str]:
    all_deprecated = _module_to_deprecated_models[module] if module else _deprecated_models
    return deepcopy(all_deprecated)

def is_model(model_name: str) -> bool:
    arch_name = get_arch_name(model_name)
    return arch_name in _model_entrypoints

def model_entrypoint(model_name: str, module_filter: Optional[str]=None) -> Callable[..., Any]:
    arch_name = get_arch_name(model_name)
    if module_filter and arch_name not in _module_to_models.get(module_filter, {}):
        raise RuntimeError(f'Model ({model_name}) not found in module {module_filter}.')
    return _model_entrypoints[arch_name]

def list_modules() -> List[str]:
    modules = _module_to_models.keys()
    return sorted(modules)

def is_model_in_modules(model_name: str, module_names: Union[Tuple[str, ...], List[str], Set[str]]) -> bool:
    arch_name = get_arch_name(model_name)
    assert isinstance(module_names, (tuple, list, set))
    return any((arch_name in _module_to_models[n] for n in module_names))

def is_model_pretrained(model_name: str) -> bool:
    return model_name in _model_has_pretrained

def get_pretrained_cfg(model_name: str, allow_unregistered: bool=True) -> Optional[PretrainedCfg]:
    if model_name in _model_pretrained_cfgs:
        return deepcopy(_model_pretrained_cfgs[model_name])
    (arch_name, tag) = split_model_name_tag(model_name)
    if arch_name in _model_default_cfgs:
        raise RuntimeError(f'Invalid pretrained tag ({tag}) for {arch_name}.')
    if allow_unregistered:
        return None
    raise RuntimeError(f'Model architecture ({arch_name}) has no pretrained cfg registered.')

def get_pretrained_cfg_value(model_name: str, cfg_key: str) -> Optional[Any]:
    cfg = get_pretrained_cfg(model_name, allow_unregistered=False)
    return getattr(cfg, cfg_key, None)

def get_arch_pretrained_cfgs(model_name: str) -> Dict[str, PretrainedCfg]:
    (arch_name, _) = split_model_name_tag(model_name)
    model_names = _model_with_tags[arch_name]
    cfgs = {m: _model_pretrained_cfgs[m] for m in model_names}
    return cfgs

# File: pytorch-image-models-main/timm/models/beit.py
""""""
import math
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, SwiGLU, LayerNorm, DropPath, trunc_normal_, use_fused_attn
from timm.layers import resample_patch_embed, resample_abs_pos_embed, resize_rel_pos_bias_table, ndgrid
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._registry import generate_default_cfgs, register_model
__all__ = ['Beit']

def gen_relative_position_index(window_size: Tuple[int, int]) -> torch.Tensor:
    num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
    window_area = window_size[0] * window_size[1]
    coords = torch.stack(ndgrid(torch.arange(window_size[0]), torch.arange(window_size[1])))
    coords_flatten = torch.flatten(coords, 1)
    relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
    relative_coords = relative_coords.permute(1, 2, 0).contiguous()
    relative_coords[:, :, 0] += window_size[0] - 1
    relative_coords[:, :, 1] += window_size[1] - 1
    relative_coords[:, :, 0] *= 2 * window_size[1] - 1
    relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype)
    relative_position_index[1:, 1:] = relative_coords.sum(-1)
    relative_position_index[0, 0:] = num_relative_distance - 3
    relative_position_index[0:, 0] = num_relative_distance - 2
    relative_position_index[0, 0] = num_relative_distance - 1
    return relative_position_index

class Attention(nn.Module): fused_attn: 
torch.jit.Final[bool] def __init__(self, dim: int, num_heads: int=8, qkv_bias: bool=False, qkv_bias_separate: bool=False, attn_drop: float=0.0, proj_drop: float=0.0, window_size: Optional[Tuple[int, int]]=None, attn_head_dim: Optional[int]=None): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads if attn_head_dim is not None: head_dim = attn_head_dim all_head_dim = head_dim * self.num_heads self.scale = head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.qkv_bias_separate = qkv_bias_separate self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False) self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) else: self.q_bias = None self.k_bias = None self.v_bias = None if window_size: self.window_size = window_size self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_bias_table = nn.Parameter(torch.zeros(self.num_relative_distance, num_heads)) self.register_buffer('relative_position_index', gen_relative_position_index(window_size), persistent=False) else: self.window_size = None self.relative_position_bias_table = None self.relative_position_index = None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(all_head_dim, dim) self.proj_drop = nn.Dropout(proj_drop) def _get_rel_pos_bias(self): relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() return relative_position_bias.unsqueeze(0) def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor]=None): (B, N, C) = x.shape if self.q_bias is None: qkv = self.qkv(x) else: qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.qkv_bias_separate: qkv = self.qkv(x) qkv += qkv_bias else: qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) if self.fused_attn: rel_pos_bias = None if self.relative_position_bias_table is not None: rel_pos_bias = self._get_rel_pos_bias() if shared_rel_pos_bias is not None: rel_pos_bias = rel_pos_bias + shared_rel_pos_bias elif shared_rel_pos_bias is not None: rel_pos_bias = shared_rel_pos_bias x = F.scaled_dot_product_attention(q, k, v, attn_mask=rel_pos_bias, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) if self.relative_position_bias_table is not None: attn = attn + self._get_rel_pos_bias() if shared_rel_pos_bias is not None: attn = attn + shared_rel_pos_bias attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim: int, num_heads: int, qkv_bias: bool=False, mlp_ratio: float=4.0, scale_mlp: bool=False, swiglu_mlp: bool=False, proj_drop: float=0.0, attn_drop: float=0.0, drop_path: float=0.0, init_values: Optional[float]=None, act_layer: Callable=nn.GELU, norm_layer: Callable=LayerNorm, window_size: Optional[Tuple[int, int]]=None, attn_head_dim: Optional[int]=None): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, window_size=window_size, 
attn_head_dim=attn_head_dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) if swiglu_mlp: self.mlp = SwiGLU(in_features=dim, hidden_features=int(dim * mlp_ratio), norm_layer=norm_layer if scale_mlp else None, drop=proj_drop) else: self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() if init_values: self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) else: (self.gamma_1, self.gamma_2) = (None, None) def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor]=None): if self.gamma_1 is None: x = x + self.drop_path1(self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias)) x = x + self.drop_path2(self.mlp(self.norm2(x))) else: x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias)) x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x))) return x class RelativePositionBias(nn.Module): def __init__(self, window_size, num_heads): super().__init__() self.window_size = window_size self.window_area = window_size[0] * window_size[1] num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads)) self.register_buffer('relative_position_index', gen_relative_position_index(window_size)) def forward(self): relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(self.window_area + 1, self.window_area + 1, -1) return relative_position_bias.permute(2, 0, 1).contiguous() class Beit(nn.Module): def __init__(self, img_size: Union[int, Tuple[int, int]]=224, patch_size: Union[int, Tuple[int, int]]=16, in_chans: int=3, num_classes: int=1000, global_pool: str='avg', embed_dim: int=768, depth: int=12, num_heads: int=12, qkv_bias: bool=True, mlp_ratio: float=4.0, swiglu_mlp: bool=False, scale_mlp: bool=False, drop_rate: float=0.0, pos_drop_rate: float=0.0, proj_drop_rate: float=0.0, attn_drop_rate: float=0.0, drop_path_rate: float=0.0, norm_layer: Callable=LayerNorm, init_values: Optional[float]=None, use_abs_pos_emb: bool=True, use_rel_pos_bias: bool=False, use_shared_rel_pos_bias: bool=False, head_init_scale: float=0.001): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = self.embed_dim = embed_dim self.num_prefix_tokens = 1 self.grad_checkpointing = False self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) if use_abs_pos_emb else None self.pos_drop = nn.Dropout(p=pos_drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.grid_size, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] self.blocks = nn.ModuleList([Block(dim=embed_dim, num_heads=num_heads, qkv_bias=qkv_bias, mlp_ratio=mlp_ratio, scale_mlp=scale_mlp, swiglu_mlp=swiglu_mlp, proj_drop=proj_drop_rate, 
attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.grid_size if use_rel_pos_bias else None) for i in range(depth)]) self.feature_info = [dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)] use_fc_norm = self.global_pool == 'avg' self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim) self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity() self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=0.02) trunc_normal_(self.cls_token, std=0.02) self.fix_init_weight() if isinstance(self.head, nn.Linear): trunc_normal_(self.head.weight, std=0.02) self.head.weight.data.mul_(head_init_scale) self.head.bias.data.mul_(head_init_scale) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for (layer_id, layer) in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): nwd = {'pos_embed', 'cls_token'} for (n, _) in self.named_parameters(): if 'relative_position_bias_table' in n: nwd.add(n) return nwd @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^cls_token|pos_embed|patch_embed|rel_pos_bias', blocks=[('^blocks\\.(\\d+)', None), ('^norm', (99999,))]) return matcher @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, return_prefix_tokens: bool=False, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' 
        reshape = output_fmt == 'NCHW'
        intermediates = []
        (take_indices, max_index) = feature_take_indices(len(self.blocks), indices)
        (B, _, height, width) = x.shape
        x = self.patch_embed(x)
        x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)
        rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
        if torch.jit.is_scripting() or not stop_early:
            blocks = self.blocks
        else:
            blocks = self.blocks[:max_index + 1]
        for (i, blk) in enumerate(blocks):
            x = blk(x, shared_rel_pos_bias=rel_pos_bias)
            if i in take_indices:
                intermediates.append(self.norm(x) if norm else x)
        if self.num_prefix_tokens:
            prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates]
            intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates]
        if reshape:
            (H, W) = self.patch_embed.dynamic_feat_size((height, width))
            intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates]
        if not torch.jit.is_scripting() and return_prefix_tokens:
            intermediates = list(zip(intermediates, prefix_tokens))
        if intermediates_only:
            return intermediates
        x = self.norm(x)
        return (x, intermediates)

    def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True):
        (take_indices, max_index) = feature_take_indices(len(self.blocks), indices)
        self.blocks = self.blocks[:max_index + 1]
        if prune_norm:
            self.norm = nn.Identity()
        if prune_head:
            self.fc_norm = nn.Identity()
            self.reset_classifier(0, '')
        return take_indices

    def forward_features(self, x):
        x = self.patch_embed(x)
        x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)
        rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
        for blk in self.blocks:
            if self.grad_checkpointing and (not torch.jit.is_scripting()):
                x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias)
            else:
                x = blk(x, shared_rel_pos_bias=rel_pos_bias)
        x = self.norm(x)
        return x

    def forward_head(self, x, pre_logits: bool=False):
        if self.global_pool:
            x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
        x = self.fc_norm(x)
        x = self.head_drop(x)
        return x if pre_logits else self.head(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x
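# Usage sketch (not part of the original file): extracting per-block feature
# maps from a BEiT model via forward_intermediates() above; the model name and
# input shape are illustrative.
def _example_forward_intermediates():
    import timm
    model = timm.create_model('beit_base_patch16_224')
    x = torch.randn(1, 3, 224, 224)
    # NCHW feature maps from the last 3 blocks, without the final norm output.
    return model.forward_intermediates(x, indices=3, intermediates_only=True)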
def _cfg(url='', **kwargs):
    return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs}

default_cfgs = generate_default_cfgs({
    'beit_base_patch16_224.in22k_ft_in22k_in1k': _cfg(hf_hub_id='timm/'),
    'beit_base_patch16_384.in22k_ft_in22k_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0),
    'beit_base_patch16_224.in22k_ft_in22k': _cfg(hf_hub_id='timm/', num_classes=21841),
    'beit_large_patch16_224.in22k_ft_in22k_in1k': _cfg(hf_hub_id='timm/'),
    'beit_large_patch16_384.in22k_ft_in22k_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0),
    'beit_large_patch16_512.in22k_ft_in22k_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 512, 512), crop_pct=1.0),
    'beit_large_patch16_224.in22k_ft_in22k': _cfg(hf_hub_id='timm/', num_classes=21841),
    'beitv2_base_patch16_224.in1k_ft_in22k_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
    'beitv2_base_patch16_224.in1k_ft_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
    'beitv2_base_patch16_224.in1k_ft_in22k': _cfg(hf_hub_id='timm/', num_classes=21841, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
    'beitv2_large_patch16_224.in1k_ft_in22k_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
    'beitv2_large_patch16_224.in1k_ft_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
    'beitv2_large_patch16_224.in1k_ft_in22k': _cfg(hf_hub_id='timm/', num_classes=21841, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD)})

def checkpoint_filter_fn(state_dict, model, interpolation='bicubic', antialias=True):
    state_dict = state_dict.get('model', state_dict)
    state_dict = state_dict.get('module', state_dict)
    out_dict = {}
    for (k, v) in state_dict.items():
        if 'relative_position_index' in k:
            continue
        if 'patch_embed.proj.weight' in k:
            (O, I, H, W) = model.patch_embed.proj.weight.shape
            if v.shape[-1] != W or v.shape[-2] != H:
                v = resample_patch_embed(v, (H, W), interpolation=interpolation, antialias=antialias, verbose=True)
        elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
            num_prefix_tokens = 1
            v = resample_abs_pos_embed(v, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True)
        elif k.endswith('relative_position_bias_table'):
            m = model.get_submodule(k[:-29])
            if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]:
                v = resize_rel_pos_bias_table(v, new_window_size=m.window_size, new_bias_shape=m.relative_position_bias_table.shape)
        out_dict[k] = v
    return out_dict

def _create_beit(variant, pretrained=False, **kwargs):
    out_indices = kwargs.pop('out_indices', 3)
    model = build_model_with_cfg(Beit, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs)
    return model

@register_model
def beit_base_patch16_224(pretrained=False, **kwargs) -> Beit:
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1)
    model = _create_beit('beit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model

@register_model
def beit_base_patch16_384(pretrained=False, **kwargs) -> Beit:
    model_args = dict(img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1)
    model = _create_beit('beit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model

@register_model
def beit_large_patch16_224(pretrained=False, **kwargs) -> Beit:
    model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-05)
    model = _create_beit('beit_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model

@register_model
def beit_large_patch16_384(pretrained=False, **kwargs) -> Beit:
    model_args = dict(img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-05)
    model = _create_beit('beit_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model

@register_model
def beit_large_patch16_512(pretrained=False, **kwargs) -> Beit: model_args = dict(img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, use_abs_pos_emb=False, 
use_rel_pos_bias=True, init_values=1e-05) model = _create_beit('beit_large_patch16_512', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def beitv2_base_patch16_224(pretrained=False, **kwargs) -> Beit: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-05) model = _create_beit('beitv2_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def beitv2_large_patch16_224(pretrained=False, **kwargs) -> Beit: model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-05) model = _create_beit('beitv2_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model # File: pytorch-image-models-main/timm/models/byoanet.py """""" from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .byobnet import ByoBlockCfg, ByoModelCfg, ByobNet, interleave_blocks __all__ = [] model_cfgs = dict(botnet26t=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', fixed_input_size=True, self_attn_layer='bottleneck', self_attn_kwargs=dict()), sebotnet33ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333)), stem_chs=64, stem_type='tiered', stem_pool='', act_layer='silu', num_features=1280, attn_layer='se', self_attn_layer='bottleneck', self_attn_kwargs=dict()), botnet50ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', fixed_input_size=True, self_attn_layer='bottleneck', self_attn_kwargs=dict()), eca_botnext26ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', fixed_input_size=True, act_layer='silu', attn_layer='eca', self_attn_layer='bottleneck', self_attn_kwargs=dict(dim_head=16)), halonet_h1=ByoModelCfg(blocks=(ByoBlockCfg(type='self_attn', d=3, c=64, s=1, gs=0, br=1.0), ByoBlockCfg(type='self_attn', d=3, c=128, s=2, gs=0, br=1.0), ByoBlockCfg(type='self_attn', d=10, c=256, s=2, gs=0, br=1.0), ByoBlockCfg(type='self_attn', d=3, c=512, s=2, gs=0, br=1.0)), stem_chs=64, stem_type='7x7', stem_pool='maxpool', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3)), halonet26t=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', 
d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=2)), sehalonet33ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333)), stem_chs=64, stem_type='tiered', stem_pool='', act_layer='silu', num_features=1280, attn_layer='se', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3)), halonet50ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25, self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3, num_heads=4)), interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3)), eca_halonext26ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='eca', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=2, dim_head=16)), lambda_resnet26t=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', self_attn_layer='lambda', self_attn_kwargs=dict(r=9)), lambda_resnet50ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', self_attn_layer='lambda', self_attn_kwargs=dict(r=9)), lambda_resnet26rpt_256=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', self_attn_layer='lambda', self_attn_kwargs=dict(r=None)), haloregnetz_b=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), interleave_blocks(types=('bottle', 'self_attn'), every=3, d=12, c=192, s=2, gs=16, br=3), ByoBlockCfg('self_attn', d=2, c=288, 
s=2, gs=16, br=3)), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), self_attn_layer='halo', self_attn_kwargs=dict(block_size=7, halo_size=2, qk_ratio=0.33)), lamhalobotnet50ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25, self_attn_layer='lambda', self_attn_kwargs=dict(r=13)), interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25, self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25, self_attn_layer='bottleneck', self_attn_kwargs=dict())), stem_chs=64, stem_type='tiered', stem_pool='', act_layer='silu'), halo2botnet50ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25, self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25, self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25, self_attn_layer='bottleneck', self_attn_kwargs=dict())), stem_chs=64, stem_type='tiered', stem_pool='', act_layer='silu')) def _create_byoanet(variant, cfg_variant=None, pretrained=False, **kwargs): return build_model_with_cfg(ByobNet, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', 'fixed_input_size': False, 'min_input_size': (3, 224, 224), **kwargs} default_cfgs = generate_default_cfgs({'botnet26t_256.c1_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/botnet26t_c1_256-167a0e9f.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), 'sebotnet33ts_256.a1h_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sebotnet33ts_a1h2_256-957e3c3e.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), 'botnet50ts_256.untrained': _cfg(fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), 'eca_botnext26ts_256.c1_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_botnext26ts_c_256-95a898f6.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), 'halonet_h1.untrained': _cfg(input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), 'halonet26t.a1h_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet26t_a1h_256-3083328c.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), 'sehalonet33ts.ra2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sehalonet33ts_256-87e053f9.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), 
min_input_size=(3, 256, 256), crop_pct=0.94), 'halonet50ts.a1h_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet50ts_a1h2_256-f3a3daee.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), 'eca_halonext26ts.c1_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_halonext26ts_c_256-06906299.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), 'lambda_resnet26t.c1_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26t_c_256-e5a5c857.pth', hf_hub_id='timm/', min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), 'lambda_resnet50ts.a1h_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet50ts_a1h_256-b87370f7.pth', hf_hub_id='timm/', min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8)), 'lambda_resnet26rpt_256.c1_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26rpt_c_256-ab00292d.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), 'haloregnetz_b.ra3_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/haloregnetz_c_raa_256-c8ad7616.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), first_conv='stem.conv', input_size=(3, 224, 224), pool_size=(7, 7), min_input_size=(3, 224, 224), crop_pct=0.94), 'lamhalobotnet50ts_256.a1h_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lamhalobotnet50ts_a1h2_256-fe3d9445.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), 'halo2botnet50ts_256.a1h_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halo2botnet50ts_a1h2_256-fd9c11a3.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8))}) @register_model def botnet26t_256(pretrained=False, **kwargs) -> ByobNet: kwargs.setdefault('img_size', 256) return _create_byoanet('botnet26t_256', 'botnet26t', pretrained=pretrained, **kwargs) @register_model def sebotnet33ts_256(pretrained=False, **kwargs) -> ByobNet: return _create_byoanet('sebotnet33ts_256', 'sebotnet33ts', pretrained=pretrained, **kwargs) @register_model def botnet50ts_256(pretrained=False, **kwargs) -> ByobNet: kwargs.setdefault('img_size', 256) return _create_byoanet('botnet50ts_256', 'botnet50ts', pretrained=pretrained, **kwargs) @register_model def eca_botnext26ts_256(pretrained=False, **kwargs) -> ByobNet: kwargs.setdefault('img_size', 256) return _create_byoanet('eca_botnext26ts_256', 'eca_botnext26ts', pretrained=pretrained, **kwargs) @register_model def halonet_h1(pretrained=False, **kwargs) -> ByobNet: return _create_byoanet('halonet_h1', pretrained=pretrained, **kwargs) @register_model def halonet26t(pretrained=False, **kwargs) -> ByobNet: return _create_byoanet('halonet26t', pretrained=pretrained, **kwargs) @register_model def sehalonet33ts(pretrained=False, **kwargs) -> ByobNet: return _create_byoanet('sehalonet33ts', pretrained=pretrained, **kwargs) @register_model def halonet50ts(pretrained=False, **kwargs) -> ByobNet: return _create_byoanet('halonet50ts', 
pretrained=pretrained, **kwargs) @register_model def eca_halonext26ts(pretrained=False, **kwargs) -> ByobNet: return _create_byoanet('eca_halonext26ts', pretrained=pretrained, **kwargs) @register_model def lambda_resnet26t(pretrained=False, **kwargs) -> ByobNet: return _create_byoanet('lambda_resnet26t', pretrained=pretrained, **kwargs) @register_model def lambda_resnet50ts(pretrained=False, **kwargs) -> ByobNet: return _create_byoanet('lambda_resnet50ts', pretrained=pretrained, **kwargs) @register_model def lambda_resnet26rpt_256(pretrained=False, **kwargs) -> ByobNet: kwargs.setdefault('img_size', 256) return _create_byoanet('lambda_resnet26rpt_256', pretrained=pretrained, **kwargs) @register_model def haloregnetz_b(pretrained=False, **kwargs) -> ByobNet: return _create_byoanet('haloregnetz_b', pretrained=pretrained, **kwargs) @register_model def lamhalobotnet50ts_256(pretrained=False, **kwargs) -> ByobNet: return _create_byoanet('lamhalobotnet50ts_256', 'lamhalobotnet50ts', pretrained=pretrained, **kwargs) @register_model def halo2botnet50ts_256(pretrained=False, **kwargs) -> ByobNet: return _create_byoanet('halo2botnet50ts_256', 'halo2botnet50ts', pretrained=pretrained, **kwargs) # File: pytorch-image-models-main/timm/models/byobnet.py """""" import math from dataclasses import dataclass, field, replace from functools import partial from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import ClassifierHead, NormMlpClassifierHead, ConvNormAct, BatchNormAct2d, EvoNorm2dS0a, AttentionPool2d, RotAttentionPool2d, DropPath, AvgPool2dSame, create_conv2d, get_act_layer, get_norm_act_layer, get_attn, make_divisible, to_2tuple from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block'] @dataclass class ByoBlockCfg: type: Union[str, nn.Module] d: int c: int s: int = 2 gs: Optional[Union[int, Callable]] = None br: float = 1.0 attn_layer: Optional[str] = None attn_kwargs: Optional[Dict[str, Any]] = None self_attn_layer: Optional[str] = None self_attn_kwargs: Optional[Dict[str, Any]] = None block_kwargs: Optional[Dict[str, Any]] = None @dataclass class ByoModelCfg: blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...] 
downsample: str = 'conv1x1' stem_type: str = '3x3' stem_pool: Optional[str] = 'maxpool' stem_chs: Union[int, List[int], Tuple[int, ...]] = 32 width_factor: float = 1.0 num_features: int = 0 zero_init_last: bool = True fixed_input_size: bool = False act_layer: str = 'relu' norm_layer: str = 'batchnorm' aa_layer: str = '' head_hidden_size: Optional[int] = None head_type: str = 'classifier' attn_layer: Optional[str] = None attn_kwargs: dict = field(default_factory=lambda : dict()) self_attn_layer: Optional[str] = None self_attn_kwargs: dict = field(default_factory=lambda : dict()) block_kwargs: Dict[str, Any] = field(default_factory=lambda : dict()) def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1.0, 1.0, 1.0, 1.0), groups=0): c = (64, 128, 256, 512) group_size = 0 if groups > 0: group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0 bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for (d, c, wf) in zip(d, c, wf)]) return bcfg def _mobileone_bcfg(d=(2, 8, 10, 1), wf=(1.0, 1.0, 1.0, 1.0), se_blocks=(), num_conv_branches=1): c = (64, 128, 256, 512) prev_c = min(64, c[0] * wf[0]) se_blocks = se_blocks or (0,) * len(d) bcfg = [] for (d, c, w, se) in zip(d, c, wf, se_blocks): scfg = [] for i in range(d): out_c = c * w bk = dict(num_conv_branches=num_conv_branches) ak = {} if i >= d - se: ak['attn_layer'] = 'se' scfg += [ByoBlockCfg(type='one', d=1, c=prev_c, gs=1, block_kwargs=bk, **ak)] scfg += [ByoBlockCfg(type='one', d=1, c=out_c, gs=0, block_kwargs=dict(kernel_size=1, **bk), **ak)] prev_c = out_c bcfg += [scfg] return bcfg def interleave_blocks(types: Tuple[str, str], d, every: Union[int, List[int]]=1, first: bool=False, **kwargs) -> Tuple[ByoBlockCfg, ...]: assert len(types) == 2 if isinstance(every, int): every = list(range(0 if first else every, d, every + 1)) if not every: every = [d - 1] every = set(every) blocks = [] for i in range(d): block_type = types[1] if i in every else types[0] blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)] return tuple(blocks) def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]: if not isinstance(stage_blocks_cfg, Sequence): stage_blocks_cfg = (stage_blocks_cfg,) block_cfgs = [] for (i, cfg) in enumerate(stage_blocks_cfg): block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)] return block_cfgs def num_groups(group_size, channels): if not group_size: return 1 else: assert channels % group_size == 0 return channels // group_size @dataclass class LayerFn: conv_norm_act: Callable = ConvNormAct norm_act: Callable = BatchNormAct2d act: Callable = nn.ReLU attn: Optional[Callable] = None self_attn: Optional[Callable] = None class DownsampleAvg(nn.Module): def __init__(self, in_chs: int, out_chs: int, stride: int=1, dilation: int=1, apply_act: bool=False, layers: LayerFn=None): super(DownsampleAvg, self).__init__() layers = layers or LayerFn() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act) def forward(self, x): return self.conv(self.pool(x)) def create_shortcut(downsample_type: str, in_chs: int, out_chs: int, stride: int, dilation: Tuple[int, int], layers: LayerFn, **kwargs): assert downsample_type in ('avg', 'conv1x1', '') if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: if not
downsample_type: return None elif downsample_type == 'avg': return DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation[0], **kwargs) else: return layers.conv_norm_act(in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation[0], **kwargs) else: return nn.Identity() class BasicBlock(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int=3, stride: int=1, dilation: Tuple[int, int]=(1, 1), group_size: Optional[int]=None, bottle_ratio: float=1.0, downsample: str='avg', attn_last: bool=True, linear_out: bool=False, layers: LayerFn=None, drop_block: Callable=None, drop_path_rate: float=0.0): super(BasicBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible(out_chs * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut(downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers) self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0]) self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv2_kxk = layers.conv_norm_act(mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_layer=drop_block, apply_act=False) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool=False): if zero_init_last and self.shortcut is not None and (getattr(self.conv2_kxk.bn, 'weight', None) is not None): nn.init.zeros_(self.conv2_kxk.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_kxk(x) x = self.attn(x) x = self.conv2_kxk(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class BottleneckBlock(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int=3, stride: int=1, dilation: Tuple[int, int]=(1, 1), bottle_ratio: float=1.0, group_size: Optional[int]=None, downsample: str='avg', attn_last: bool=False, linear_out: bool=False, extra_conv: bool=False, bottle_in: bool=False, layers: LayerFn=None, drop_block: Callable=None, drop_path_rate: float=0.0): super(BottleneckBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut(downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers) self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) self.conv2_kxk = layers.conv_norm_act(mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block) if extra_conv: self.conv2b_kxk = layers.conv_norm_act(mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups) else: self.conv2b_kxk = nn.Identity() self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, 
zero_init_last: bool=False): if zero_init_last and self.shortcut is not None and (getattr(self.conv3_1x1.bn, 'weight', None) is not None): nn.init.zeros_(self.conv3_1x1.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_1x1(x) x = self.conv2_kxk(x) x = self.conv2b_kxk(x) x = self.attn(x) x = self.conv3_1x1(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class DarkBlock(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int=3, stride: int=1, dilation: Tuple[int, int]=(1, 1), bottle_ratio: float=1.0, group_size: Optional[int]=None, downsample: str='avg', attn_last: bool=True, linear_out: bool=False, layers: LayerFn=None, drop_block: Callable=None, drop_path_rate: float=0.0): super(DarkBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible(out_chs * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut(downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers) self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv2_kxk = layers.conv_norm_act(mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool=False): if zero_init_last and self.shortcut is not None and (getattr(self.conv2_kxk.bn, 'weight', None) is not None): nn.init.zeros_(self.conv2_kxk.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_1x1(x) x = self.attn(x) x = self.conv2_kxk(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class EdgeBlock(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int=3, stride: int=1, dilation: Tuple[int, int]=(1, 1), bottle_ratio: float=1.0, group_size: Optional[int]=None, downsample: str='avg', attn_last: bool=False, linear_out: bool=False, layers: LayerFn=None, drop_block: Callable=None, drop_path_rate: float=0.0): super(EdgeBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible(out_chs * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut(downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers) self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block) self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool=False): if zero_init_last and self.shortcut is not None and 
(getattr(self.conv2_1x1.bn, 'weight', None) is not None): nn.init.zeros_(self.conv2_1x1.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_kxk(x) x = self.attn(x) x = self.conv2_1x1(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class RepVggBlock(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int=3, stride: int=1, dilation: Tuple[int, int]=(1, 1), bottle_ratio: float=1.0, group_size: Optional[int]=None, downsample: str='', layers: LayerFn=None, drop_block: Callable=None, drop_path_rate: float=0.0, inference_mode: bool=False): super(RepVggBlock, self).__init__() self.groups = groups = num_groups(group_size, in_chs) layers = layers or LayerFn() if inference_mode: self.reparam_conv = nn.Conv2d(in_channels=in_chs, out_channels=out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=groups, bias=True) else: self.reparam_conv = None use_ident = in_chs == out_chs and stride == 1 and (dilation[0] == dilation[1]) self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None self.conv_kxk = layers.conv_norm_act(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False) self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 and use_ident else nn.Identity() self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) self.act = layers.act(inplace=True) def init_weights(self, zero_init_last: bool=False): for m in self.modules(): if isinstance(m, nn.BatchNorm2d): nn.init.normal_(m.weight, 0.1, 0.1) nn.init.normal_(m.bias, 0, 0.1) if hasattr(self.attn, 'reset_parameters'): self.attn.reset_parameters() def forward(self, x): if self.reparam_conv is not None: return self.act(self.attn(self.reparam_conv(x))) if self.identity is None: x = self.conv_1x1(x) + self.conv_kxk(x) else: identity = self.identity(x) x = self.conv_1x1(x) + self.conv_kxk(x) x = self.drop_path(x) x += identity x = self.attn(x) return self.act(x) def reparameterize(self): if self.reparam_conv is not None: return (kernel, bias) = self._get_kernel_bias() self.reparam_conv = nn.Conv2d(in_channels=self.conv_kxk.conv.in_channels, out_channels=self.conv_kxk.conv.out_channels, kernel_size=self.conv_kxk.conv.kernel_size, stride=self.conv_kxk.conv.stride, padding=self.conv_kxk.conv.padding, dilation=self.conv_kxk.conv.dilation, groups=self.conv_kxk.conv.groups, bias=True) self.reparam_conv.weight.data = kernel self.reparam_conv.bias.data = bias for (name, para) in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__('conv_kxk') self.__delattr__('conv_1x1') self.__delattr__('identity') self.__delattr__('drop_path') def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: kernel_1x1 = 0 bias_1x1 = 0 if self.conv_1x1 is not None: (kernel_1x1, bias_1x1) = self._fuse_bn_tensor(self.conv_1x1) pad = self.conv_kxk.conv.kernel_size[0] // 2 kernel_1x1 = torch.nn.functional.pad(kernel_1x1, [pad, pad, pad, pad]) kernel_identity = 0 bias_identity = 0 if self.identity is not None: (kernel_identity, bias_identity) = self._fuse_bn_tensor(self.identity) (kernel_conv, bias_conv) = self._fuse_bn_tensor(self.conv_kxk) kernel_final = kernel_conv + kernel_1x1 + kernel_identity 
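# The branch biases sum the same way as the kernels. _fuse_bn_tensor (below) folds each
# Conv+BN pair (or the bare-BN identity branch) into an equivalent conv:
#   w' = w * gamma / sqrt(running_var + eps),  b' = beta - running_mean * gamma / sqrt(running_var + eps)
# The 1x1 kernel was zero-padded to kxk above, so all branches collapse into a single conv.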
bias_final = bias_conv + bias_1x1 + bias_identity return (kernel_final, bias_final) def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]: if isinstance(branch, ConvNormAct): kernel = branch.conv.weight running_mean = branch.bn.running_mean running_var = branch.bn.running_var gamma = branch.bn.weight beta = branch.bn.bias eps = branch.bn.eps else: assert isinstance(branch, nn.BatchNorm2d) if not hasattr(self, 'id_tensor'): in_chs = self.conv_kxk.conv.in_channels input_dim = in_chs // self.groups kernel_size = self.conv_kxk.conv.kernel_size kernel_value = torch.zeros_like(self.conv_kxk.conv.weight) for i in range(in_chs): kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1 self.id_tensor = kernel_value kernel = self.id_tensor running_mean = branch.running_mean running_var = branch.running_var gamma = branch.weight beta = branch.bias eps = branch.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return (kernel * t, beta - running_mean * gamma / std) class MobileOneBlock(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int=3, stride: int=1, dilation: Tuple[int, int]=(1, 1), bottle_ratio: float=1.0, group_size: Optional[int]=None, downsample: str='', inference_mode: bool=False, num_conv_branches: int=1, layers: LayerFn=None, drop_block: Callable=None, drop_path_rate: float=0.0) -> None: super(MobileOneBlock, self).__init__() self.num_conv_branches = num_conv_branches self.groups = groups = num_groups(group_size, in_chs) layers = layers or LayerFn() if inference_mode: self.reparam_conv = nn.Conv2d(in_channels=in_chs, out_channels=out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=groups, bias=True) else: self.reparam_conv = None use_ident = in_chs == out_chs and stride == 1 and (dilation[0] == dilation[1]) self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None convs = [] for _ in range(self.num_conv_branches): convs.append(layers.conv_norm_act(in_chs, out_chs, kernel_size=kernel_size, stride=stride, groups=groups, apply_act=False)) self.conv_kxk = nn.ModuleList(convs) self.conv_scale = None if kernel_size > 1: self.conv_scale = layers.conv_norm_act(in_chs, out_chs, kernel_size=1, stride=stride, groups=groups, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 and use_ident else nn.Identity() self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) self.act = layers.act(inplace=True) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.reparam_conv is not None: return self.act(self.attn(self.reparam_conv(x))) identity_out = 0 if self.identity is not None: identity_out = self.identity(x) scale_out = 0 if self.conv_scale is not None: scale_out = self.conv_scale(x) out = scale_out for ck in self.conv_kxk: out += ck(x) out = self.drop_path(out) out += identity_out return self.act(self.attn(out)) def reparameterize(self): if self.reparam_conv is not None: return (kernel, bias) = self._get_kernel_bias() self.reparam_conv = nn.Conv2d(in_channels=self.conv_kxk[0].conv.in_channels, out_channels=self.conv_kxk[0].conv.out_channels, kernel_size=self.conv_kxk[0].conv.kernel_size, stride=self.conv_kxk[0].conv.stride, padding=self.conv_kxk[0].conv.padding, dilation=self.conv_kxk[0].conv.dilation, groups=self.conv_kxk[0].conv.groups, bias=True) self.reparam_conv.weight.data = kernel self.reparam_conv.bias.data = bias for (name, para) in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() 
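# All branch weights are now folded into reparam_conv; the training-time branches below are
# deleted so forward() takes the single fused-conv fast path (reparam_conv is not None).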
self.__delattr__('conv_kxk') self.__delattr__('conv_scale') self.__delattr__('identity') self.__delattr__('drop_path') def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: kernel_scale = 0 bias_scale = 0 if self.conv_scale is not None: (kernel_scale, bias_scale) = self._fuse_bn_tensor(self.conv_scale) pad = self.conv_kxk[0].conv.kernel_size[0] // 2 kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad]) kernel_identity = 0 bias_identity = 0 if self.identity is not None: (kernel_identity, bias_identity) = self._fuse_bn_tensor(self.identity) kernel_conv = 0 bias_conv = 0 for ix in range(self.num_conv_branches): (_kernel, _bias) = self._fuse_bn_tensor(self.conv_kxk[ix]) kernel_conv += _kernel bias_conv += _bias kernel_final = kernel_conv + kernel_scale + kernel_identity bias_final = bias_conv + bias_scale + bias_identity return (kernel_final, bias_final) def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]: if isinstance(branch, ConvNormAct): kernel = branch.conv.weight running_mean = branch.bn.running_mean running_var = branch.bn.running_var gamma = branch.bn.weight beta = branch.bn.bias eps = branch.bn.eps else: assert isinstance(branch, nn.BatchNorm2d) if not hasattr(self, 'id_tensor'): in_chs = self.conv_kxk[0].conv.in_channels input_dim = in_chs // self.groups kernel_size = self.conv_kxk[0].conv.kernel_size kernel_value = torch.zeros_like(self.conv_kxk[0].conv.weight) for i in range(in_chs): kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1 self.id_tensor = kernel_value kernel = self.id_tensor running_mean = branch.running_mean running_var = branch.running_var gamma = branch.weight beta = branch.bias eps = branch.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return (kernel * t, beta - running_mean * gamma / std) class SelfAttnBlock(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int=3, stride: int=1, dilation: Tuple[int, int]=(1, 1), bottle_ratio: float=1.0, group_size: Optional[int]=None, downsample: str='avg', extra_conv: bool=False, linear_out: bool=False, bottle_in: bool=False, post_attn_na: bool=True, feat_size: Optional[Tuple[int, int]]=None, layers: LayerFn=None, drop_block: Callable=None, drop_path_rate: float=0.0): super(SelfAttnBlock, self).__init__() assert layers is not None mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut(downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers) self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) if extra_conv: self.conv2_kxk = layers.conv_norm_act(mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block) stride = 1 else: self.conv2_kxk = nn.Identity() opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size) self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs) self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity() self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool=False): if zero_init_last and self.shortcut is not None and (getattr(self.conv3_1x1.bn, 'weight', None) is not None): nn.init.zeros_(self.conv3_1x1.bn.weight) if hasattr(self.self_attn, 
'reset_parameters'): self.self_attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_1x1(x) x = self.conv2_kxk(x) x = self.self_attn(x) x = self.post_attn(x) x = self.conv3_1x1(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) _block_registry = dict(basic=BasicBlock, bottle=BottleneckBlock, dark=DarkBlock, edge=EdgeBlock, rep=RepVggBlock, one=MobileOneBlock, self_attn=SelfAttnBlock) def register_block(block_type: str, block_fn: nn.Module): _block_registry[block_type] = block_fn def create_block(block: Union[str, nn.Module], **kwargs): if isinstance(block, (nn.Module, partial)): return block(**kwargs) assert block in _block_registry, f'Unknown block type ({block})' return _block_registry[block](**kwargs) class Stem(nn.Sequential): def __init__(self, in_chs: int, out_chs: Union[int, List[int], Tuple[int, ...]], kernel_size: int=3, stride: int=4, pool: str='maxpool', num_rep: int=3, num_act: Optional[int]=None, chs_decay: float=0.5, layers: LayerFn=None): super().__init__() assert stride in (2, 4) layers = layers or LayerFn() if isinstance(out_chs, (list, tuple)): num_rep = len(out_chs) stem_chs = out_chs else: stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1] self.stride = stride self.feature_info = [] prev_feat = '' stem_strides = [2] + [1] * (num_rep - 1) if stride == 4 and (not pool): stem_strides[-1] = 2 num_act = num_rep if num_act is None else num_act stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act prev_chs = in_chs curr_stride = 1 last_feat_idx = -1 for (i, (ch, s, na)) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)): layer_fn = layers.conv_norm_act if na else create_conv2d conv_name = f'conv{i + 1}' if i > 0 and s > 1: last_feat_idx = i - 1 self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0)) self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s)) prev_chs = ch curr_stride *= s prev_feat = conv_name if pool: pool = pool.lower() assert pool in ('max', 'maxpool', 'avg', 'avgpool', 'max2', 'avg2') last_feat_idx = i self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0)) if pool == 'max2': self.add_module('pool', nn.MaxPool2d(2)) elif pool == 'avg2': self.add_module('pool', nn.AvgPool2d(2)) elif 'max' in pool: self.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) elif 'avg' in pool: self.add_module('pool', nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False)) curr_stride *= 2 prev_feat = 'pool' self.last_feat_idx = last_feat_idx if last_feat_idx >= 0 else None self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0)) assert curr_stride == stride def forward_intermediates(self, x) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: intermediate: Optional[torch.Tensor] = None for (i, m) in enumerate(self): x = m(x) if self.last_feat_idx is not None and i == self.last_feat_idx: intermediate = x return (x, intermediate) def create_byob_stem(in_chs: int, out_chs: int, stem_type: str='', pool_type: str='', feat_prefix: str='stem', layers: LayerFn=None): layers = layers or LayerFn() assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', 'one', '7x7', '3x3') if 'quad' in stem_type: num_act = 2 if 'quad2' in stem_type else None stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers) elif 'tiered' in stem_type: stem = Stem(in_chs, (3 *
out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers) elif 'deep' in stem_type: stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers) elif 'rep' in stem_type: stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers) elif 'one' in stem_type: stem = MobileOneBlock(in_chs, out_chs, kernel_size=3, stride=2, layers=layers) elif '7x7' in stem_type: if pool_type: stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers) else: stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2) elif isinstance(out_chs, (tuple, list)): stem = Stem(in_chs, out_chs, 3, pool=pool_type, layers=layers) elif pool_type: stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers) else: stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2) if isinstance(stem, Stem): feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info] else: feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix, stage=0)] return (stem, feature_info) def reduce_feat_size(feat_size, stride=2): return None if feat_size is None else tuple([s // stride for s in feat_size]) def override_kwargs(block_kwargs, model_kwargs): out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs return out_kwargs or {} def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg): layer_fns = block_kwargs['layers'] attn_set = block_cfg.attn_layer is not None if attn_set or block_cfg.attn_kwargs is not None: if attn_set and (not block_cfg.attn_layer): attn_layer = None else: attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs) attn_layer = block_cfg.attn_layer or model_cfg.attn_layer attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None layer_fns = replace(layer_fns, attn=attn_layer) self_attn_set = block_cfg.self_attn_layer is not None if self_attn_set or block_cfg.self_attn_kwargs is not None: if self_attn_set and (not block_cfg.self_attn_layer): self_attn_layer = None else: self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs) self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) if self_attn_layer is not None else None layer_fns = replace(layer_fns, self_attn=self_attn_layer) block_kwargs['layers'] = layer_fns block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs)) def create_byob_stages(cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any], feat_size: Optional[int]=None, layers: Optional[LayerFn]=None, block_kwargs_fn: Optional[Callable]=update_block_kwargs): layers = layers or LayerFn() feature_info = [] block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks] depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs] dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] dilation = 1 net_stride = stem_feat['reduction'] prev_chs = stem_feat['num_chs'] prev_feat = stem_feat stages = [] for (stage_idx, stage_block_cfgs) in enumerate(block_cfgs): stride = stage_block_cfgs[0].s if stride != 1 and prev_feat: feature_info.append(prev_feat) if net_stride >= output_stride and stride > 1: dilation *= stride stride = 1 net_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 blocks = [] for (block_idx, block_cfg) in enumerate(stage_block_cfgs): out_chs = 
make_divisible(block_cfg.c * cfg.width_factor) group_size = block_cfg.gs if isinstance(group_size, Callable): group_size = group_size(out_chs, block_idx) block_kwargs = dict(in_chs=prev_chs, out_chs=out_chs, stride=stride if block_idx == 0 else 1, dilation=(first_dilation, dilation), group_size=group_size, bottle_ratio=block_cfg.br, downsample=cfg.downsample, drop_path_rate=dpr[stage_idx][block_idx], layers=layers) if block_cfg.type in ('self_attn',): block_kwargs['feat_size'] = feat_size block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg) blocks += [create_block(block_cfg.type, **block_kwargs)] first_dilation = dilation prev_chs = out_chs if stride > 1 and block_idx == 0: feat_size = reduce_feat_size(feat_size, stride) stages += [nn.Sequential(*blocks)] prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}', stage=stage_idx + 1) feature_info.append(prev_feat) return (nn.Sequential(*stages), feature_info, feat_size) def get_layer_fns(cfg: ByoModelCfg, allow_aa: bool=True): act = get_act_layer(cfg.act_layer) norm_act = get_norm_act_layer(norm_layer=cfg.norm_layer, act_layer=act) if cfg.aa_layer and allow_aa: conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act, aa_layer=cfg.aa_layer) else: conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act) attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn) return layer_fn class ByobNet(nn.Module): def __init__(self, cfg: ByoModelCfg, num_classes: int=1000, in_chans: int=3, global_pool: Optional[str]=None, output_stride: int=32, img_size: Optional[Union[int, Tuple[int, int]]]=None, drop_rate: float=0.0, drop_path_rate: float=0.0, zero_init_last: bool=True, **kwargs): super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False cfg = replace(cfg, **kwargs) stem_layers = get_layer_fns(cfg, allow_aa=False) stage_layers = get_layer_fns(cfg) if cfg.fixed_input_size: assert img_size is not None, 'img_size argument is required for fixed input size model' feat_size = to_2tuple(img_size) if img_size is not None else None self.feature_info = [] if isinstance(cfg.stem_chs, (list, tuple)): stem_chs = [int(round(c * cfg.width_factor)) for c in cfg.stem_chs] else: stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor)) (self.stem, stem_feat) = create_byob_stem(in_chs=in_chans, out_chs=stem_chs, stem_type=cfg.stem_type, pool_type=cfg.stem_pool, layers=stem_layers) self.feature_info.extend(stem_feat[:-1]) feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction']) (self.stages, stage_feat, feat_size) = create_byob_stages(cfg, drop_path_rate, output_stride, stem_feat[-1], layers=stage_layers, feat_size=feat_size) self.feature_info.extend(stage_feat[:-1]) reduction = stage_feat[-1]['reduction'] prev_chs = stage_feat[-1]['num_chs'] if cfg.num_features: self.num_features = int(round(cfg.width_factor * cfg.num_features)) self.final_conv = stage_layers.conv_norm_act(prev_chs, self.num_features, 1) else: self.num_features = prev_chs self.final_conv = nn.Identity() self.feature_info += [dict(num_chs=self.num_features, reduction=reduction, module='final_conv', stage=len(self.stages))] self.stage_ends = [f['stage'] for f in self.feature_info] 
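# Head construction per cfg.head_type: 'mlp' -> NormMlpClassifierHead, 'attn_abs' -> AttentionPool2d
# (CLIP-style learned absolute position embedding), 'attn_rot' -> RotAttentionPool2d (rotary position
# embedding), otherwise a standard ClassifierHead over global average pooling.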
self.head_hidden_size = self.num_features assert cfg.head_type in ('', 'classifier', 'mlp', 'attn_abs', 'attn_rot') if cfg.head_type == 'mlp': if global_pool is None: global_pool = 'avg' self.head = NormMlpClassifierHead(self.num_features, num_classes, hidden_size=cfg.head_hidden_size, pool_type=global_pool, norm_layer=cfg.norm_layer, act_layer=cfg.act_layer, drop_rate=self.drop_rate) self.head_hidden_size = self.head.hidden_size elif cfg.head_type == 'attn_abs': if global_pool is None: global_pool = 'token' assert global_pool in ('', 'token') self.head = AttentionPool2d(self.num_features, embed_dim=cfg.head_hidden_size, out_features=num_classes, feat_size=feat_size, pool_type=global_pool, drop_rate=self.drop_rate, qkv_separate=True) self.head_hidden_size = self.head.embed_dim elif cfg.head_type == 'attn_rot': if global_pool is None: global_pool = 'token' assert global_pool in ('', 'token') self.head = RotAttentionPool2d(self.num_features, embed_dim=cfg.head_hidden_size, out_features=num_classes, ref_feat_size=feat_size, pool_type=global_pool, drop_rate=self.drop_rate, qkv_separate=True) self.head_hidden_size = self.head.embed_dim else: if global_pool is None: global_pool = 'avg' assert cfg.head_hidden_size is None self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) self.global_pool = global_pool named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^stem', blocks=[('^stages\\.(\\d+)' if coarse else '^stages\\.(\\d+)\\.(\\d+)', None), ('^final_conv', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False, exclude_final_conv: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
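# Requested indices are relative to stage-end features: map them through self.stage_ends so the
# stem output counts as feature 0 and the final_conv output is folded into the last stage.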
intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.stage_ends), indices) take_indices = [self.stage_ends[i] for i in take_indices] max_index = self.stage_ends[max_index] feat_idx = 0 if hasattr(self.stem, 'forward_intermediates'): (x, x_inter) = self.stem.forward_intermediates(x) else: (x, x_inter) = (self.stem(x), None) if feat_idx in take_indices: intermediates.append(x if x_inter is None else x_inter) last_idx = self.stage_ends[-1] if torch.jit.is_scripting() or not stop_early: stages = self.stages else: stages = self.stages[:max_index] for stage in stages: feat_idx += 1 x = stage(x) if not exclude_final_conv and feat_idx == last_idx: x = self.final_conv(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates if exclude_final_conv and feat_idx == last_idx: x = self.final_conv(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.stage_ends), indices) max_index = self.stage_ends[max_index] self.stages = self.stages[:max_index] if max_index < self.stage_ends[-1]: self.final_conv = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.final_conv(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name='', zero_init_last=False): if isinstance(module, nn.Conv2d): fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Linear): nn.init.normal_(module.weight, mean=0.0, std=0.01) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.BatchNorm2d): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights(zero_init_last=zero_init_last) model_cfgs = dict(gernet_l=ByoModelCfg(blocks=(ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.0), ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.0), ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.0), ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.0)), stem_chs=32, stem_pool=None, num_features=2560), gernet_m=ByoModelCfg(blocks=(ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.0), ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.0), ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.0), ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.0)), stem_chs=32, stem_pool=None, num_features=2560), gernet_s=ByoModelCfg(blocks=(ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.0), ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.0), ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4), ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.0), ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.0)), stem_chs=13, stem_pool=None, num_features=1920), repvgg_a0=ByoModelCfg(blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(0.75, 
0.75, 0.75, 2.5)), stem_type='rep', stem_chs=48), repvgg_a1=ByoModelCfg(blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1, 1, 1, 2.5)), stem_type='rep', stem_chs=64), repvgg_a2=ByoModelCfg(blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)), stem_type='rep', stem_chs=64), repvgg_b0=ByoModelCfg(blocks=_rep_vgg_bcfg(wf=(1.0, 1.0, 1.0, 2.5)), stem_type='rep', stem_chs=64), repvgg_b1=ByoModelCfg(blocks=_rep_vgg_bcfg(wf=(2.0, 2.0, 2.0, 4.0)), stem_type='rep', stem_chs=64), repvgg_b1g4=ByoModelCfg(blocks=_rep_vgg_bcfg(wf=(2.0, 2.0, 2.0, 4.0), groups=4), stem_type='rep', stem_chs=64), repvgg_b2=ByoModelCfg(blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.0)), stem_type='rep', stem_chs=64), repvgg_b2g4=ByoModelCfg(blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.0), groups=4), stem_type='rep', stem_chs=64), repvgg_b3=ByoModelCfg(blocks=_rep_vgg_bcfg(wf=(3.0, 3.0, 3.0, 5.0)), stem_type='rep', stem_chs=64), repvgg_b3g4=ByoModelCfg(blocks=_rep_vgg_bcfg(wf=(3.0, 3.0, 3.0, 5.0), groups=4), stem_type='rep', stem_chs=64), repvgg_d2se=ByoModelCfg(blocks=_rep_vgg_bcfg(d=(8, 14, 24, 1), wf=(2.5, 2.5, 2.5, 5.0)), stem_type='rep', stem_chs=64, attn_layer='se', attn_kwargs=dict(rd_ratio=0.0625, rd_divisor=1)), resnet51q=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0)), stem_chs=128, stem_type='quad2', stem_pool=None, num_features=2048, act_layer='silu'), resnet61q=ByoModelCfg(blocks=(ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()), ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0)), stem_chs=128, stem_type='quad', stem_pool=None, num_features=2048, act_layer='silu', block_kwargs=dict(extra_conv=True)), resnext26ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu'), gcresnext26ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='gca'), seresnext26ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='se'), eca_resnext26ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='eca'), bat_resnext26ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), 
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='bat', attn_kwargs=dict(block_size=8)), resnet32ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='', num_features=0, act_layer='silu'), resnet33ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu'), gcresnet33ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', attn_layer='gca'), seresnet33ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', attn_layer='se'), eca_resnet33ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', attn_layer='eca'), gcresnet50t=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='', attn_layer='gca'), gcresnext50ts=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25)), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='gca'), regnetz_b16=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3)), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True)), regnetz_c16=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, 
br=4)), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True)), regnetz_d32=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=32, br=4), ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=32, br=4), ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=32, br=4), ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=32, br=4)), stem_chs=64, stem_type='tiered', stem_pool='', downsample='', num_features=1792, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True)), regnetz_d8=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4)), stem_chs=64, stem_type='tiered', stem_pool='', downsample='', num_features=1792, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True)), regnetz_e8=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=96, s=1, gs=8, br=4), ByoBlockCfg(type='bottle', d=8, c=192, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=16, c=384, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=8, br=4)), stem_chs=64, stem_type='tiered', stem_pool='', downsample='', num_features=2048, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True)), regnetz_b16_evos=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3)), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', norm_layer=partial(EvoNorm2dS0a, group_size=16), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True)), regnetz_c16_evos=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4)), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', norm_layer=partial(EvoNorm2dS0a, group_size=16), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True)), regnetz_d8_evos=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4)), stem_chs=64, stem_type='deep', stem_pool='', downsample='', num_features=1792, act_layer='silu', norm_layer=partial(EvoNorm2dS0a, group_size=16), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True)), mobileone_s0=ByoModelCfg(blocks=_mobileone_bcfg(wf=(0.75, 1.0, 1.0, 2.0), num_conv_branches=4), stem_type='one', stem_chs=48), mobileone_s1=ByoModelCfg(blocks=_mobileone_bcfg(wf=(1.5, 1.5, 2.0, 2.5)), stem_type='one', stem_chs=64), mobileone_s2=ByoModelCfg(blocks=_mobileone_bcfg(wf=(1.5, 2.0, 2.5, 4.0)), stem_type='one', stem_chs=64), mobileone_s3=ByoModelCfg(blocks=_mobileone_bcfg(wf=(2.0, 2.5, 3.0, 4.0)), stem_type='one', stem_chs=64), 
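# A minimal usage sketch (illustrative only, not part of this module), assuming a standard timm
# install: the cfgs in this dict are resolved by name through _create_byobnet / the model registry,
# and reparameterizable blocks (RepVgg/MobileOne) can be fused for inference:
#   import torch
#   import timm
#   model = timm.create_model('mobileone_s1', pretrained=False).eval()
#   for m in model.modules():
#       if hasattr(m, 'reparameterize'):
#           m.reparameterize()  # fold conv branches into a single conv per block
#   logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 1000)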
mobileone_s4=ByoModelCfg(blocks=_mobileone_bcfg(wf=(3.0, 3.5, 3.5, 4.0), se_blocks=(0, 0, 5, 1)), stem_type='one', stem_chs=64), resnet50_clip=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25)), stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs'), resnet101_clip=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=23, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25)), stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs'), resnet50x4_clip=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=4, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=6, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=10, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=6, c=2048, s=2, br=0.25)), width_factor=1.25, stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs'), resnet50x16_clip=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=6, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=8, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=18, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=8, c=2048, s=2, br=0.25)), width_factor=1.5, stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs'), resnet50x64_clip=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=15, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=36, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=10, c=2048, s=2, br=0.25)), width_factor=2.0, stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs'), resnet50_mlp=ByoModelCfg(blocks=(ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25)), stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_hidden_size=1024, head_type='mlp'), test_byobnet=ByoModelCfg(blocks=(ByoBlockCfg(type='edge', d=1, c=32, s=2, gs=0, br=0.5), ByoBlockCfg(type='dark', d=1, c=64, s=2, gs=0, br=0.5), ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=1, c=256, s=2, gs=64, br=0.25)), stem_chs=24, downsample='avg', stem_pool='', act_layer='relu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25))) for k in ('resnet50_clip', 'resnet101_clip', 'resnet50x4_clip', 'resnet50x16_clip', 'resnet50x64_clip'): model_cfgs[k + '_gap'] = replace(model_cfgs[k], head_type='classifier') def _convert_openai_clip(state_dict: Dict[str, torch.Tensor], model: ByobNet, prefix: str='visual.') -> Dict[str, torch.Tensor]: model_has_attn_pool = isinstance(model.head, (RotAttentionPool2d, AttentionPool2d)) import re def _stage_sub(m): stage_idx = int(m.group(1)) - 1 (layer_idx, layer_type, layer_id) = (int(m.group(2)), m.group(3), int(m.group(4))) prefix_str = f'stages.{stage_idx}.{layer_idx}.' 
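# Worked example of this remapping (hypothetical key, traced through the re.sub calls
# below): 'visual.layer3.5.conv2.weight' captures stage '3', block '5', layer_type
# 'conv', layer_id '2', so _stage_sub emits 'stages.2.5.' + id_map[2] + 'conv', giving
# the timm key 'stages.2.5.conv2_kxk.conv.weight'; the matching 'bn2' key maps to
# 'stages.2.5.conv2_kxk.bn.weight'.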
id_map = {1: 'conv1_1x1.', 2: 'conv2_kxk.', 3: 'conv3_1x1.'} suffix_str = id_map[layer_id] + layer_type return prefix_str + suffix_str def _down_sub(m): stage_idx = int(m.group(1)) - 1 (layer_idx, layer_id) = (int(m.group(2)), int(m.group(3))) return f'stages.{stage_idx}.{layer_idx}.shortcut.' + ('conv.conv' if layer_id == 0 else 'conv.bn') out_dict = {} for (k, v) in state_dict.items(): if not k.startswith(prefix): continue k = re.sub(f'{prefix}conv([0-9])', 'stem.conv\\1.conv', k) k = re.sub(f'{prefix}bn([0-9])', 'stem.conv\\1.bn', k) k = re.sub(f'{prefix}layer([0-9])\\.([0-9]+)\\.([a-z]+)([0-9])', _stage_sub, k) k = re.sub(f'{prefix}layer([0-9])\\.([0-9]+)\\.downsample\\.([0-9])', _down_sub, k) if k.startswith(f'{prefix}attnpool'): if not model_has_attn_pool: continue k = k.replace(prefix + 'attnpool', 'head') k = k.replace('positional_embedding', 'pos_embed') k = k.replace('q_proj', 'q') k = k.replace('k_proj', 'k') k = k.replace('v_proj', 'v') k = k.replace('c_proj', 'proj') out_dict[k] = v return out_dict def checkpoint_filter_fn(state_dict: Dict[str, torch.Tensor], model: ByobNet): if 'visual.conv1.weight' in state_dict: state_dict = _convert_openai_clip(state_dict, model) return state_dict def _create_byobnet(variant, pretrained=False, **kwargs): return build_model_with_cfg(ByobNet, variant, pretrained, model_cfg=model_cfgs[variant], pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', **kwargs} def _cfgr(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'gernet_s.idstcv_in1k': _cfg(hf_hub_id='timm/'), 'gernet_m.idstcv_in1k': _cfg(hf_hub_id='timm/'), 'gernet_l.idstcv_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)), 'repvgg_a0.rvgg_in1k': _cfg(hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_a1.rvgg_in1k': _cfg(hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_a2.rvgg_in1k': _cfg(hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b0.rvgg_in1k': _cfg(hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b1.rvgg_in1k': _cfg(hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b1g4.rvgg_in1k': _cfg(hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b2.rvgg_in1k': _cfg(hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b2g4.rvgg_in1k': _cfg(hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b3.rvgg_in1k': _cfg(hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b3g4.rvgg_in1k': _cfg(hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_d2se.rvgg_in1k': _cfg(hf_hub_id='timm/', 
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), 'resnet51q.ra2_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth', first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnet61q.ra2_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnext26ts.ra2_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'seresnext26ts.ch_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnext26ts.ch_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_resnext26ts.ch_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'bat_resnext26ts.ch_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth', min_input_size=(3, 256, 256)), 'resnet32ts.ra2_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnet33ts.ra2_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnet33ts.ra2_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'seresnet33ts.ra2_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_resnet33ts.ra2_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnet50t.ra2_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnext50ts.ch_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'regnetz_b16.ra3_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_b_raa-677d9606.pth', first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 
224, 224), pool_size=(7, 7), crop_pct=0.94, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'regnetz_c16.ra3_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_c_rab2_256-a54bf36a.pth', first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'regnetz_d32.ra3_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d_rab_256-b8073a89.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320)), 'regnetz_d8.ra3_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d8_bh-afc03c55.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'regnetz_e8.ra3_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_e8_bh-aace8e6e.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'regnetz_b16_evos.untrained': _cfgr(first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.95, test_input_size=(3, 288, 288)), 'regnetz_c16_evos.ch_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_c16_evos_ch-d8311942.pth', first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320)), 'regnetz_d8_evos.ch_in1k': _cfgr(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_d8_evos_ch-2bc12646.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'mobileone_s0.apple_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.875, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv')), 'mobileone_s1.apple_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv')), 'mobileone_s2.apple_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv')), 'mobileone_s3.apple_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv')), 'mobileone_s4.apple_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv')), 'resnet50_clip.openai': _cfgr(hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), classifier='head.proj'), 'resnet101_clip.openai': _cfgr(hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=512, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), classifier='head.proj'), 'resnet50x4_clip.openai': _cfgr(hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=640, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 288, 288), pool_size=(9, 9), classifier='head.proj'), 'resnet50x16_clip.openai': _cfgr(hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=768, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, 
fixed_input_size=True, input_size=(3, 384, 384), pool_size=(12, 12), classifier='head.proj'), 'resnet50x64_clip.openai': _cfgr(hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 448, 448), pool_size=(14, 14), classifier='head.proj'), 'resnet50_clip_gap.openai': _cfgr(hf_hub_id='timm/resnet50_clip.openai', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 224, 224), pool_size=(7, 7)), 'resnet101_clip_gap.openai': _cfgr(hf_hub_id='timm/resnet101_clip.openai', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 224, 224), pool_size=(7, 7)), 'resnet50x4_clip_gap.openai': _cfgr(hf_hub_id='timm/resnet50x4_clip.openai', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 288, 288), pool_size=(9, 9)), 'resnet50x16_clip_gap.openai': _cfgr(hf_hub_id='timm/resnet50x16_clip.openai', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12)), 'resnet50x64_clip_gap.openai': _cfgr(hf_hub_id='timm/resnet50x64_clip.openai', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 448, 448), pool_size=(14, 14)), 'resnet50_mlp.untrained': _cfgr(input_size=(3, 256, 256), pool_size=(8, 8)), 'test_byobnet.r160_in1k': _cfgr(hf_hub_id='timm/', first_conv='stem.conv', input_size=(3, 160, 160), crop_pct=0.875, pool_size=(5, 5))}) @register_model def gernet_l(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs) @register_model def gernet_m(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs) @register_model def gernet_s(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs) @register_model def repvgg_a0(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('repvgg_a0', pretrained=pretrained, **kwargs) @register_model def repvgg_a1(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('repvgg_a1', pretrained=pretrained, **kwargs) @register_model def repvgg_a2(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs) @register_model def repvgg_b0(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs) @register_model def repvgg_b1(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs) @register_model def repvgg_b1g4(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs) @register_model def repvgg_b2(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs) @register_model def repvgg_b2g4(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs) @register_model def repvgg_b3(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs) @register_model def repvgg_b3g4(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs) @register_model def 
repvgg_d2se(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('repvgg_d2se', pretrained=pretrained, **kwargs) @register_model def resnet51q(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs) @register_model def resnet61q(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs) @register_model def resnext26ts(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs) @register_model def gcresnext26ts(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs) @register_model def seresnext26ts(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs) @register_model def eca_resnext26ts(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs) @register_model def bat_resnext26ts(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs) @register_model def resnet32ts(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs) @register_model def resnet33ts(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs) @register_model def gcresnet33ts(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs) @register_model def seresnet33ts(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs) @register_model def eca_resnet33ts(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs) @register_model def gcresnet50t(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs) @register_model def gcresnext50ts(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs) @register_model def regnetz_b16(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('regnetz_b16', pretrained=pretrained, **kwargs) @register_model def regnetz_c16(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('regnetz_c16', pretrained=pretrained, **kwargs) @register_model def regnetz_d32(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('regnetz_d32', pretrained=pretrained, **kwargs) @register_model def regnetz_d8(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('regnetz_d8', pretrained=pretrained, **kwargs) @register_model def regnetz_e8(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('regnetz_e8', pretrained=pretrained, **kwargs) @register_model def regnetz_b16_evos(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('regnetz_b16_evos', pretrained=pretrained, **kwargs) @register_model def regnetz_c16_evos(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('regnetz_c16_evos', pretrained=pretrained, **kwargs) @register_model def regnetz_d8_evos(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('regnetz_d8_evos', pretrained=pretrained, **kwargs) @register_model def mobileone_s0(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('mobileone_s0', pretrained=pretrained, **kwargs) @register_model def mobileone_s1(pretrained=False, **kwargs) -> ByobNet: return 
_create_byobnet('mobileone_s1', pretrained=pretrained, **kwargs) @register_model def mobileone_s2(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('mobileone_s2', pretrained=pretrained, **kwargs) @register_model def mobileone_s3(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('mobileone_s3', pretrained=pretrained, **kwargs) @register_model def mobileone_s4(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('mobileone_s4', pretrained=pretrained, **kwargs) @register_model def resnet50_clip(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet50_clip', pretrained=pretrained, **kwargs) @register_model def resnet101_clip(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet101_clip', pretrained=pretrained, **kwargs) @register_model def resnet50x4_clip(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet50x4_clip', pretrained=pretrained, **kwargs) @register_model def resnet50x16_clip(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet50x16_clip', pretrained=pretrained, **kwargs) @register_model def resnet50x64_clip(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet50x64_clip', pretrained=pretrained, **kwargs) @register_model def resnet50_clip_gap(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet50_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet101_clip_gap(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet101_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet50x4_clip_gap(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet50x4_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet50x16_clip_gap(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet50x16_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet50x64_clip_gap(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet50x64_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet50_mlp(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('resnet50_mlp', pretrained=pretrained, **kwargs) @register_model def test_byobnet(pretrained=False, **kwargs) -> ByobNet: return _create_byobnet('test_byobnet', pretrained=pretrained, **kwargs) # File: pytorch-image-models-main/timm/models/cait.py """""" from functools import partial from typing import List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, trunc_normal_, use_fused_attn from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['Cait', 'ClassAttn', 'LayerScaleBlockClassAttn', 'LayerScaleBlock', 'TalkingHeadAttn'] class ClassAttn(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.q = nn.Linear(dim, dim, bias=qkv_bias) self.k = nn.Linear(dim, dim, bias=qkv_bias) self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): (B, N, C) = x.shape q = self.q(x[:, 
0]).unsqueeze(1).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) if self.fused_attn: x_cls = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x_cls = attn @ v x_cls = x_cls.transpose(1, 2).reshape(B, 1, C) x_cls = self.proj(x_cls) x_cls = self.proj_drop(x_cls) return x_cls class LayerScaleBlockClassAttn(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_block=ClassAttn, mlp_block=Mlp, init_values=0.0001): super().__init__() self.norm1 = norm_layer(dim) self.attn = attn_block(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = mlp_block(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop) self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x, x_cls): u = torch.cat((x_cls, x), dim=1) x_cls = x_cls + self.drop_path(self.gamma_1 * self.attn(self.norm1(u))) x_cls = x_cls + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x_cls))) return x_cls class TalkingHeadAttn(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_l = nn.Linear(num_heads, num_heads) self.proj_w = nn.Linear(num_heads, num_heads) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): (B, N, C) = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) (q, k, v) = (qkv[0] * self.scale, qkv[1], qkv[2]) attn = q @ k.transpose(-2, -1) attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attn = attn.softmax(dim=-1) attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LayerScaleBlock(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_block=TalkingHeadAttn, mlp_block=Mlp, init_values=0.0001): super().__init__() self.norm1 = norm_layer(dim) self.attn = attn_block(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = mlp_block(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop) self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return 
x class Cait(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True, drop_rate=0.0, pos_drop_rate=0.0, proj_drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, block_layers=LayerScaleBlock, block_layers_token=LayerScaleBlockClassAttn, patch_layer=PatchEmbed, norm_layer=partial(nn.LayerNorm, eps=1e-06), act_layer=nn.GELU, attn_block=TalkingHeadAttn, mlp_block=Mlp, init_values=0.0001, attn_block_token_only=ClassAttn, mlp_block_token_only=Mlp, depth_token_only=2, mlp_ratio_token_only=4.0): super().__init__() assert global_pool in ('', 'token', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = self.embed_dim = embed_dim self.grad_checkpointing = False self.patch_embed = patch_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) self.pos_drop = nn.Dropout(p=pos_drop_rate) dpr = [drop_path_rate for i in range(depth)] self.blocks = nn.Sequential(*[block_layers(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, attn_block=attn_block, mlp_block=mlp_block, init_values=init_values) for i in range(depth)]) self.feature_info = [dict(num_chs=embed_dim, reduction=r, module=f'blocks.{i}') for i in range(depth)] self.blocks_token_only = nn.ModuleList([block_layers_token(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio_token_only, qkv_bias=qkv_bias, norm_layer=norm_layer, act_layer=act_layer, attn_block=attn_block_token_only, mlp_block=mlp_block_token_only, init_values=init_values) for _ in range(depth_token_only)]) self.norm = norm_layer(embed_dim) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.pos_embed, std=0.02) trunc_normal_(self.cls_token, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def group_matcher(self, coarse=False): def _matcher(name): if any([name.startswith(n) for n in ('cls_token', 'pos_embed', 'patch_embed')]): return 0 elif name.startswith('blocks.'): return int(name.split('.')[1]) + 1 elif name.startswith('blocks_token_only.'): to_offset = len(self.blocks) - len(self.blocks_token_only) + 1 return int(name.split('.')[1]) + to_offset elif name.startswith('norm.'): return len(self.blocks) else: return float('inf') return _matcher @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'token', 'avg') self.global_pool = global_pool self.head = 
nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' reshape = output_fmt == 'NCHW' intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) (B, _, height, width) = x.shape x = self.patch_embed(x) x = x + self.pos_embed x = self.pos_drop(x) if torch.jit.is_scripting() or not stop_early: blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for (i, blk) in enumerate(blocks): x = blk(x) if i in take_indices: intermediates.append(self.norm(x) if norm else x) if reshape: (H, W) = self.patch_embed.dynamic_feat_size((height, width)) intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] if intermediates_only: return intermediates cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) for (i, blk) in enumerate(self.blocks_token_only): cls_tokens = blk(x, cls_tokens) x = torch.cat((cls_tokens, x), dim=1) x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] if prune_norm: self.norm = nn.Identity() if prune_head: self.blocks_token_only = nn.ModuleList() self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x) x = x + self.pos_embed x = self.pos_drop(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) for (i, blk) in enumerate(self.blocks_token_only): cls_tokens = blk(x, cls_tokens) x = torch.cat((cls_tokens, x), dim=1) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool: x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model=None): if 'model' in state_dict: state_dict = state_dict['model'] checkpoint_no_module = {} for (k, v) in state_dict.items(): checkpoint_no_module[k.replace('module.', '')] = v return checkpoint_no_module def _create_cait(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) model = build_model_with_cfg(Cait, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 384, 384), 'pool_size': None, 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'cait_xxs24_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/XXS24_224.pth', input_size=(3, 224, 224)), 'cait_xxs24_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/XXS24_384.pth'), 
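# Tag convention in these cfgs: '<arch>.<tag>', where 'fb_dist_in1k' marks the
# distillation-trained ImageNet-1k CaiT weights released by Facebook. _cfg above pins a
# fixed 384x384 input by default; the 224 and 448 variants override input_size per entry.
# Usage sketch (assumes a timm install; the variant is registered later in this file):
#   import timm
#   model = timm.create_model('cait_xxs24_224.fb_dist_in1k', pretrained=True)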
'cait_xxs36_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/XXS36_224.pth', input_size=(3, 224, 224)), 'cait_xxs36_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/XXS36_384.pth'), 'cait_xs24_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/XS24_384.pth'), 'cait_s24_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/S24_224.pth', input_size=(3, 224, 224)), 'cait_s24_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/S24_384.pth'), 'cait_s36_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/S36_384.pth'), 'cait_m36_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/M36_384.pth'), 'cait_m48_448.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/M48_448.pth', input_size=(3, 448, 448))}) @register_model def cait_xxs24_224(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_values=1e-05) model = _create_cait('cait_xxs24_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_xxs24_384(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_values=1e-05) model = _create_cait('cait_xxs24_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_xxs36_224(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_values=1e-05) model = _create_cait('cait_xxs36_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_xxs36_384(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_values=1e-05) model = _create_cait('cait_xxs36_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_xs24_384(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=288, depth=24, num_heads=6, init_values=1e-05) model = _create_cait('cait_xs24_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_s24_224(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_values=1e-05) model = _create_cait('cait_s24_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_s24_384(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_values=1e-05) model = _create_cait('cait_s24_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_s36_384(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=8, init_values=1e-06) model = _create_cait('cait_s36_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_m36_384(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=768, depth=36, num_heads=16, init_values=1e-06) model = _create_cait('cait_m36_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_m48_448(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=768, depth=48, num_heads=16, init_values=1e-06) model = _create_cait('cait_m48_448', 
pretrained=pretrained, **dict(model_args, **kwargs)) return model # File: pytorch-image-models-main/timm/models/coat.py """""" from typing import List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, _assert, LayerNorm from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['CoaT'] class ConvRelPosEnc(nn.Module): def __init__(self, head_chs, num_heads, window): super().__init__() if isinstance(window, int): window = {window: num_heads} self.window = window elif isinstance(window, dict): self.window = window else: raise ValueError() self.conv_list = nn.ModuleList() self.head_splits = [] for (cur_window, cur_head_split) in window.items(): dilation = 1 padding_size = (cur_window + (cur_window - 1) * (dilation - 1)) // 2 cur_conv = nn.Conv2d(cur_head_split * head_chs, cur_head_split * head_chs, kernel_size=(cur_window, cur_window), padding=(padding_size, padding_size), dilation=(dilation, dilation), groups=cur_head_split * head_chs) self.conv_list.append(cur_conv) self.head_splits.append(cur_head_split) self.channel_splits = [x * head_chs for x in self.head_splits] def forward(self, q, v, size: Tuple[int, int]): (B, num_heads, N, C) = q.shape (H, W) = size _assert(N == 1 + H * W, '') q_img = q[:, :, 1:, :] v_img = v[:, :, 1:, :] v_img = v_img.transpose(-1, -2).reshape(B, num_heads * C, H, W) v_img_list = torch.split(v_img, self.channel_splits, dim=1) conv_v_img_list = [] for (i, conv) in enumerate(self.conv_list): conv_v_img_list.append(conv(v_img_list[i])) conv_v_img = torch.cat(conv_v_img_list, dim=1) conv_v_img = conv_v_img.reshape(B, num_heads, C, H * W).transpose(-1, -2) EV_hat = q_img * conv_v_img EV_hat = F.pad(EV_hat, (0, 0, 1, 0, 0, 0)) return EV_hat class FactorAttnConvRelPosEnc(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0, shared_crpe=None): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.crpe = shared_crpe def forward(self, x, size: Tuple[int, int]): (B, N, C) = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) k_softmax = k.softmax(dim=2) factor_att = k_softmax.transpose(-1, -2) @ v factor_att = q @ factor_att crpe = self.crpe(q, v, size=size) x = self.scale * factor_att + crpe x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class ConvPosEnc(nn.Module): def __init__(self, dim, k=3): super(ConvPosEnc, self).__init__() self.proj = nn.Conv2d(dim, dim, k, 1, k // 2, groups=dim) def forward(self, x, size: Tuple[int, int]): (B, N, C) = x.shape (H, W) = size _assert(N == 1 + H * W, '') (cls_token, img_tokens) = (x[:, :1], x[:, 1:]) feat = img_tokens.transpose(1, 2).view(B, C, H, W) x = self.proj(feat) + feat x = x.flatten(2).transpose(1, 2) x = torch.cat((cls_token, x), dim=1) return x class SerialBlock(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_cpe=None, shared_crpe=None): super().__init__() self.cpe = shared_cpe self.norm1 = 
norm_layer(dim) self.factoratt_crpe = FactorAttnConvRelPosEnc(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, shared_crpe=shared_crpe) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop) def forward(self, x, size: Tuple[int, int]): x = self.cpe(x, size) cur = self.norm1(x) cur = self.factoratt_crpe(cur, size) x = x + self.drop_path(cur) cur = self.norm2(x) cur = self.mlp(cur) x = x + self.drop_path(cur) return x class ParallelBlock(nn.Module): def __init__(self, dims, num_heads, mlp_ratios=[], qkv_bias=False, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_crpes=None): super().__init__() self.norm12 = norm_layer(dims[1]) self.norm13 = norm_layer(dims[2]) self.norm14 = norm_layer(dims[3]) self.factoratt_crpe2 = FactorAttnConvRelPosEnc(dims[1], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, shared_crpe=shared_crpes[1]) self.factoratt_crpe3 = FactorAttnConvRelPosEnc(dims[2], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, shared_crpe=shared_crpes[2]) self.factoratt_crpe4 = FactorAttnConvRelPosEnc(dims[3], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, shared_crpe=shared_crpes[3]) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm22 = norm_layer(dims[1]) self.norm23 = norm_layer(dims[2]) self.norm24 = norm_layer(dims[3]) assert dims[1] == dims[2] == dims[3] assert mlp_ratios[1] == mlp_ratios[2] == mlp_ratios[3] mlp_hidden_dim = int(dims[1] * mlp_ratios[1]) self.mlp2 = self.mlp3 = self.mlp4 = Mlp(in_features=dims[1], hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop) def upsample(self, x, factor: float, size: Tuple[int, int]): return self.interpolate(x, scale_factor=factor, size=size) def downsample(self, x, factor: float, size: Tuple[int, int]): return self.interpolate(x, scale_factor=1.0 / factor, size=size) def interpolate(self, x, scale_factor: float, size: Tuple[int, int]): (B, N, C) = x.shape (H, W) = size _assert(N == 1 + H * W, '') cls_token = x[:, :1, :] img_tokens = x[:, 1:, :] img_tokens = img_tokens.transpose(1, 2).reshape(B, C, H, W) img_tokens = F.interpolate(img_tokens, scale_factor=scale_factor, recompute_scale_factor=False, mode='bilinear', align_corners=False) img_tokens = img_tokens.reshape(B, C, -1).transpose(1, 2) out = torch.cat((cls_token, img_tokens), dim=1) return out def forward(self, x1, x2, x3, x4, sizes: List[Tuple[int, int]]): (_, S2, S3, S4) = sizes cur2 = self.norm12(x2) cur3 = self.norm13(x3) cur4 = self.norm14(x4) cur2 = self.factoratt_crpe2(cur2, size=S2) cur3 = self.factoratt_crpe3(cur3, size=S3) cur4 = self.factoratt_crpe4(cur4, size=S4) upsample3_2 = self.upsample(cur3, factor=2.0, size=S3) upsample4_3 = self.upsample(cur4, factor=2.0, size=S4) upsample4_2 = self.upsample(cur4, factor=4.0, size=S4) downsample2_3 = self.downsample(cur2, factor=2.0, size=S2) downsample3_4 = self.downsample(cur3, factor=2.0, size=S3) downsample2_4 = self.downsample(cur2, factor=4.0, size=S2) cur2 = cur2 + upsample3_2 + upsample4_2 cur3 = cur3 + upsample4_3 + downsample2_3 cur4 = cur4 + downsample3_4 + downsample2_4 x2 = x2 + self.drop_path(cur2) x3 = x3 + self.drop_path(cur3) x4 = x4 + self.drop_path(cur4) cur2 = self.norm22(x2) cur3 = self.norm23(x3) 
cur4 = self.norm24(x4) cur2 = self.mlp2(cur2) cur3 = self.mlp3(cur3) cur4 = self.mlp4(cur4) x2 = x2 + self.drop_path(cur2) x3 = x3 + self.drop_path(cur3) x4 = x4 + self.drop_path(cur4) return (x1, x2, x3, x4) class CoaT(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=(64, 128, 320, 512), serial_depths=(3, 4, 6, 3), parallel_depth=0, num_heads=8, mlp_ratios=(4, 4, 4, 4), qkv_bias=True, drop_rate=0.0, proj_drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=LayerNorm, return_interm_layers=False, out_features=None, crpe_window=None, global_pool='token'): super().__init__() assert global_pool in ('token', 'avg') crpe_window = crpe_window or {3: 2, 5: 3, 7: 3} self.return_interm_layers = return_interm_layers self.out_features = out_features self.embed_dims = embed_dims self.num_features = self.head_hidden_size = embed_dims[-1] self.num_classes = num_classes self.global_pool = global_pool img_size = to_2tuple(img_size) self.patch_embed1 = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dims[0], norm_layer=nn.LayerNorm) self.patch_embed2 = PatchEmbed(img_size=[x // 4 for x in img_size], patch_size=2, in_chans=embed_dims[0], embed_dim=embed_dims[1], norm_layer=nn.LayerNorm) self.patch_embed3 = PatchEmbed(img_size=[x // 8 for x in img_size], patch_size=2, in_chans=embed_dims[1], embed_dim=embed_dims[2], norm_layer=nn.LayerNorm) self.patch_embed4 = PatchEmbed(img_size=[x // 16 for x in img_size], patch_size=2, in_chans=embed_dims[2], embed_dim=embed_dims[3], norm_layer=nn.LayerNorm) self.cls_token1 = nn.Parameter(torch.zeros(1, 1, embed_dims[0])) self.cls_token2 = nn.Parameter(torch.zeros(1, 1, embed_dims[1])) self.cls_token3 = nn.Parameter(torch.zeros(1, 1, embed_dims[2])) self.cls_token4 = nn.Parameter(torch.zeros(1, 1, embed_dims[3])) self.cpe1 = ConvPosEnc(dim=embed_dims[0], k=3) self.cpe2 = ConvPosEnc(dim=embed_dims[1], k=3) self.cpe3 = ConvPosEnc(dim=embed_dims[2], k=3) self.cpe4 = ConvPosEnc(dim=embed_dims[3], k=3) self.crpe1 = ConvRelPosEnc(head_chs=embed_dims[0] // num_heads, num_heads=num_heads, window=crpe_window) self.crpe2 = ConvRelPosEnc(head_chs=embed_dims[1] // num_heads, num_heads=num_heads, window=crpe_window) self.crpe3 = ConvRelPosEnc(head_chs=embed_dims[2] // num_heads, num_heads=num_heads, window=crpe_window) self.crpe4 = ConvRelPosEnc(head_chs=embed_dims[3] // num_heads, num_heads=num_heads, window=crpe_window) dpr = drop_path_rate assert dpr == 0.0 skwargs = dict(num_heads=num_heads, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer) self.serial_blocks1 = nn.ModuleList([SerialBlock(dim=embed_dims[0], mlp_ratio=mlp_ratios[0], shared_cpe=self.cpe1, shared_crpe=self.crpe1, **skwargs) for _ in range(serial_depths[0])]) self.serial_blocks2 = nn.ModuleList([SerialBlock(dim=embed_dims[1], mlp_ratio=mlp_ratios[1], shared_cpe=self.cpe2, shared_crpe=self.crpe2, **skwargs) for _ in range(serial_depths[1])]) self.serial_blocks3 = nn.ModuleList([SerialBlock(dim=embed_dims[2], mlp_ratio=mlp_ratios[2], shared_cpe=self.cpe3, shared_crpe=self.crpe3, **skwargs) for _ in range(serial_depths[2])]) self.serial_blocks4 = nn.ModuleList([SerialBlock(dim=embed_dims[3], mlp_ratio=mlp_ratios[3], shared_cpe=self.cpe4, shared_crpe=self.crpe4, **skwargs) for _ in range(serial_depths[3])]) self.parallel_depth = parallel_depth if self.parallel_depth > 0: self.parallel_blocks = nn.ModuleList([ParallelBlock(dims=embed_dims, mlp_ratios=mlp_ratios, 
shared_crpes=(self.crpe1, self.crpe2, self.crpe3, self.crpe4), **skwargs) for _ in range(parallel_depth)]) else: self.parallel_blocks = None if not self.return_interm_layers: if self.parallel_blocks is not None: self.norm2 = norm_layer(embed_dims[1]) self.norm3 = norm_layer(embed_dims[2]) else: self.norm2 = self.norm3 = None self.norm4 = norm_layer(embed_dims[3]) if self.parallel_depth > 0: assert embed_dims[1] == embed_dims[2] == embed_dims[3] self.aggregate = torch.nn.Conv1d(in_channels=3, out_channels=1, kernel_size=1) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() else: self.aggregate = None self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.cls_token1, std=0.02) trunc_normal_(self.cls_token2, std=0.02) trunc_normal_(self.cls_token3, std=0.02) trunc_normal_(self.cls_token4, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'cls_token1', 'cls_token2', 'cls_token3', 'cls_token4'} @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem1='^cls_token1|patch_embed1|crpe1|cpe1', serial_blocks1='^serial_blocks1\\.(\\d+)', stem2='^cls_token2|patch_embed2|crpe2|cpe2', serial_blocks2='^serial_blocks2\\.(\\d+)', stem3='^cls_token3|patch_embed3|crpe3|cpe3', serial_blocks3='^serial_blocks3\\.(\\d+)', stem4='^cls_token4|patch_embed4|crpe4|cpe4', serial_blocks4='^serial_blocks4\\.(\\d+)', parallel_blocks=[('^parallel_blocks\\.(\\d+)', None), ('^norm|aggregate', (99999,))]) return matcher @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('token', 'avg') self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x0): B = x0.shape[0] x1 = self.patch_embed1(x0) (H1, W1) = self.patch_embed1.grid_size x1 = insert_cls(x1, self.cls_token1) for blk in self.serial_blocks1: x1 = blk(x1, size=(H1, W1)) x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() x2 = self.patch_embed2(x1_nocls) (H2, W2) = self.patch_embed2.grid_size x2 = insert_cls(x2, self.cls_token2) for blk in self.serial_blocks2: x2 = blk(x2, size=(H2, W2)) x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() x3 = self.patch_embed3(x2_nocls) (H3, W3) = self.patch_embed3.grid_size x3 = insert_cls(x3, self.cls_token3) for blk in self.serial_blocks3: x3 = blk(x3, size=(H3, W3)) x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() x4 = self.patch_embed4(x3_nocls) (H4, W4) = self.patch_embed4.grid_size x4 = insert_cls(x4, self.cls_token4) for blk in self.serial_blocks4: x4 = blk(x4, size=(H4, W4)) x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() if self.parallel_blocks is None: if not torch.jit.is_scripting() and self.return_interm_layers: 
feat_out = {} if 'x1_nocls' in self.out_features: feat_out['x1_nocls'] = x1_nocls if 'x2_nocls' in self.out_features: feat_out['x2_nocls'] = x2_nocls if 'x3_nocls' in self.out_features: feat_out['x3_nocls'] = x3_nocls if 'x4_nocls' in self.out_features: feat_out['x4_nocls'] = x4_nocls return feat_out else: x4 = self.norm4(x4) return x4 for blk in self.parallel_blocks: (x2, x3, x4) = (self.cpe2(x2, (H2, W2)), self.cpe3(x3, (H3, W3)), self.cpe4(x4, (H4, W4))) (x1, x2, x3, x4) = blk(x1, x2, x3, x4, sizes=[(H1, W1), (H2, W2), (H3, W3), (H4, W4)]) if not torch.jit.is_scripting() and self.return_interm_layers: feat_out = {} if 'x1_nocls' in self.out_features: x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() feat_out['x1_nocls'] = x1_nocls if 'x2_nocls' in self.out_features: x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() feat_out['x2_nocls'] = x2_nocls if 'x3_nocls' in self.out_features: x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() feat_out['x3_nocls'] = x3_nocls if 'x4_nocls' in self.out_features: x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() feat_out['x4_nocls'] = x4_nocls return feat_out else: x2 = self.norm2(x2) x3 = self.norm3(x3) x4 = self.norm4(x4) return [x2, x3, x4] def forward_head(self, x_feat: Union[torch.Tensor, List[torch.Tensor]], pre_logits: bool=False): if isinstance(x_feat, list): assert self.aggregate is not None if self.global_pool == 'avg': x = torch.cat([xl[:, 1:].mean(dim=1, keepdim=True) for xl in x_feat], dim=1) else: x = torch.stack([xl[:, 0] for xl in x_feat], dim=1) x = self.aggregate(x).squeeze(dim=1) else: x = x_feat[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x_feat[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x) -> torch.Tensor: if not torch.jit.is_scripting() and self.return_interm_layers: return self.forward_features(x) else: x_feat = self.forward_features(x) x = self.forward_head(x_feat) return x def insert_cls(x, cls_token): cls_tokens = cls_token.expand(x.shape[0], -1, -1) x = torch.cat((cls_tokens, x), dim=1) return x def remove_cls(x): return x[:, 1:, :] def checkpoint_filter_fn(state_dict, model): out_dict = {} state_dict = state_dict.get('model', state_dict) for (k, v) in state_dict.items(): if k.startswith('norm1') or (k.startswith('norm2') and getattr(model, 'norm2', None) is None) or (k.startswith('norm3') and getattr(model, 'norm3', None) is None) or (k.startswith('norm4') and getattr(model, 'norm4', None) is None) or (k.startswith('aggregate') and getattr(model, 'aggregate', None) is None) or (k.startswith('head') and getattr(model, 'head', None) is None): continue out_dict[k] = v return out_dict def _create_coat(variant, pretrained=False, default_cfg=None, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = build_model_with_cfg(CoaT, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs) return model def _cfg_coat(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed1.proj', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'coat_tiny.in1k': _cfg_coat(hf_hub_id='timm/'), 'coat_mini.in1k': _cfg_coat(hf_hub_id='timm/'), 
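# All CoaT cfgs inherit fixed_input_size=True at 224x224 from _cfg_coat above; only
# coat_lite_medium_384 overrides it (384x384, crop_pct=1.0, crop_mode='squash').
# Usage sketch (assumes a timm install; the variant is registered below):
#   import timm
#   model = timm.create_model('coat_lite_medium_384.in1k', pretrained=True)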
'coat_small.in1k': _cfg_coat(hf_hub_id='timm/'), 'coat_lite_tiny.in1k': _cfg_coat(hf_hub_id='timm/'), 'coat_lite_mini.in1k': _cfg_coat(hf_hub_id='timm/'), 'coat_lite_small.in1k': _cfg_coat(hf_hub_id='timm/'), 'coat_lite_medium.in1k': _cfg_coat(hf_hub_id='timm/'), 'coat_lite_medium_384.in1k': _cfg_coat(hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0, crop_mode='squash')}) @register_model def coat_tiny(pretrained=False, **kwargs) -> CoaT: model_cfg = dict(patch_size=4, embed_dims=[152, 152, 152, 152], serial_depths=[2, 2, 2, 2], parallel_depth=6) model = _create_coat('coat_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_mini(pretrained=False, **kwargs) -> CoaT: model_cfg = dict(patch_size=4, embed_dims=[152, 216, 216, 216], serial_depths=[2, 2, 2, 2], parallel_depth=6) model = _create_coat('coat_mini', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_small(pretrained=False, **kwargs) -> CoaT: model_cfg = dict(patch_size=4, embed_dims=[152, 320, 320, 320], serial_depths=[2, 2, 2, 2], parallel_depth=6, **kwargs) model = _create_coat('coat_small', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_lite_tiny(pretrained=False, **kwargs) -> CoaT: model_cfg = dict(patch_size=4, embed_dims=[64, 128, 256, 320], serial_depths=[2, 2, 2, 2], mlp_ratios=[8, 8, 4, 4]) model = _create_coat('coat_lite_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_lite_mini(pretrained=False, **kwargs) -> CoaT: model_cfg = dict(patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[2, 2, 2, 2], mlp_ratios=[8, 8, 4, 4]) model = _create_coat('coat_lite_mini', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_lite_small(pretrained=False, **kwargs) -> CoaT: model_cfg = dict(patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[3, 4, 6, 3], mlp_ratios=[8, 8, 4, 4]) model = _create_coat('coat_lite_small', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_lite_medium(pretrained=False, **kwargs) -> CoaT: model_cfg = dict(patch_size=4, embed_dims=[128, 256, 320, 512], serial_depths=[3, 6, 10, 8]) model = _create_coat('coat_lite_medium', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_lite_medium_384(pretrained=False, **kwargs) -> CoaT: model_cfg = dict(img_size=384, patch_size=4, embed_dims=[128, 256, 320, 512], serial_depths=[3, 6, 10, 8]) model = _create_coat('coat_lite_medium_384', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model # File: pytorch-image-models-main/timm/models/convit.py """""" from typing import Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, PatchEmbed, Mlp, LayerNorm, HybridEmbed from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs __all__ = ['ConVit'] @register_notrace_module class GPSA(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0, locality_strength=1.0): super().__init__() self.num_heads = num_heads self.dim = dim head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.locality_strength = locality_strength self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias) self.v = nn.Linear(dim, dim, bias=qkv_bias)
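# GPSA is the gated positional self-attention of ConViT: each head blends content
# attention, softmax(q @ k^T * scale), with purely positional attention,
# softmax(pos_proj(rel_indices)), through a learned per-head sigmoid gate
# (gating_param); see get_attention below. rel_indices caches a (1, N, N, 3) tensor of
# (dx, dy, dx^2 + dy^2) offsets for every patch pair and is rebuilt lazily in forward
# whenever the token count N changes. local_init initializes v to the identity and
# biases pos_proj so each head starts out attending to a distinct local offset, scaled
# by locality_strength, giving early layers a convolution-like inductive bias.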
self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.pos_proj = nn.Linear(3, num_heads) self.proj_drop = nn.Dropout(proj_drop) self.gating_param = nn.Parameter(torch.ones(self.num_heads)) self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3) def forward(self, x): (B, N, C) = x.shape if self.rel_indices is None or self.rel_indices.shape[1] != N: self.rel_indices = self.get_rel_indices(N) attn = self.get_attention(x) v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x def get_attention(self, x): (B, N, C) = x.shape qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) (q, k) = (qk[0], qk[1]) pos_score = self.rel_indices.expand(B, -1, -1, -1) pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2) patch_score = q @ k.transpose(-2, -1) * self.scale patch_score = patch_score.softmax(dim=-1) pos_score = pos_score.softmax(dim=-1) gating = self.gating_param.view(1, -1, 1, 1) attn = (1.0 - torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score attn /= attn.sum(dim=-1).unsqueeze(-1) attn = self.attn_drop(attn) return attn def get_attention_map(self, x, return_map=False): attn_map = self.get_attention(x).mean(0) distances = self.rel_indices.squeeze()[:, :, -1] ** 0.5 dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0) if return_map: return (dist, attn_map) else: return dist def local_init(self): self.v.weight.data.copy_(torch.eye(self.dim)) locality_distance = 1 kernel_size = int(self.num_heads ** 0.5) center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2 for h1 in range(kernel_size): for h2 in range(kernel_size): position = h1 + kernel_size * h2 self.pos_proj.weight.data[position, 2] = -1 self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance self.pos_proj.weight.data *= self.locality_strength def get_rel_indices(self, num_patches: int) -> torch.Tensor: img_size = int(num_patches ** 0.5) rel_indices = torch.zeros(1, num_patches, num_patches, 3) ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) indx = ind.repeat(img_size, img_size) indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) indd = indx ** 2 + indy ** 2 rel_indices[:, :, :, 2] = indd.unsqueeze(0) rel_indices[:, :, :, 1] = indy.unsqueeze(0) rel_indices[:, :, :, 0] = indx.unsqueeze(0) device = self.qk.weight.device return rel_indices.to(device) class MHSA(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def get_attention_map(self, x, return_map=False): (B, N, C) = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) (q, k, v) = (qkv[0], qkv[1], qkv[2]) attn_map = q @ k.transpose(-2, -1) * self.scale attn_map = attn_map.softmax(dim=-1).mean(0) img_size = int(N ** 0.5) ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) indx = ind.repeat(img_size, img_size) indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) indd = indx ** 2 
+ indy ** 2 distances = indd ** 0.5 distances = distances.to(x.device) dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N if return_map: return (dist, attn_map) else: return dist def forward(self, x): (B, N, C) = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=LayerNorm, use_gpsa=True, locality_strength=1.0): super().__init__() self.norm1 = norm_layer(dim) self.use_gpsa = use_gpsa if self.use_gpsa: self.attn = GPSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, locality_strength=locality_strength) else: self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class ConVit(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=False, drop_rate=0.0, pos_drop_rate=0.0, proj_drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, hybrid_backbone=None, norm_layer=LayerNorm, local_up_to_layer=3, locality_strength=1.0, use_pos_embed=True): super().__init__() assert global_pool in ('', 'avg', 'token') embed_dim *= num_heads self.num_classes = num_classes self.global_pool = global_pool self.local_up_to_layer = local_up_to_layer self.num_features = self.head_hidden_size = self.embed_dim = embed_dim self.locality_strength = locality_strength self.use_pos_embed = use_pos_embed if hybrid_backbone is not None: self.patch_embed = HybridEmbed(hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) else: self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.num_patches = num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.pos_drop = nn.Dropout(p=pos_drop_rate) if self.use_pos_embed: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) trunc_normal_(self.pos_embed, std=0.02) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] self.blocks = nn.ModuleList([Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, use_gpsa=i < local_up_to_layer, locality_strength=locality_strength) for i in range(depth)]) self.norm = norm_layer(embed_dim) self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.cls_token, std=0.02) self.apply(self._init_weights) for (n, m) in self.named_modules(): if hasattr(m, 'local_init'): m.local_init() def _init_weights(self, m): 
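# Standard ViT-style weight init (descriptive comment): nn.Linear gets
# truncated-normal weights (std=0.02) with zero bias, nn.LayerNorm gets
# weight=1 and bias=0. Note that __init__ above additionally calls each
# module's local_init() hook (GPSA defines one) after self.apply(), so the
# positional projection is re-initialized to its locality pattern afterwards.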
if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^cls_token|pos_embed|patch_embed', blocks=[('^blocks\\.(\\d+)', None), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'token', 'avg') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) if self.use_pos_embed: x = x + self.pos_embed x = self.pos_drop(x) cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) for (u, blk) in enumerate(self.blocks): if u == self.local_up_to_layer: x = torch.cat((cls_tokens, x), dim=1) x = blk(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool: x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_convit(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') return build_model_with_cfg(ConVit, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'convit_tiny.fb_in1k': _cfg(hf_hub_id='timm/'), 'convit_small.fb_in1k': _cfg(hf_hub_id='timm/'), 'convit_base.fb_in1k': _cfg(hf_hub_id='timm/')}) @register_model def convit_tiny(pretrained=False, **kwargs) -> ConVit: model_args = dict(local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=4) model = _create_convit(variant='convit_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convit_small(pretrained=False, **kwargs) -> ConVit: model_args = dict(local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=9) model = _create_convit(variant='convit_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convit_base(pretrained=False, **kwargs) -> ConVit: model_args = dict(local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=16) model = _create_convit(variant='convit_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model # File: pytorch-image-models-main/timm/models/convmixer.py """""" from typing import Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d from ._registry import register_model, generate_default_cfgs from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq __all__ = ['ConvMixer'] class Residual(nn.Module): def 
__init__(self, fn): super().__init__() self.fn = fn def forward(self, x): return self.fn(x) + x class ConvMixer(nn.Module): def __init__(self, dim, depth, kernel_size=9, patch_size=7, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0, act_layer=nn.GELU, **kwargs): super().__init__() self.num_classes = num_classes self.num_features = self.head_hidden_size = dim self.grad_checkpointing = False self.stem = nn.Sequential(nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size), act_layer(), nn.BatchNorm2d(dim)) self.blocks = nn.Sequential(*[nn.Sequential(Residual(nn.Sequential(nn.Conv2d(dim, dim, kernel_size, groups=dim, padding='same'), act_layer(), nn.BatchNorm2d(dim))), nn.Conv2d(dim, dim, kernel_size=1), act_layer(), nn.BatchNorm2d(dim)) for i in range(depth)]) self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(dim, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^stem', blocks='^blocks\\.(\\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x def forward_head(self, x, pre_logits: bool=False): x = self.pooling(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_convmixer(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for ConvMixer models.') return build_model_with_cfg(ConvMixer, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.96, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head', 'first_conv': 'stem.0', **kwargs} default_cfgs = generate_default_cfgs({'convmixer_1536_20.in1k': _cfg(hf_hub_id='timm/'), 'convmixer_768_32.in1k': _cfg(hf_hub_id='timm/'), 'convmixer_1024_20_ks9_p14.in1k': _cfg(hf_hub_id='timm/')}) @register_model def convmixer_1536_20(pretrained=False, **kwargs) -> ConvMixer: model_args = dict(dim=1536, depth=20, kernel_size=9, patch_size=7, **kwargs) return _create_convmixer('convmixer_1536_20', pretrained, **model_args) @register_model def convmixer_768_32(pretrained=False, **kwargs) -> ConvMixer: model_args = dict(dim=768, depth=32, kernel_size=7, patch_size=7, act_layer=nn.ReLU, **kwargs) return _create_convmixer('convmixer_768_32', pretrained, **model_args) @register_model def convmixer_1024_20_ks9_p14(pretrained=False, **kwargs) -> ConvMixer: model_args = dict(dim=1024, depth=20, kernel_size=9, patch_size=14, **kwargs) return _create_convmixer('convmixer_1024_20_ks9_p14', pretrained, **model_args) # File: pytorch-image-models-main/timm/models/convnext.py """""" from functools import partial from typing import Callable, 
List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import trunc_normal_, AvgPool2dSame, DropPath, Mlp, GlobalResponseNormMlp, LayerNorm2d, LayerNorm, create_conv2d, get_act_layer, make_divisible, to_ntuple from timm.layers import NormMlpClassifierHead, ClassifierHead from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['ConvNeXt'] class Downsample(nn.Module): def __init__(self, in_chs, out_chs, stride=1, dilation=1): super().__init__() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() if in_chs != out_chs: self.conv = create_conv2d(in_chs, out_chs, 1, stride=1) else: self.conv = nn.Identity() def forward(self, x): x = self.pool(x) x = self.conv(x) return x class ConvNeXtBlock(nn.Module): def __init__(self, in_chs: int, out_chs: Optional[int]=None, kernel_size: int=7, stride: int=1, dilation: Union[int, Tuple[int, int]]=(1, 1), mlp_ratio: float=4, conv_mlp: bool=False, conv_bias: bool=True, use_grn: bool=False, ls_init_value: Optional[float]=1e-06, act_layer: Union[str, Callable]='gelu', norm_layer: Optional[Callable]=None, drop_path: float=0.0): super().__init__() out_chs = out_chs or in_chs dilation = to_ntuple(2)(dilation) act_layer = get_act_layer(act_layer) if not norm_layer: norm_layer = LayerNorm2d if conv_mlp else LayerNorm mlp_layer = partial(GlobalResponseNormMlp if use_grn else Mlp, use_conv=conv_mlp) self.use_conv_mlp = conv_mlp self.conv_dw = create_conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation[0], depthwise=True, bias=conv_bias) self.norm = norm_layer(out_chs) self.mlp = mlp_layer(out_chs, int(mlp_ratio * out_chs), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(out_chs)) if ls_init_value is not None else None if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: self.shortcut = Downsample(in_chs, out_chs, stride=stride, dilation=dilation[0]) else: self.shortcut = nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): shortcut = x x = self.conv_dw(x) if self.use_conv_mlp: x = self.norm(x) x = self.mlp(x) else: x = x.permute(0, 2, 3, 1) x = self.norm(x) x = self.mlp(x) x = x.permute(0, 3, 1, 2) if self.gamma is not None: x = x.mul(self.gamma.reshape(1, -1, 1, 1)) x = self.drop_path(x) + self.shortcut(shortcut) return x class ConvNeXtStage(nn.Module): def __init__(self, in_chs, out_chs, kernel_size=7, stride=2, depth=2, dilation=(1, 1), drop_path_rates=None, ls_init_value=1.0, conv_mlp=False, conv_bias=True, use_grn=False, act_layer='gelu', norm_layer=None, norm_layer_cl=None): super().__init__() self.grad_checkpointing = False if in_chs != out_chs or stride > 1 or dilation[0] != dilation[1]: ds_ks = 2 if stride > 1 or dilation[0] != dilation[1] else 1 pad = 'same' if dilation[1] > 1 else 0 self.downsample = nn.Sequential(norm_layer(in_chs), create_conv2d(in_chs, out_chs, kernel_size=ds_ks, stride=stride, dilation=dilation[0], padding=pad, bias=conv_bias)) in_chs = out_chs else: self.downsample = nn.Identity() 
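# Descriptive note (added): a stage's downsample path is LayerNorm followed by
# a small strided conv (kernel 2 for stride 2), and collapses to nn.Identity
# when channels, stride, and dilation are all unchanged, leaving the stage as a
# plain stack of ConvNeXtBlocks. When output_stride < 32, ConvNeXt.__init__
# below converts stride into dilation, which is why this conv also accepts a
# dilation argument.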
drop_path_rates = drop_path_rates or [0.0] * depth stage_blocks = [] for i in range(depth): stage_blocks.append(ConvNeXtBlock(in_chs=in_chs, out_chs=out_chs, kernel_size=kernel_size, dilation=dilation[1], drop_path=drop_path_rates[i], ls_init_value=ls_init_value, conv_mlp=conv_mlp, conv_bias=conv_bias, use_grn=use_grn, act_layer=act_layer, norm_layer=norm_layer if conv_mlp else norm_layer_cl)) in_chs = out_chs self.blocks = nn.Sequential(*stage_blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class ConvNeXt(nn.Module): def __init__(self, in_chans: int=3, num_classes: int=1000, global_pool: str='avg', output_stride: int=32, depths: Tuple[int, ...]=(3, 3, 9, 3), dims: Tuple[int, ...]=(96, 192, 384, 768), kernel_sizes: Union[int, Tuple[int, ...]]=7, ls_init_value: Optional[float]=1e-06, stem_type: str='patch', patch_size: int=4, head_init_scale: float=1.0, head_norm_first: bool=False, head_hidden_size: Optional[int]=None, conv_mlp: bool=False, conv_bias: bool=True, use_grn: bool=False, act_layer: Union[str, Callable]='gelu', norm_layer: Optional[Union[str, Callable]]=None, norm_eps: Optional[float]=None, drop_rate: float=0.0, drop_path_rate: float=0.0): super().__init__() assert output_stride in (8, 16, 32) kernel_sizes = to_ntuple(4)(kernel_sizes) if norm_layer is None: norm_layer = LayerNorm2d norm_layer_cl = norm_layer if conv_mlp else LayerNorm if norm_eps is not None: norm_layer = partial(norm_layer, eps=norm_eps) norm_layer_cl = partial(norm_layer_cl, eps=norm_eps) else: assert conv_mlp, 'If a norm_layer is specified, conv MLP must be used so all norm expect rank-4, channels-first input' norm_layer_cl = norm_layer if norm_eps is not None: norm_layer_cl = partial(norm_layer_cl, eps=norm_eps) self.num_classes = num_classes self.drop_rate = drop_rate self.feature_info = [] assert stem_type in ('patch', 'overlap', 'overlap_tiered') if stem_type == 'patch': self.stem = nn.Sequential(nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size, bias=conv_bias), norm_layer(dims[0])) stem_stride = patch_size else: mid_chs = make_divisible(dims[0] // 2) if 'tiered' in stem_type else dims[0] self.stem = nn.Sequential(nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias), nn.Conv2d(mid_chs, dims[0], kernel_size=3, stride=2, padding=1, bias=conv_bias), norm_layer(dims[0])) stem_stride = 4 self.stages = nn.Sequential() dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] stages = [] prev_chs = dims[0] curr_stride = stem_stride dilation = 1 for i in range(4): stride = 2 if curr_stride == 2 or i > 0 else 1 if curr_stride >= output_stride and stride > 1: dilation *= stride stride = 1 curr_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 out_chs = dims[i] stages.append(ConvNeXtStage(prev_chs, out_chs, kernel_size=kernel_sizes[i], stride=stride, dilation=(first_dilation, dilation), depth=depths[i], drop_path_rates=dp_rates[i], ls_init_value=ls_init_value, conv_mlp=conv_mlp, conv_bias=conv_bias, use_grn=use_grn, act_layer=act_layer, norm_layer=norm_layer, norm_layer_cl=norm_layer_cl)) prev_chs = out_chs self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_features = self.head_hidden_size = prev_chs if head_norm_first: assert not head_hidden_size self.norm_pre = norm_layer(self.num_features) 
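# Descriptive note (added): with head_norm_first=True the final norm (norm_pre)
# is applied to the feature map before global pooling and a plain
# ClassifierHead follows; in the default branch below norm_pre stays Identity
# and NormMlpClassifierHead pools first, then applies the norm (and an optional
# hidden MLP) inside the head.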
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) else: self.norm_pre = nn.Identity() self.head = NormMlpClassifierHead(self.num_features, num_classes, hidden_size=head_hidden_size, pool_type=global_pool, drop_rate=self.drop_rate, norm_layer=norm_layer, act_layer='gelu') self.head_hidden_size = self.head.num_features named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks='^stages\\.(\\d+)' if coarse else [('^stages\\.(\\d+)\\.downsample', (0,)), ('^stages\\.(\\d+)\\.blocks\\.(\\d+)', None), ('^norm_pre', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.stages) + 1, indices) feat_idx = 0 x = self.stem(x) if feat_idx in take_indices: intermediates.append(x) if torch.jit.is_scripting() or not stop_early: stages = self.stages else: stages = self.stages[:max_index] for stage in stages: feat_idx += 1 x = stage(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates x = self.norm_pre(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.stages) + 1, indices) self.stages = self.stages[:max_index] if prune_norm: self.norm_pre = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm_pre(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=0.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): trunc_normal_(module.weight, std=0.02) nn.init.zeros_(module.bias) if name and 'head.' 
in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def checkpoint_filter_fn(state_dict, model): if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict: return state_dict if 'model' in state_dict: state_dict = state_dict['model'] out_dict = {} if 'visual.trunk.stem.0.weight' in state_dict: out_dict = {k.replace('visual.trunk.', ''): v for (k, v) in state_dict.items() if k.startswith('visual.trunk.')} if 'visual.head.proj.weight' in state_dict: out_dict['head.fc.weight'] = state_dict['visual.head.proj.weight'] out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.proj.weight'].shape[0]) elif 'visual.head.mlp.fc1.weight' in state_dict: out_dict['head.pre_logits.fc.weight'] = state_dict['visual.head.mlp.fc1.weight'] out_dict['head.pre_logits.fc.bias'] = state_dict['visual.head.mlp.fc1.bias'] out_dict['head.fc.weight'] = state_dict['visual.head.mlp.fc2.weight'] out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.mlp.fc2.weight'].shape[0]) return out_dict import re for (k, v) in state_dict.items(): k = k.replace('downsample_layers.0.', 'stem.') k = re.sub('stages.([0-9]+).([0-9]+)', 'stages.\\1.blocks.\\2', k) k = re.sub('downsample_layers.([0-9]+).([0-9]+)', 'stages.\\1.downsample.\\2', k) k = k.replace('dwconv', 'conv_dw') k = k.replace('pwconv', 'mlp.fc') if 'grn' in k: k = k.replace('grn.beta', 'mlp.grn.bias') k = k.replace('grn.gamma', 'mlp.grn.weight') v = v.reshape(v.shape[-1]) k = k.replace('head.', 'head.fc.') if k.startswith('norm.'): k = k.replace('norm', 'head.norm') if v.ndim == 2 and 'head' not in k: model_shape = model.state_dict()[k].shape v = v.reshape(model_shape) out_dict[k] = v return out_dict def _create_convnext(variant, pretrained=False, **kwargs): if kwargs.get('pretrained_cfg', '') == 'fcmae': kwargs.setdefault('pretrained_strict', False) model = build_model_with_cfg(ConvNeXt, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', **kwargs} def _cfgv2(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', 'license': 'cc-by-nc-4.0', 'paper_ids': 'arXiv:2301.00808', 'paper_name': 'ConvNeXt-V2: Co-designing and Scaling ConvNets with Masked Autoencoders', 'origin_url': 'https://github.com/facebookresearch/ConvNeXt-V2', **kwargs} default_cfgs = generate_default_cfgs({'convnext_tiny.in12k_ft_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_small.in12k_ft_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_atto.d2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_d2-01bb0f51.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_atto_ols.a2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_ols_a2-78d1c8f3.pth', hf_hub_id='timm/', test_input_size=(3, 288, 
288), test_crop_pct=0.95), 'convnext_femto.d1_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_d1-d71d5b4c.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_femto_ols.d1_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_ols_d1-246bf2ed.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_pico.d1_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_d1-10ad7f0d.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_pico_ols.d1_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_ols_d1-611f0ca7.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano.in12k_ft_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano.d1h_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_d1h-7eb4bdea.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano_ols.d1h_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_ols_d1h-ae424a9a.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny_hnf.a2h_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_tiny_hnf_a2h-ab7e9df2.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny.in12k_ft_in1k_384': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_small.in12k_ft_in1k_384': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_nano.in12k': _cfg(hf_hub_id='timm/', crop_pct=0.95, num_classes=11821), 'convnext_tiny.in12k': _cfg(hf_hub_id='timm/', crop_pct=0.95, num_classes=11821), 'convnext_small.in12k': _cfg(hf_hub_id='timm/', crop_pct=0.95, num_classes=11821), 'convnext_tiny.fb_in22k_ft_in1k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_small.fb_in22k_ft_in1k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_base.fb_in22k_ft_in1k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_large.fb_in22k_ft_in1k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_xlarge.fb_in22k_ft_in1k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_small.fb_in1k': 
_cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_base.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_large.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny.fb_in22k_ft_in1k_384': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_small.fb_in22k_ft_in1k_384': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_base.fb_in22k_ft_in1k_384': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_large.fb_in22k_ft_in1k_384': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_xlarge.fb_in22k_ft_in1k_384': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_tiny.fb_in22k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth', hf_hub_id='timm/', num_classes=21841), 'convnext_small.fb_in22k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth', hf_hub_id='timm/', num_classes=21841), 'convnext_base.fb_in22k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth', hf_hub_id='timm/', num_classes=21841), 'convnext_large.fb_in22k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth', hf_hub_id='timm/', num_classes=21841), 'convnext_xlarge.fb_in22k': _cfg(url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth', hf_hub_id='timm/', num_classes=21841), 'convnextv2_nano.fcmae_ft_in22k_in1k': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_nano.fcmae_ft_in22k_in1k_384': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_tiny.fcmae_ft_in22k_in1k': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_tiny.fcmae_ft_in22k_in1k_384': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_base.fcmae_ft_in22k_in1k': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_base.fcmae_ft_in22k_in1k_384': 
_cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_large.fcmae_ft_in22k_in1k': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_large.fcmae_ft_in22k_in1k_384': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_huge.fcmae_ft_in22k_in1k_384': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_huge.fcmae_ft_in22k_in1k_512': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt', hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(15, 15), crop_pct=1.0, crop_mode='squash'), 'convnextv2_atto.fcmae_ft_in1k': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnextv2_femto.fcmae_ft_in1k': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnextv2_pico.fcmae_ft_in1k': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnextv2_nano.fcmae_ft_in1k': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_tiny.fcmae_ft_in1k': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_base.fcmae_ft_in1k': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_large.fcmae_ft_in1k': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_huge.fcmae_ft_in1k': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_atto.fcmae': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_atto_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_femto.fcmae': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_femto_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_pico.fcmae': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_pico_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_nano.fcmae': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_nano_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_tiny.fcmae': 
_cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_tiny_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_base.fcmae': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_base_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_large.fcmae': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_large_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_huge.fcmae': _cfgv2(url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_huge_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_small.untrained': _cfg(), 'convnext_base.clip_laion2b_augreg_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_base.clip_laion2b_augreg_ft_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laiona_augreg_ft_in1k_384': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_augreg_ft_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_xxlarge.clip_laion2b_soup_ft_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laion2b_augreg_ft_in12k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_320': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_augreg_ft_in12k_384': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_384': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_xxlarge.clip_laion2b_soup_ft_in12k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laion2b': _cfg(hf_hub_id='laion/CLIP-convnext_base_w-laion2B-s13B-b82K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, 
std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laion2b_augreg': _cfg(hf_hub_id='laion/CLIP-convnext_base_w-laion2B-s13B-b82K-augreg', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laiona': _cfg(hf_hub_id='laion/CLIP-convnext_base_w-laion_aesthetic-s13B-b82K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laiona_320': _cfg(hf_hub_id='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laiona_augreg_320': _cfg(hf_hub_id='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K-augreg', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640), 'convnext_large_mlp.clip_laion2b_augreg': _cfg(hf_hub_id='laion/CLIP-convnext_large_d.laion2B-s26B-b102K-augreg', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=768), 'convnext_large_mlp.clip_laion2b_ft_320': _cfg(hf_hub_id='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768), 'convnext_large_mlp.clip_laion2b_ft_soup_320': _cfg(hf_hub_id='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768), 'convnext_xxlarge.clip_laion2b_soup': _cfg(hf_hub_id='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-soup', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024), 'convnext_xxlarge.clip_laion2b_rewind': _cfg(hf_hub_id='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-rewind', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024)}) @register_model def convnext_atto(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True) model = _create_convnext('convnext_atto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_atto_ols(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, stem_type='overlap_tiered') model = _create_convnext('convnext_atto_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_femto(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True) model = _create_convnext('convnext_femto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_femto_ols(pretrained=False, **kwargs) -> ConvNeXt: model_args = 
dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True, stem_type='overlap_tiered') model = _create_convnext('convnext_femto_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_pico(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True) model = _create_convnext('convnext_pico', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_pico_ols(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True, stem_type='overlap_tiered') model = _create_convnext('convnext_pico_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_nano(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True) model = _create_convnext('convnext_nano', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_nano_ols(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True, stem_type='overlap') model = _create_convnext('convnext_nano_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_tiny_hnf(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), head_norm_first=True, conv_mlp=True) model = _create_convnext('convnext_tiny_hnf', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_tiny(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768)) model = _create_convnext('convnext_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_small(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768]) model = _create_convnext('convnext_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_base(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024]) model = _create_convnext('convnext_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_large(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536]) model = _create_convnext('convnext_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_large_mlp(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], head_hidden_size=1536) model = _create_convnext('convnext_large_mlp', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_xlarge(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048]) model = _create_convnext('convnext_xlarge', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_xxlarge(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 4, 30, 3], dims=[384, 768, 1536, 3072], norm_eps=kwargs.pop('norm_eps', 1e-05)) model = _create_convnext('convnext_xxlarge', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_atto(pretrained=False, **kwargs) -> ConvNeXt: model_args = 
dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_atto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_femto(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_femto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_pico(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_pico', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_nano(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_nano', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_tiny(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_small(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_base(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_large(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_huge(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_huge', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, {'convnext_tiny_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k', 'convnext_small_in22ft1k': 'convnext_small.fb_in22k_ft_in1k', 'convnext_base_in22ft1k': 'convnext_base.fb_in22k_ft_in1k', 'convnext_large_in22ft1k': 'convnext_large.fb_in22k_ft_in1k', 'convnext_xlarge_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k', 'convnext_tiny_384_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k_384', 'convnext_small_384_in22ft1k': 'convnext_small.fb_in22k_ft_in1k_384', 'convnext_base_384_in22ft1k': 'convnext_base.fb_in22k_ft_in1k_384', 'convnext_large_384_in22ft1k': 'convnext_large.fb_in22k_ft_in1k_384', 'convnext_xlarge_384_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k_384', 'convnext_tiny_in22k': 'convnext_tiny.fb_in22k', 'convnext_small_in22k': 'convnext_small.fb_in22k', 'convnext_base_in22k': 'convnext_base.fb_in22k', 'convnext_large_in22k': 'convnext_large.fb_in22k', 'convnext_xlarge_in22k': 'convnext_xlarge.fb_in22k'}) # File: 
pytorch-image-models-main/timm/models/crossvit.py """""" '' from functools import partial from typing import List, Optional, Tuple import torch import torch.hub import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, to_2tuple, trunc_normal_, _assert from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._registry import register_model, generate_default_cfgs from .vision_transformer import Block __all__ = ['CrossVit'] class PatchEmbed(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, multi_conv=False): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) num_patches = img_size[1] // patch_size[1] * (img_size[0] // patch_size[0]) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches if multi_conv: if patch_size[0] == 12: self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), nn.ReLU(inplace=True), nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0), nn.ReLU(inplace=True), nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1)) elif patch_size[0] == 16: self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), nn.ReLU(inplace=True), nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1), nn.ReLU(inplace=True), nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1)) else: self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): (B, C, H, W) = x.shape _assert(H == self.img_size[0], f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") _assert(W == self.img_size[1], f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") x = self.proj(x).flatten(2).transpose(1, 2) return x class CrossAttention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.wq = nn.Linear(dim, dim, bias=qkv_bias) self.wk = nn.Linear(dim, dim, bias=qkv_bias) self.wv = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): (B, N, C) = x.shape q = self.wq(x[:, 0:1, ...]).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) k = self.wk(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) v = self.wv(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, 1, C) x = self.proj(x) x = self.proj_drop(x) return x class CrossAttentionBlock(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = CrossAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): x = x[:, 0:1, ...] 
+ self.drop_path(self.attn(self.norm1(x))) return x class MultiScaleBlock(nn.Module): def __init__(self, dim, patches, depth, num_heads, mlp_ratio, qkv_bias=False, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() num_branches = len(dim) self.num_branches = num_branches self.blocks = nn.ModuleList() for d in range(num_branches): tmp = [] for i in range(depth[d]): tmp.append(Block(dim=dim[d], num_heads=num_heads[d], mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer)) if len(tmp) != 0: self.blocks.append(nn.Sequential(*tmp)) if len(self.blocks) == 0: self.blocks = None self.projs = nn.ModuleList() for d in range(num_branches): if dim[d] == dim[(d + 1) % num_branches] and False: tmp = [nn.Identity()] else: tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])] self.projs.append(nn.Sequential(*tmp)) self.fusion = nn.ModuleList() for d in range(num_branches): d_ = (d + 1) % num_branches nh = num_heads[d_] if depth[-1] == 0: self.fusion.append(CrossAttentionBlock(dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer)) else: tmp = [] for _ in range(depth[-1]): tmp.append(CrossAttentionBlock(dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer)) self.fusion.append(nn.Sequential(*tmp)) self.revert_projs = nn.ModuleList() for d in range(num_branches): if dim[(d + 1) % num_branches] == dim[d] and False: tmp = [nn.Identity()] else: tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(), nn.Linear(dim[(d + 1) % num_branches], dim[d])] self.revert_projs.append(nn.Sequential(*tmp)) def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: outs_b = [] for (i, block) in enumerate(self.blocks): outs_b.append(block(x[i])) proj_cls_token = torch.jit.annotate(List[torch.Tensor], []) for (i, proj) in enumerate(self.projs): proj_cls_token.append(proj(outs_b[i][:, 0:1, ...])) outs = [] for (i, (fusion, revert_proj)) in enumerate(zip(self.fusion, self.revert_projs)): tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1) tmp = fusion(tmp) reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...]) tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1) outs.append(tmp) return outs def _compute_num_patches(img_size, patches): return [i[0] // p * i[1] // p for (i, p) in zip(img_size, patches)] @register_notrace_function def scale_image(x, ss: Tuple[int, int], crop_scale: bool=False): (H, W) = x.shape[-2:] if H != ss[0] or W != ss[1]: if crop_scale and ss[0] <= H and (ss[1] <= W): (cu, cl) = (int(round((H - ss[0]) / 2.0)), int(round((W - ss[1]) / 2.0))) x = x[:, :, cu:cu + ss[0], cl:cl + ss[1]] else: x = torch.nn.functional.interpolate(x, size=ss, mode='bicubic', align_corners=False) return x class CrossVit(nn.Module): def __init__(self, img_size=224, img_scale=(1.0, 1.0), patch_size=(8, 16), in_chans=3, num_classes=1000, embed_dim=(192, 384), depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)), num_heads=(6, 12), mlp_ratio=(2.0, 2.0, 4.0), multi_conv=False, crop_scale=False, qkv_bias=True, drop_rate=0.0, pos_drop_rate=0.0, proj_drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=partial(nn.LayerNorm, eps=1e-06), global_pool='token'): super().__init__() assert global_pool in 
('token', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.img_size = to_2tuple(img_size) img_scale = to_2tuple(img_scale) self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale] self.crop_scale = crop_scale num_patches = _compute_num_patches(self.img_size_scaled, patch_size) self.num_branches = len(patch_size) self.embed_dim = embed_dim self.num_features = self.head_hidden_size = sum(embed_dim) self.patch_embed = nn.ModuleList() for i in range(self.num_branches): setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i]))) setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i]))) for (im_s, p, d) in zip(self.img_size_scaled, patch_size, embed_dim): self.patch_embed.append(PatchEmbed(img_size=im_s, patch_size=p, in_chans=in_chans, embed_dim=d, multi_conv=multi_conv)) self.pos_drop = nn.Dropout(p=pos_drop_rate) total_depth = sum([sum(x[-2:]) for x in depth]) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)] dpr_ptr = 0 self.blocks = nn.ModuleList() for (idx, block_cfg) in enumerate(depth): curr_depth = max(block_cfg[:-1]) + block_cfg[-1] dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth] blk = MultiScaleBlock(embed_dim, num_patches, block_cfg, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr_, norm_layer=norm_layer) dpr_ptr += curr_depth self.blocks.append(blk) self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)]) self.head_drop = nn.Dropout(drop_rate) self.head = nn.ModuleList([nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() for i in range(self.num_branches)]) for i in range(self.num_branches): trunc_normal_(getattr(self, f'pos_embed_{i}'), std=0.02) trunc_normal_(getattr(self, f'cls_token_{i}'), std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): out = set() for i in range(self.num_branches): out.add(f'cls_token_{i}') pe = getattr(self, f'pos_embed_{i}', None) if pe is not None and pe.requires_grad: out.add(f'pos_embed_{i}') return out @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^cls_token|pos_embed|patch_embed', blocks=[('^blocks\\.(\\d+)', None), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('token', 'avg') self.global_pool = global_pool self.head = nn.ModuleList([nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() for i in range(self.num_branches)]) def forward_features(self, x) -> List[torch.Tensor]: B = x.shape[0] xs = [] for (i, patch_embed) in enumerate(self.patch_embed): x_ = x ss = self.img_size_scaled[i] x_ = scale_image(x_, ss, self.crop_scale) x_ = patch_embed(x_) cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1 cls_tokens = cls_tokens.expand(B, -1, -1) x_ = torch.cat((cls_tokens, x_), dim=1) pos_embed = 
self.pos_embed_0 if i == 0 else self.pos_embed_1 x_ = x_ + pos_embed x_ = self.pos_drop(x_) xs.append(x_) for (i, blk) in enumerate(self.blocks): xs = blk(xs) xs = [norm(xs[i]) for (i, norm) in enumerate(self.norm)] return xs def forward_head(self, xs: List[torch.Tensor], pre_logits: bool=False) -> torch.Tensor: xs = [x[:, 1:].mean(dim=1) for x in xs] if self.global_pool == 'avg' else [x[:, 0] for x in xs] xs = [self.head_drop(x) for x in xs] if pre_logits or isinstance(self.head[0], nn.Identity): return torch.cat([x for x in xs], dim=1) return torch.mean(torch.stack([head(xs[i]) for (i, head) in enumerate(self.head)], dim=0), dim=0) def forward(self, x): xs = self.forward_features(x) x = self.forward_head(xs) return x def _create_crossvit(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') def pretrained_filter_fn(state_dict): new_state_dict = {} for key in state_dict.keys(): if 'pos_embed' in key or 'cls_token' in key: new_key = key.replace('.', '_') else: new_key = key new_state_dict[new_key] = state_dict[key] return new_state_dict return build_model_with_cfg(CrossVit, variant, pretrained, pretrained_filter_fn=pretrained_filter_fn, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 240, 240), 'pool_size': None, 'crop_pct': 0.875, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, 'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'), 'classifier': ('head.0', 'head.1'), **kwargs} default_cfgs = generate_default_cfgs({'crossvit_15_240.in1k': _cfg(hf_hub_id='timm/'), 'crossvit_15_dagger_240.in1k': _cfg(hf_hub_id='timm/', first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0')), 'crossvit_15_dagger_408.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0), 'crossvit_18_240.in1k': _cfg(hf_hub_id='timm/'), 'crossvit_18_dagger_240.in1k': _cfg(hf_hub_id='timm/', first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0')), 'crossvit_18_dagger_408.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0), 'crossvit_9_240.in1k': _cfg(hf_hub_id='timm/'), 'crossvit_9_dagger_240.in1k': _cfg(hf_hub_id='timm/', first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0')), 'crossvit_base_240.in1k': _cfg(hf_hub_id='timm/'), 'crossvit_small_240.in1k': _cfg(hf_hub_id='timm/'), 'crossvit_tiny_240.in1k': _cfg(hf_hub_id='timm/')}) @register_model def crossvit_tiny_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict(img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[96, 192], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], num_heads=[3, 3], mlp_ratio=[4, 4, 1]) model = _create_crossvit(variant='crossvit_tiny_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_small_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict(img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], num_heads=[6, 6], mlp_ratio=[4, 4, 1]) model = _create_crossvit(variant='crossvit_small_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_base_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict(img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[384, 768], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], 
num_heads=[12, 12], mlp_ratio=[4, 4, 1]) model = _create_crossvit(variant='crossvit_base_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_9_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict(img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], num_heads=[4, 4], mlp_ratio=[3, 3, 1]) model = _create_crossvit(variant='crossvit_9_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_15_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict(img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], num_heads=[6, 6], mlp_ratio=[3, 3, 1]) model = _create_crossvit(variant='crossvit_15_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_18_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict(img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], num_heads=[7, 7], mlp_ratio=[3, 3, 1]) model = _create_crossvit(variant='crossvit_18_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_9_dagger_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict(img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], num_heads=[4, 4], mlp_ratio=[3, 3, 1], multi_conv=True) model = _create_crossvit(variant='crossvit_9_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_15_dagger_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict(img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True) model = _create_crossvit(variant='crossvit_15_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_15_dagger_408(pretrained=False, **kwargs) -> CrossVit: model_args = dict(img_scale=(1.0, 384 / 408), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True) model = _create_crossvit(variant='crossvit_15_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_18_dagger_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict(img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True) model = _create_crossvit(variant='crossvit_18_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_18_dagger_408(pretrained=False, **kwargs) -> CrossVit: model_args = dict(img_scale=(1.0, 384 / 408), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True) model = _create_crossvit(variant='crossvit_18_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs)) return model # File: pytorch-image-models-main/timm/models/cspnet.py """""" from dataclasses import dataclass, asdict, replace from functools import partial from typing import Any, Dict, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers 
import ClassifierHead, ConvNormAct, DropPath, get_attn, create_act_layer, make_divisible from ._builder import build_model_with_cfg from ._manipulate import named_apply, MATCH_PREV_GROUP from ._registry import register_model, generate_default_cfgs __all__ = ['CspNet'] @dataclass class CspStemCfg: out_chs: Union[int, Tuple[int, ...]] = 32 stride: Union[int, Tuple[int, ...]] = 2 kernel_size: int = 3 padding: Union[int, str] = '' pool: Optional[str] = '' def _pad_arg(x, n): if not isinstance(x, (tuple, list)): x = (x,) curr_n = len(x) pad_n = n - curr_n if pad_n <= 0: return x[:n] return tuple(x + (x[-1],) * pad_n) @dataclass class CspStagesCfg: depth: Tuple[int, ...] = (3, 3, 5, 2) out_chs: Tuple[int, ...] = (128, 256, 512, 1024) stride: Union[int, Tuple[int, ...]] = 2 groups: Union[int, Tuple[int, ...]] = 1 block_ratio: Union[float, Tuple[float, ...]] = 1.0 bottle_ratio: Union[float, Tuple[float, ...]] = 1.0 avg_down: Union[bool, Tuple[bool, ...]] = False attn_layer: Optional[Union[str, Tuple[str, ...]]] = None attn_kwargs: Optional[Union[Dict, Tuple[Dict]]] = None stage_type: Union[str, Tuple[str]] = 'csp' block_type: Union[str, Tuple[str]] = 'bottle' expand_ratio: Union[float, Tuple[float, ...]] = 1.0 cross_linear: Union[bool, Tuple[bool, ...]] = False down_growth: Union[bool, Tuple[bool, ...]] = False def __post_init__(self): n = len(self.depth) assert len(self.out_chs) == n self.stride = _pad_arg(self.stride, n) self.groups = _pad_arg(self.groups, n) self.block_ratio = _pad_arg(self.block_ratio, n) self.bottle_ratio = _pad_arg(self.bottle_ratio, n) self.avg_down = _pad_arg(self.avg_down, n) self.attn_layer = _pad_arg(self.attn_layer, n) self.attn_kwargs = _pad_arg(self.attn_kwargs, n) self.stage_type = _pad_arg(self.stage_type, n) self.block_type = _pad_arg(self.block_type, n) self.expand_ratio = _pad_arg(self.expand_ratio, n) self.cross_linear = _pad_arg(self.cross_linear, n) self.down_growth = _pad_arg(self.down_growth, n) @dataclass class CspModelCfg: stem: CspStemCfg stages: CspStagesCfg zero_init_last: bool = True act_layer: str = 'leaky_relu' norm_layer: str = 'batchnorm' aa_layer: Optional[str] = None def _cs3_cfg(width_multiplier=1.0, depth_multiplier=1.0, avg_down=False, act_layer='silu', focus=False, attn_layer=None, attn_kwargs=None, bottle_ratio=1.0, block_type='dark'): if focus: stem_cfg = CspStemCfg(out_chs=make_divisible(64 * width_multiplier), kernel_size=6, stride=2, padding=2, pool='') else: stem_cfg = CspStemCfg(out_chs=tuple([make_divisible(c * width_multiplier) for c in (32, 64)]), kernel_size=3, stride=2, pool='') return CspModelCfg(stem=stem_cfg, stages=CspStagesCfg(out_chs=tuple([make_divisible(c * width_multiplier) for c in (128, 256, 512, 1024)]), depth=tuple([int(d * depth_multiplier) for d in (3, 6, 9, 3)]), stride=2, bottle_ratio=bottle_ratio, block_ratio=0.5, avg_down=avg_down, attn_layer=attn_layer, attn_kwargs=attn_kwargs, stage_type='cs3', block_type=block_type), act_layer=act_layer) class BottleneckBlock(nn.Module): def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.25, groups=1, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_last=False, attn_layer=None, drop_block=None, drop_path=0.0): super(BottleneckBlock, self).__init__() mid_chs = int(round(out_chs * bottle_ratio)) ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) attn_last = attn_layer is not None and attn_last attn_first = attn_layer is not None and (not attn_last) self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=1, **ckwargs) self.conv2 = ConvNormAct(mid_chs, 
mid_chs, kernel_size=3, dilation=dilation, groups=groups, drop_layer=drop_block, **ckwargs) self.attn2 = attn_layer(mid_chs, act_layer=act_layer) if attn_first else nn.Identity() self.conv3 = ConvNormAct(mid_chs, out_chs, kernel_size=1, apply_act=False, **ckwargs) self.attn3 = attn_layer(out_chs, act_layer=act_layer) if attn_last else nn.Identity() self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() self.act3 = create_act_layer(act_layer) def zero_init_last(self): nn.init.zeros_(self.conv3.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.conv2(x) x = self.attn2(x) x = self.conv3(x) x = self.attn3(x) x = self.drop_path(x) + shortcut x = self.act3(x) return x class DarkBlock(nn.Module): def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.5, groups=1, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, drop_block=None, drop_path=0.0): super(DarkBlock, self).__init__() mid_chs = int(round(out_chs * bottle_ratio)) ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=1, **ckwargs) self.attn = attn_layer(mid_chs, act_layer=act_layer) if attn_layer is not None else nn.Identity() self.conv2 = ConvNormAct(mid_chs, out_chs, kernel_size=3, dilation=dilation, groups=groups, drop_layer=drop_block, **ckwargs) self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() def zero_init_last(self): nn.init.zeros_(self.conv2.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.attn(x) x = self.conv2(x) x = self.drop_path(x) + shortcut return x class EdgeBlock(nn.Module): def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.5, groups=1, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, drop_block=None, drop_path=0.0): super(EdgeBlock, self).__init__() mid_chs = int(round(out_chs * bottle_ratio)) ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups, drop_layer=drop_block, **ckwargs) self.attn = attn_layer(mid_chs, act_layer=act_layer) if attn_layer is not None else nn.Identity() self.conv2 = ConvNormAct(mid_chs, out_chs, kernel_size=1, **ckwargs) self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() def zero_init_last(self): nn.init.zeros_(self.conv2.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.attn(x) x = self.conv2(x) x = self.drop_path(x) + shortcut return x class CrossStage(nn.Module): def __init__(self, in_chs, out_chs, stride, dilation, depth, block_ratio=1.0, bottle_ratio=1.0, expand_ratio=1.0, groups=1, first_dilation=None, avg_down=False, down_growth=False, cross_linear=False, block_dpr=None, block_fn=BottleneckBlock, **block_kwargs): super(CrossStage, self).__init__() first_dilation = first_dilation or dilation down_chs = out_chs if down_growth else in_chs self.expand_chs = exp_chs = int(round(out_chs * expand_ratio)) block_out_chs = int(round(out_chs * block_ratio)) conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) aa_layer = block_kwargs.pop('aa_layer', None) if stride != 1 or first_dilation != dilation: if avg_down: self.conv_down = nn.Sequential(nn.AvgPool2d(2) if stride == 2 else nn.Identity(), ConvNormAct(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs)) else: self.conv_down = ConvNormAct(in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, aa_layer=aa_layer, **conv_kwargs) 
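# --- editor's note (added comment; not part of the original timm source) ---
# CrossStage implements the cross-stage partial (CSP) pattern: conv_down above
# absorbs any stride/dilation change, conv_exp widens the input to expand_chs,
# and forward() splits the result channel-wise -- xb flows through the bottleneck
# blocks and conv_transition_b while xs bypasses them -- before conv_transition
# fuses the re-concatenated halves, so only half the channels pay for the block stack.
# ----------------------------------------------------------------------------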
prev_chs = down_chs else: self.conv_down = nn.Identity() prev_chs = in_chs self.conv_exp = ConvNormAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs) prev_chs = exp_chs // 2 self.blocks = nn.Sequential() for i in range(depth): self.blocks.add_module(str(i), block_fn(in_chs=prev_chs, out_chs=block_out_chs, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, drop_path=block_dpr[i] if block_dpr is not None else 0.0, **block_kwargs)) prev_chs = block_out_chs self.conv_transition_b = ConvNormAct(prev_chs, exp_chs // 2, kernel_size=1, **conv_kwargs) self.conv_transition = ConvNormAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs) def forward(self, x): x = self.conv_down(x) x = self.conv_exp(x) (xs, xb) = x.split(self.expand_chs // 2, dim=1) xb = self.blocks(xb) xb = self.conv_transition_b(xb).contiguous() out = self.conv_transition(torch.cat([xs, xb], dim=1)) return out class CrossStage3(nn.Module): def __init__(self, in_chs, out_chs, stride, dilation, depth, block_ratio=1.0, bottle_ratio=1.0, expand_ratio=1.0, groups=1, first_dilation=None, avg_down=False, down_growth=False, cross_linear=False, block_dpr=None, block_fn=BottleneckBlock, **block_kwargs): super(CrossStage3, self).__init__() first_dilation = first_dilation or dilation down_chs = out_chs if down_growth else in_chs self.expand_chs = exp_chs = int(round(out_chs * expand_ratio)) block_out_chs = int(round(out_chs * block_ratio)) conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) aa_layer = block_kwargs.pop('aa_layer', None) if stride != 1 or first_dilation != dilation: if avg_down: self.conv_down = nn.Sequential(nn.AvgPool2d(2) if stride == 2 else nn.Identity(), ConvNormAct(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs)) else: self.conv_down = ConvNormAct(in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, aa_layer=aa_layer, **conv_kwargs) prev_chs = down_chs else: self.conv_down = nn.Identity() prev_chs = in_chs self.conv_exp = ConvNormAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs) prev_chs = exp_chs // 2 self.blocks = nn.Sequential() for i in range(depth): self.blocks.add_module(str(i), block_fn(in_chs=prev_chs, out_chs=block_out_chs, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, drop_path=block_dpr[i] if block_dpr is not None else 0.0, **block_kwargs)) prev_chs = block_out_chs self.conv_transition = ConvNormAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs) def forward(self, x): x = self.conv_down(x) x = self.conv_exp(x) (x1, x2) = x.split(self.expand_chs // 2, dim=1) x1 = self.blocks(x1) out = self.conv_transition(torch.cat([x1, x2], dim=1)) return out class DarkStage(nn.Module): def __init__(self, in_chs, out_chs, stride, dilation, depth, block_ratio=1.0, bottle_ratio=1.0, groups=1, first_dilation=None, avg_down=False, block_fn=BottleneckBlock, block_dpr=None, **block_kwargs): super(DarkStage, self).__init__() first_dilation = first_dilation or dilation conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) aa_layer = block_kwargs.pop('aa_layer', None) if avg_down: self.conv_down = nn.Sequential(nn.AvgPool2d(2) if stride == 2 else nn.Identity(), ConvNormAct(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs)) else: self.conv_down = ConvNormAct(in_chs, out_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, aa_layer=aa_layer, **conv_kwargs) prev_chs = 
out_chs block_out_chs = int(round(out_chs * block_ratio)) self.blocks = nn.Sequential() for i in range(depth): self.blocks.add_module(str(i), block_fn(in_chs=prev_chs, out_chs=block_out_chs, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, drop_path=block_dpr[i] if block_dpr is not None else 0.0, **block_kwargs)) prev_chs = block_out_chs def forward(self, x): x = self.conv_down(x) x = self.blocks(x) return x def create_csp_stem(in_chans=3, out_chs=32, kernel_size=3, stride=2, pool='', padding='', act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None): stem = nn.Sequential() feature_info = [] if not isinstance(out_chs, (tuple, list)): out_chs = [out_chs] stem_depth = len(out_chs) assert stem_depth assert stride in (1, 2, 4) prev_feat = None prev_chs = in_chans last_idx = stem_depth - 1 stem_stride = 1 for (i, chs) in enumerate(out_chs): conv_name = f'conv{i + 1}' conv_stride = 2 if i == 0 and stride > 1 or (i == last_idx and stride > 2 and (not pool)) else 1 if conv_stride > 1 and prev_feat is not None: feature_info.append(prev_feat) stem.add_module(conv_name, ConvNormAct(prev_chs, chs, kernel_size, stride=conv_stride, padding=padding if i == 0 else '', act_layer=act_layer, norm_layer=norm_layer)) stem_stride *= conv_stride prev_chs = chs prev_feat = dict(num_chs=prev_chs, reduction=stem_stride, module='.'.join(['stem', conv_name])) if pool: assert stride > 2 if prev_feat is not None: feature_info.append(prev_feat) if aa_layer is not None: stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) stem.add_module('aa', aa_layer(channels=prev_chs, stride=2)) pool_name = 'aa' else: stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) pool_name = 'pool' stem_stride *= 2 prev_feat = dict(num_chs=prev_chs, reduction=stem_stride, module='.'.join(['stem', pool_name])) feature_info.append(prev_feat) return (stem, feature_info) def _get_stage_fn(stage_args): stage_type = stage_args.pop('stage_type') assert stage_type in ('dark', 'csp', 'cs3') if stage_type == 'dark': stage_args.pop('expand_ratio', None) stage_args.pop('cross_linear', None) stage_args.pop('down_growth', None) stage_fn = DarkStage elif stage_type == 'csp': stage_fn = CrossStage else: stage_fn = CrossStage3 return (stage_fn, stage_args) def _get_block_fn(stage_args): block_type = stage_args.pop('block_type') assert block_type in ('dark', 'edge', 'bottle') if block_type == 'dark': return (DarkBlock, stage_args) elif block_type == 'edge': return (EdgeBlock, stage_args) else: return (BottleneckBlock, stage_args) def _get_attn_fn(stage_args): attn_layer = stage_args.pop('attn_layer') attn_kwargs = stage_args.pop('attn_kwargs', None) or {} if attn_layer is not None: attn_layer = get_attn(attn_layer) if attn_kwargs: attn_layer = partial(attn_layer, **attn_kwargs) return (attn_layer, stage_args) def create_csp_stages(cfg: CspModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any]): cfg_dict = asdict(cfg.stages) num_stages = len(cfg.stages.depth) cfg_dict['block_dpr'] = [None] * num_stages if not drop_path_rate else [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.stages.depth)).split(cfg.stages.depth)] stage_args = [dict(zip(cfg_dict.keys(), values)) for values in zip(*cfg_dict.values())] block_kwargs = dict(act_layer=cfg.act_layer, norm_layer=cfg.norm_layer) dilation = 1 net_stride = stem_feat['reduction'] prev_chs = stem_feat['num_chs'] prev_feat = stem_feat feature_info = [] stages = [] for (stage_idx, stage_args) in enumerate(stage_args): (stage_fn, 
stage_args) = _get_stage_fn(stage_args) (block_fn, stage_args) = _get_block_fn(stage_args) (attn_fn, stage_args) = _get_attn_fn(stage_args) stride = stage_args.pop('stride') if stride != 1 and prev_feat: feature_info.append(prev_feat) if net_stride >= output_stride and stride > 1: dilation *= stride stride = 1 net_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 stages += [stage_fn(prev_chs, **stage_args, stride=stride, first_dilation=first_dilation, dilation=dilation, block_fn=block_fn, aa_layer=cfg.aa_layer, attn_layer=attn_fn, **block_kwargs)] prev_chs = stage_args['out_chs'] prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}') feature_info.append(prev_feat) return (nn.Sequential(*stages), feature_info) class CspNet(nn.Module): def __init__(self, cfg: CspModelCfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.0, drop_path_rate=0.0, zero_init_last=True, **kwargs): super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate assert output_stride in (8, 16, 32) cfg = replace(cfg, **kwargs) layer_args = dict(act_layer=cfg.act_layer, norm_layer=cfg.norm_layer, aa_layer=cfg.aa_layer) self.feature_info = [] (self.stem, stem_feat_info) = create_csp_stem(in_chans, **asdict(cfg.stem), **layer_args) self.feature_info.extend(stem_feat_info[:-1]) (self.stages, stage_feat_info) = create_csp_stages(cfg, drop_path_rate=drop_path_rate, output_stride=output_stride, stem_feat=stem_feat_info[-1]) prev_chs = stage_feat_info[-1]['num_chs'] self.feature_info.extend(stage_feat_info) self.num_features = self.head_hidden_size = prev_chs self.head = ClassifierHead(in_features=prev_chs, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^stem', blocks='^stages\\.(\\d+)' if coarse else [('^stages\\.(\\d+)\\.blocks\\.(\\d+)', None), ('^stages\\.(\\d+)\\..*transition', MATCH_PREV_GROUP), ('^stages\\.(\\d+)', (0,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name, zero_init_last=False): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu') if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): nn.init.normal_(module.weight, mean=0.0, std=0.01) if module.bias is not None: nn.init.zeros_(module.bias) elif zero_init_last and hasattr(module, 'zero_init_last'): module.zero_init_last() model_cfgs = dict(cspresnet50=CspModelCfg(stem=CspStemCfg(out_chs=64, kernel_size=7, stride=4, pool='max'), stages=CspStagesCfg(depth=(3, 3, 5, 2), out_chs=(128, 256, 512, 1024), stride=(1, 2), expand_ratio=2.0, bottle_ratio=0.5, cross_linear=True)), cspresnet50d=CspModelCfg(stem=CspStemCfg(out_chs=(32, 32, 64), kernel_size=3, stride=4, pool='max'), 
stages=CspStagesCfg(depth=(3, 3, 5, 2), out_chs=(128, 256, 512, 1024), stride=(1,) + (2,), expand_ratio=2.0, bottle_ratio=0.5, block_ratio=1.0, cross_linear=True)), cspresnet50w=CspModelCfg(stem=CspStemCfg(out_chs=(32, 32, 64), kernel_size=3, stride=4, pool='max'), stages=CspStagesCfg(depth=(3, 3, 5, 2), out_chs=(256, 512, 1024, 2048), stride=(1,) + (2,), expand_ratio=1.0, bottle_ratio=0.25, block_ratio=0.5, cross_linear=True)), cspresnext50=CspModelCfg(stem=CspStemCfg(out_chs=64, kernel_size=7, stride=4, pool='max'), stages=CspStagesCfg(depth=(3, 3, 5, 2), out_chs=(256, 512, 1024, 2048), stride=(1,) + (2,), groups=32, expand_ratio=1.0, bottle_ratio=1.0, block_ratio=0.5, cross_linear=True)), cspdarknet53=CspModelCfg(stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), stages=CspStagesCfg(depth=(1, 2, 8, 8, 4), out_chs=(64, 128, 256, 512, 1024), stride=2, expand_ratio=(2.0,) + (1.0,), bottle_ratio=(0.5,) + (1.0,), block_ratio=(1.0,) + (0.5,), down_growth=True, block_type='dark')), darknet17=CspModelCfg(stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), stages=CspStagesCfg(depth=(1,) * 5, out_chs=(64, 128, 256, 512, 1024), stride=(2,), bottle_ratio=(0.5,), block_ratio=(1.0,), stage_type='dark', block_type='dark')), darknet21=CspModelCfg(stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), stages=CspStagesCfg(depth=(1, 1, 1, 2, 2), out_chs=(64, 128, 256, 512, 1024), stride=(2,), bottle_ratio=(0.5,), block_ratio=(1.0,), stage_type='dark', block_type='dark')), sedarknet21=CspModelCfg(stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), stages=CspStagesCfg(depth=(1, 1, 1, 2, 2), out_chs=(64, 128, 256, 512, 1024), stride=2, bottle_ratio=0.5, block_ratio=1.0, attn_layer='se', stage_type='dark', block_type='dark')), darknet53=CspModelCfg(stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), stages=CspStagesCfg(depth=(1, 2, 8, 8, 4), out_chs=(64, 128, 256, 512, 1024), stride=2, bottle_ratio=0.5, block_ratio=1.0, stage_type='dark', block_type='dark')), darknetaa53=CspModelCfg(stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), stages=CspStagesCfg(depth=(1, 2, 8, 8, 4), out_chs=(64, 128, 256, 512, 1024), stride=2, bottle_ratio=0.5, block_ratio=1.0, avg_down=True, stage_type='dark', block_type='dark')), cs3darknet_s=_cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5), cs3darknet_m=_cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67), cs3darknet_l=_cs3_cfg(), cs3darknet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33), cs3darknet_focus_s=_cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5, focus=True), cs3darknet_focus_m=_cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67, focus=True), cs3darknet_focus_l=_cs3_cfg(focus=True), cs3darknet_focus_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, focus=True), cs3sedarknet_l=_cs3_cfg(attn_layer='se', attn_kwargs=dict(rd_ratio=0.25)), cs3sedarknet_x=_cs3_cfg(attn_layer='se', width_multiplier=1.25, depth_multiplier=1.33), cs3sedarknet_xdw=CspModelCfg(stem=CspStemCfg(out_chs=(32, 64), kernel_size=3, stride=2, pool=''), stages=CspStagesCfg(depth=(3, 6, 12, 4), out_chs=(256, 512, 1024, 2048), stride=2, groups=(1, 1, 256, 512), bottle_ratio=0.5, block_ratio=0.5, attn_layer='se'), act_layer='silu'), cs3edgenet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, bottle_ratio=1.5, block_type='edge'), cs3se_edgenet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, bottle_ratio=1.5, block_type='edge', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25))) def 
_create_cspnet(variant, pretrained=False, **kwargs): if variant.startswith('darknet') or variant.startswith('cspdarknet'): default_out_indices = (0, 1, 2, 3, 4, 5) else: default_out_indices = (0, 1, 2, 3, 4) out_indices = kwargs.pop('out_indices', default_out_indices) return build_model_with_cfg(CspNet, variant, pretrained, model_cfg=model_cfgs[variant], feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.887, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'cspresnet50.ra_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth'), 'cspresnet50d.untrained': _cfg(), 'cspresnet50w.untrained': _cfg(), 'cspresnext50.ra_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth'), 'cspdarknet53.ra_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth'), 'darknet17.untrained': _cfg(), 'darknet21.untrained': _cfg(), 'sedarknet21.untrained': _cfg(), 'darknet53.c2ns_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/darknet53_256_c2ns-3aeff817.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'darknetaa53.c2ns_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/darknetaa53_c2ns-5c28ec8a.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'cs3darknet_s.untrained': _cfg(interpolation='bicubic'), 'cs3darknet_m.c2ns_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_m_c2ns-43f06604.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'cs3darknet_l.c2ns_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_l_c2ns-16220c5d.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'cs3darknet_x.c2ns_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_x_c2ns-4e4490aa.pth', interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'cs3darknet_focus_s.untrained': _cfg(interpolation='bicubic'), 'cs3darknet_focus_m.c2ns_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_focus_m_c2ns-e23bed41.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'cs3darknet_focus_l.c2ns_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_focus_l_c2ns-65ef8888.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'cs3darknet_focus_x.untrained': _cfg(interpolation='bicubic'), 'cs3sedarknet_l.c2ns_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3sedarknet_l_c2ns-e8d1dc13.pth', 
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'cs3sedarknet_x.c2ns_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3sedarknet_x_c2ns-b4d0abc0.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'cs3sedarknet_xdw.untrained': _cfg(interpolation='bicubic'), 'cs3edgenet_x.c2_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3edgenet_x_c2-2e1610a9.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'cs3se_edgenet_x.c2ns_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3se_edgenet_x_c2ns-76f8e3ac.pth', interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0)}) @register_model def cspresnet50(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cspresnet50', pretrained=pretrained, **kwargs) @register_model def cspresnet50d(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cspresnet50d', pretrained=pretrained, **kwargs) @register_model def cspresnet50w(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cspresnet50w', pretrained=pretrained, **kwargs) @register_model def cspresnext50(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cspresnext50', pretrained=pretrained, **kwargs) @register_model def cspdarknet53(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cspdarknet53', pretrained=pretrained, **kwargs) @register_model def darknet17(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('darknet17', pretrained=pretrained, **kwargs) @register_model def darknet21(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('darknet21', pretrained=pretrained, **kwargs) @register_model def sedarknet21(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('sedarknet21', pretrained=pretrained, **kwargs) @register_model def darknet53(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('darknet53', pretrained=pretrained, **kwargs) @register_model def darknetaa53(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('darknetaa53', pretrained=pretrained, **kwargs) @register_model def cs3darknet_s(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_s', pretrained=pretrained, **kwargs) @register_model def cs3darknet_m(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_m', pretrained=pretrained, **kwargs) @register_model def cs3darknet_l(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_l', pretrained=pretrained, **kwargs) @register_model def cs3darknet_x(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_x', pretrained=pretrained, **kwargs) @register_model def cs3darknet_focus_s(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_focus_s', pretrained=pretrained, **kwargs) @register_model def cs3darknet_focus_m(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_focus_m', pretrained=pretrained, **kwargs) @register_model def cs3darknet_focus_l(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_focus_l', pretrained=pretrained, **kwargs) @register_model def cs3darknet_focus_x(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_focus_x', pretrained=pretrained, **kwargs) @register_model def 
cs3sedarknet_l(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3sedarknet_l', pretrained=pretrained, **kwargs) @register_model def cs3sedarknet_x(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3sedarknet_x', pretrained=pretrained, **kwargs) @register_model def cs3sedarknet_xdw(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3sedarknet_xdw', pretrained=pretrained, **kwargs) @register_model def cs3edgenet_x(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3edgenet_x', pretrained=pretrained, **kwargs) @register_model def cs3se_edgenet_x(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3se_edgenet_x', pretrained=pretrained, **kwargs) # File: pytorch-image-models-main/timm/models/davit.py """""" from functools import partial from typing import Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, to_2tuple, trunc_normal_, Mlp, LayerNorm2d, get_norm_layer, use_fused_attn from timm.layers import NormMlpClassifierHead, ClassifierHead from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['DaVit'] class ConvPosEnc(nn.Module): def __init__(self, dim: int, k: int=3, act: bool=False): super(ConvPosEnc, self).__init__() self.proj = nn.Conv2d(dim, dim, kernel_size=k, stride=1, padding=k // 2, groups=dim) self.act = nn.GELU() if act else nn.Identity() def forward(self, x: Tensor): feat = self.proj(x) x = x + self.act(feat) return x class Stem(nn.Module): def __init__(self, in_chs=3, out_chs=96, stride=4, norm_layer=LayerNorm2d): super().__init__() stride = to_2tuple(stride) self.stride = stride self.in_chs = in_chs self.out_chs = out_chs assert stride[0] == 4 self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=7, stride=stride, padding=3) self.norm = norm_layer(out_chs) def forward(self, x: Tensor): (B, C, H, W) = x.shape pad_r = (self.stride[1] - W % self.stride[1]) % self.stride[1] pad_b = (self.stride[0] - H % self.stride[0]) % self.stride[0] x = F.pad(x, (0, pad_r, 0, pad_b)) x = self.conv(x) x = self.norm(x) return x class Downsample(nn.Module): def __init__(self, in_chs, out_chs, kernel_size=3, norm_layer=LayerNorm2d): super().__init__() self.in_chs = in_chs self.out_chs = out_chs self.norm = norm_layer(in_chs) self.even_k = kernel_size % 2 == 0 self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=2, padding=0 if self.even_k else kernel_size // 2) def forward(self, x: Tensor): (B, C, H, W) = x.shape x = self.norm(x) if self.even_k: (k_h, k_w) = self.conv.kernel_size pad_r = (k_w - W % k_w) % k_w pad_b = (k_h - H % k_h) % k_h x = F.pad(x, (0, pad_r, 0, pad_b)) x = self.conv(x) return x class ChannelAttentionV2(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=True, dynamic_scale=True): super().__init__() self.groups = num_heads self.head_dim = dim // num_heads self.dynamic_scale = dynamic_scale self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.proj = nn.Linear(dim, dim) def forward(self, x): (B, N, C) = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.groups, C // self.groups).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) if self.dynamic_scale: q = q * N ** (-0.5) else: q = q * self.head_dim ** (-0.5) attn = q.transpose(-1, -2) @ k attn = attn.softmax(dim=-1) x = (attn @ v.transpose(-1, 
-2)).transpose(-1, -2) x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) return x class ChannelAttention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.proj = nn.Linear(dim, dim) def forward(self, x: Tensor): (B, N, C) = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) k = k * self.scale attn = k.transpose(-1, -2) @ v attn = attn.softmax(dim=-1) x = (attn @ q.transpose(-1, -2)).transpose(-1, -2) x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) return x class ChannelBlock(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, ffn=True, cpe_act=False, v2=False): super().__init__() self.cpe1 = ConvPosEnc(dim=dim, k=3, act=cpe_act) self.ffn = ffn self.norm1 = norm_layer(dim) attn_layer = ChannelAttentionV2 if v2 else ChannelAttention self.attn = attn_layer(dim, num_heads=num_heads, qkv_bias=qkv_bias) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.cpe2 = ConvPosEnc(dim=dim, k=3, act=cpe_act) if self.ffn: self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() else: self.norm2 = None self.mlp = None self.drop_path2 = None def forward(self, x: Tensor): (B, C, H, W) = x.shape x = self.cpe1(x).flatten(2).transpose(1, 2) cur = self.norm1(x) cur = self.attn(cur) x = x + self.drop_path1(cur) x = self.cpe2(x.transpose(1, 2).view(B, C, H, W)) if self.mlp is not None: x = x.flatten(2).transpose(1, 2) x = x + self.drop_path2(self.mlp(self.norm2(x))) x = x.transpose(1, 2).view(B, C, H, W) return x def window_partition(x: Tensor, window_size: Tuple[int, int]): (B, H, W, C) = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function def window_reverse(windows: Tensor, window_size: Tuple[int, int], H: int, W: int): C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x class WindowAttention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim, window_size, num_heads, qkv_bias=True): super().__init__() self.dim = dim self.window_size = window_size self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.proj = nn.Linear(dim, dim) self.softmax = nn.Softmax(dim=-1) def forward(self, x: Tensor): (B_, N, C) = x.shape qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = self.softmax(attn) x = attn @ v x = x.transpose(1, 2).reshape(B_, N, C) x = self.proj(x) return x class SpatialBlock(nn.Module): def __init__(self, dim, num_heads, window_size=7, mlp_ratio=4.0, qkv_bias=True, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, ffn=True, cpe_act=False): 
super().__init__() self.dim = dim self.ffn = ffn self.num_heads = num_heads self.window_size = to_2tuple(window_size) self.mlp_ratio = mlp_ratio self.cpe1 = ConvPosEnc(dim=dim, k=3, act=cpe_act) self.norm1 = norm_layer(dim) self.attn = WindowAttention(dim, self.window_size, num_heads=num_heads, qkv_bias=qkv_bias) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.cpe2 = ConvPosEnc(dim=dim, k=3, act=cpe_act) if self.ffn: self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() else: self.norm2 = None self.mlp = None self.drop_path2 = None def forward(self, x: Tensor): (B, C, H, W) = x.shape shortcut = self.cpe1(x).flatten(2).transpose(1, 2) x = self.norm1(shortcut) x = x.view(B, H, W, C) pad_l = pad_t = 0 pad_r = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1] pad_b = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0] x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) (_, Hp, Wp, _) = x.shape x_windows = window_partition(x, self.window_size) x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C) attn_windows = self.attn(x_windows) attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) x = window_reverse(attn_windows, self.window_size, Hp, Wp) x = x[:, :H, :W, :].contiguous() x = x.view(B, H * W, C) x = shortcut + self.drop_path1(x) x = self.cpe2(x.transpose(1, 2).view(B, C, H, W)) if self.mlp is not None: x = x.flatten(2).transpose(1, 2) x = x + self.drop_path2(self.mlp(self.norm2(x))) x = x.transpose(1, 2).view(B, C, H, W) return x class DaVitStage(nn.Module): def __init__(self, in_chs, out_chs, depth=1, downsample=True, attn_types=('spatial', 'channel'), num_heads=3, window_size=7, mlp_ratio=4.0, qkv_bias=True, drop_path_rates=(0, 0), norm_layer=LayerNorm2d, norm_layer_cl=nn.LayerNorm, ffn=True, cpe_act=False, down_kernel_size=2, named_blocks=False, channel_attn_v2=False): super().__init__() self.grad_checkpointing = False if downsample: self.downsample = Downsample(in_chs, out_chs, kernel_size=down_kernel_size, norm_layer=norm_layer) else: self.downsample = nn.Identity() from collections import OrderedDict stage_blocks = [] for block_idx in range(depth): dual_attention_block = [] for (attn_idx, attn_type) in enumerate(attn_types): if attn_type == 'spatial': dual_attention_block.append(('spatial_block', SpatialBlock(dim=out_chs, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_path=drop_path_rates[block_idx], norm_layer=norm_layer_cl, ffn=ffn, cpe_act=cpe_act, window_size=window_size))) elif attn_type == 'channel': dual_attention_block.append(('channel_block', ChannelBlock(dim=out_chs, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_path=drop_path_rates[block_idx], norm_layer=norm_layer_cl, ffn=ffn, cpe_act=cpe_act, v2=channel_attn_v2))) if named_blocks: stage_blocks.append(nn.Sequential(OrderedDict(dual_attention_block))) else: stage_blocks.append(nn.Sequential(*[b[1] for b in dual_attention_block])) self.blocks = nn.Sequential(*stage_blocks) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x: Tensor): x = self.downsample(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class DaVit(nn.Module): def __init__(self, 
in_chans=3, depths=(1, 1, 3, 1), embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24), window_size=7, mlp_ratio=4, qkv_bias=True, norm_layer='layernorm2d', norm_layer_cl='layernorm', norm_eps=1e-05, attn_types=('spatial', 'channel'), ffn=True, cpe_act=False, down_kernel_size=2, channel_attn_v2=False, named_blocks=False, drop_rate=0.0, drop_path_rate=0.0, num_classes=1000, global_pool='avg', head_norm_first=False): super().__init__() num_stages = len(embed_dims) assert num_stages == len(num_heads) == len(depths) norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps) norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps) self.num_classes = num_classes self.num_features = self.head_hidden_size = embed_dims[-1] self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] self.stem = Stem(in_chans, embed_dims[0], norm_layer=norm_layer) in_chs = embed_dims[0] dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] stages = [] for stage_idx in range(num_stages): out_chs = embed_dims[stage_idx] stage = DaVitStage(in_chs, out_chs, depth=depths[stage_idx], downsample=stage_idx > 0, attn_types=attn_types, num_heads=num_heads[stage_idx], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_path_rates=dpr[stage_idx], norm_layer=norm_layer, norm_layer_cl=norm_layer_cl, ffn=ffn, cpe_act=cpe_act, down_kernel_size=down_kernel_size, channel_attn_v2=channel_attn_v2, named_blocks=named_blocks) in_chs = out_chs stages.append(stage) self.feature_info += [dict(num_chs=out_chs, reduction=2, module=f'stages.{stage_idx}')] self.stages = nn.Sequential(*stages) if head_norm_first: self.norm_pre = norm_layer(self.num_features) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) else: self.norm_pre = nn.Identity() self.head = NormMlpClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, norm_layer=norm_layer) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks='^stages\\.(\\d+)' if coarse else [('^stages\\.(\\d+).downsample', (0,)), ('^stages\\.(\\d+)\\.blocks\\.(\\d+)', None), ('^norm_pre', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable for stage in self.stages: stage.set_grad_checkpointing(enable=enable) @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.norm_pre(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _convert_florence2(state_dict, model, prefix='vision_tower.'): import re out_dict = {} for (k, v) in state_dict.items(): if k.startswith(prefix): k = k.replace(prefix, '') else: continue k = re.sub('convs.([0-9]+)', 'stages.\\1.downsample', k) k = re.sub('blocks.([0-9]+)', 'stages.\\1.blocks', k) k = k.replace('downsample.proj', 
'downsample.conv') k = k.replace('stages.0.downsample', 'stem') k = k.replace('window_attn.norm.', 'norm1.') k = k.replace('window_attn.fn.', 'attn.') k = k.replace('channel_attn.norm.', 'norm1.') k = k.replace('channel_attn.fn.', 'attn.') k = k.replace('ffn.norm.', 'norm2.') k = k.replace('ffn.fn.net.', 'mlp.') k = k.replace('conv1.fn.dw', 'cpe1.proj') k = k.replace('conv2.fn.dw', 'cpe2.proj') out_dict[k] = v return out_dict def checkpoint_filter_fn(state_dict, model): if 'head.fc.weight' in state_dict: return state_dict if 'state_dict' in state_dict: state_dict = state_dict['state_dict'] if 'vision_tower.convs.0.proj.weight' in state_dict: return _convert_florence2(state_dict, model) import re out_dict = {} for (k, v) in state_dict.items(): k = re.sub('patch_embeds.([0-9]+)', 'stages.\\1.downsample', k) k = re.sub('main_blocks.([0-9]+)', 'stages.\\1.blocks', k) k = k.replace('downsample.proj', 'downsample.conv') k = k.replace('stages.0.downsample', 'stem') k = k.replace('head.', 'head.fc.') k = k.replace('norms.', 'head.norm.') k = k.replace('cpe.0', 'cpe1') k = k.replace('cpe.1', 'cpe2') out_dict[k] = v return out_dict def _create_davit(variant, pretrained=False, **kwargs): default_out_indices = tuple((i for (i, _) in enumerate(kwargs.get('depths', (1, 1, 3, 1))))) out_indices = kwargs.pop('out_indices', default_out_indices) strict = True if variant.endswith('_fl'): strict = False model = build_model_with_cfg(DaVit, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), pretrained_strict=strict, **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'davit_tiny.msft_in1k': _cfg(hf_hub_id='timm/'), 'davit_small.msft_in1k': _cfg(hf_hub_id='timm/'), 'davit_base.msft_in1k': _cfg(hf_hub_id='timm/'), 'davit_large': _cfg(), 'davit_huge': _cfg(), 'davit_giant': _cfg(), 'davit_base_fl.msft_florence2': _cfg(hf_hub_id='microsoft/Florence-2-base', num_classes=0, input_size=(3, 768, 768)), 'davit_huge_fl.msft_florence2': _cfg(hf_hub_id='microsoft/Florence-2-large', num_classes=0, input_size=(3, 768, 768))}) @register_model def davit_tiny(pretrained=False, **kwargs) -> DaVit: model_args = dict(depths=(1, 1, 3, 1), embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24)) return _create_davit('davit_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def davit_small(pretrained=False, **kwargs) -> DaVit: model_args = dict(depths=(1, 1, 9, 1), embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24)) return _create_davit('davit_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def davit_base(pretrained=False, **kwargs) -> DaVit: model_args = dict(depths=(1, 1, 9, 1), embed_dims=(128, 256, 512, 1024), num_heads=(4, 8, 16, 32)) return _create_davit('davit_base', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def davit_large(pretrained=False, **kwargs) -> DaVit: model_args = dict(depths=(1, 1, 9, 1), embed_dims=(192, 384, 768, 1536), num_heads=(6, 12, 24, 48)) return _create_davit('davit_large', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def davit_huge(pretrained=False, **kwargs) -> DaVit: model_args = dict(depths=(1, 1, 9, 1), embed_dims=(256, 
512, 1024, 2048), num_heads=(8, 16, 32, 64)) return _create_davit('davit_huge', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def davit_giant(pretrained=False, **kwargs) -> DaVit: model_args = dict(depths=(1, 1, 12, 3), embed_dims=(384, 768, 1536, 3072), num_heads=(12, 24, 48, 96)) return _create_davit('davit_giant', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def davit_base_fl(pretrained=False, **kwargs) -> DaVit: model_args = dict(depths=(1, 1, 9, 1), embed_dims=(128, 256, 512, 1024), num_heads=(4, 8, 16, 32), window_size=12, down_kernel_size=3, channel_attn_v2=True, named_blocks=True) return _create_davit('davit_base_fl', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def davit_huge_fl(pretrained=False, **kwargs) -> DaVit: model_args = dict(depths=(1, 1, 9, 1), embed_dims=(256, 512, 1024, 2048), num_heads=(8, 16, 32, 64), window_size=12, down_kernel_size=3, channel_attn_v2=True, named_blocks=True) return _create_davit('davit_huge_fl', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/deit.py """""" from functools import partial from typing import Optional import torch from torch import nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import resample_abs_pos_embed from timm.models.vision_transformer import VisionTransformer, trunc_normal_, checkpoint_filter_fn from ._builder import build_model_with_cfg from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['VisionTransformerDistilled'] class VisionTransformerDistilled(VisionTransformer): def __init__(self, *args, **kwargs): weight_init = kwargs.pop('weight_init', '') super().__init__(*args, **kwargs, weight_init='skip') assert self.global_pool in ('token',) self.num_prefix_tokens = 2 self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) self.pos_embed = nn.Parameter(torch.zeros(1, self.patch_embed.num_patches + self.num_prefix_tokens, self.embed_dim)) self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity() self.distilled_training = False self.init_weights(weight_init) def init_weights(self, mode=''): trunc_normal_(self.dist_token, std=0.02) super().init_weights(mode=mode) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^cls_token|pos_embed|patch_embed|dist_token', blocks=[('^blocks\\.(\\d+)', None), ('^norm', (99999,))]) @torch.jit.ignore def get_classifier(self) -> nn.Module: return (self.head, self.head_dist) def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable def _pos_embed(self, x): if self.dynamic_img_size: (B, H, W, C) = x.shape pos_embed = resample_abs_pos_embed(self.pos_embed, (H, W), num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens) x = x.view(B, -1, C) else: pos_embed = self.pos_embed if self.no_embed_class: x = x + pos_embed x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) else: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) x = x + pos_embed return 
self.pos_drop(x) def forward_head(self, x, pre_logits: bool=False) -> torch.Tensor: (x, x_dist) = (x[:, 0], x[:, 1]) if pre_logits: return (x + x_dist) / 2 x = self.head(x) x_dist = self.head_dist(x_dist) if self.distilled_training and self.training and (not torch.jit.is_scripting()): return (x, x_dist) else: return (x + x_dist) / 2 def _create_deit(variant, pretrained=False, distilled=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) model_cls = VisionTransformerDistilled if distilled else VisionTransformer model = build_model_with_cfg(model_cls, variant, pretrained, pretrained_filter_fn=partial(checkpoint_filter_fn, adapt_layer_scale=True), feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'deit_tiny_patch16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'), 'deit_small_patch16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'), 'deit_base_patch16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'), 'deit_base_patch16_384.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit_tiny_distilled_patch16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth', classifier=('head', 'head_dist')), 'deit_small_distilled_patch16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth', classifier=('head', 'head_dist')), 'deit_base_distilled_patch16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth', classifier=('head', 'head_dist')), 'deit_base_distilled_patch16_384.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth', input_size=(3, 384, 384), crop_pct=1.0, classifier=('head', 'head_dist')), 'deit3_small_patch16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_1k.pth'), 'deit3_small_patch16_384.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_1k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_medium_patch16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_1k.pth'), 'deit3_base_patch16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_1k.pth'), 'deit3_base_patch16_384.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_1k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_large_patch16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_1k.pth'), 'deit3_large_patch16_384.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_1k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_huge_patch14_224.fb_in1k': _cfg(hf_hub_id='timm/', 
url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_1k.pth'), 'deit3_small_patch16_224.fb_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_21k.pth', crop_pct=1.0), 'deit3_small_patch16_384.fb_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_21k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_medium_patch16_224.fb_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_21k.pth', crop_pct=1.0), 'deit3_base_patch16_224.fb_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_21k.pth', crop_pct=1.0), 'deit3_base_patch16_384.fb_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_21k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_large_patch16_224.fb_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_21k.pth', crop_pct=1.0), 'deit3_large_patch16_384.fb_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_21k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_huge_patch14_224.fb_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_21k_v1.pth', crop_pct=1.0)}) @register_model def deit_tiny_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) model = _create_deit('deit_tiny_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) model = _create_deit('deit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_deit('deit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_deit('deit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled: model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) model = _create_deit('deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) return model @register_model def deit_small_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled: model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) model = _create_deit('deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) return model @register_model def deit_base_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_deit('deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) return model @register_model def deit_base_distilled_patch16_384(pretrained=False, **kwargs) -> VisionTransformerDistilled: model_args = 
dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_deit('deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) return model @register_model def deit3_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-06) model = _create_deit('deit3_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_small_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-06) model = _create_deit('deit3_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, no_embed_class=True, init_values=1e-06) model = _create_deit('deit3_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-06) model = _create_deit('deit3_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-06) model = _create_deit('deit3_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_large_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-06) model = _create_deit('deit3_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_large_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-06) model = _create_deit('deit3_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_huge_patch14_224(pretrained=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, no_embed_class=True, init_values=1e-06) model = _create_deit('deit3_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, {'deit3_small_patch16_224_in21ft1k': 'deit3_small_patch16_224.fb_in22k_ft_in1k', 'deit3_small_patch16_384_in21ft1k': 'deit3_small_patch16_384.fb_in22k_ft_in1k', 'deit3_medium_patch16_224_in21ft1k': 'deit3_medium_patch16_224.fb_in22k_ft_in1k', 'deit3_base_patch16_224_in21ft1k': 'deit3_base_patch16_224.fb_in22k_ft_in1k', 'deit3_base_patch16_384_in21ft1k': 'deit3_base_patch16_384.fb_in22k_ft_in1k', 'deit3_large_patch16_224_in21ft1k': 'deit3_large_patch16_224.fb_in22k_ft_in1k', 'deit3_large_patch16_384_in21ft1k': 'deit3_large_patch16_384.fb_in22k_ft_in1k', 'deit3_huge_patch14_224_in21ft1k': 'deit3_huge_patch14_224.fb_in22k_ft_in1k'}) # File: pytorch-image-models-main/timm/models/densenet.py """""" import re from collections import OrderedDict import torch 
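# NOTE: the DenseNet implementation below follows the torchvision version with
# timm additions visible in this file: configurable norm/act layers via
# get_norm_act_layer, optional anti-aliased pooling (BlurPool2d / aa_layer),
# 'deep'/'tiered' stems, and memory-efficient dense layers that recompute the
# bottleneck through torch.utils.checkpoint when grad_checkpointing is set.
# A minimal usage sketch (assumes timm is installed; 'densenet121' is registered below):
#   import timm
#   model = timm.create_model('densenet121', pretrained=False, memory_efficient=True)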
import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from torch.jit.annotations import List from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import BatchNormAct2d, get_norm_act_layer, BlurPool2d, create_classifier from ._builder import build_model_with_cfg from ._manipulate import MATCH_PREV_GROUP from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['DenseNet'] class DenseLayer(nn.Module): def __init__(self, num_input_features, growth_rate, bn_size, norm_layer=BatchNormAct2d, drop_rate=0.0, grad_checkpointing=False): super(DenseLayer, self).__init__() (self.add_module('norm1', norm_layer(num_input_features)),) (self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)),) (self.add_module('norm2', norm_layer(bn_size * growth_rate)),) (self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)),) self.drop_rate = float(drop_rate) self.grad_checkpointing = grad_checkpointing def bottleneck_fn(self, xs): concated_features = torch.cat(xs, 1) bottleneck_output = self.conv1(self.norm1(concated_features)) return bottleneck_output def any_requires_grad(self, x): for tensor in x: if tensor.requires_grad: return True return False @torch.jit.unused def call_checkpoint_bottleneck(self, x): def closure(*xs): return self.bottleneck_fn(xs) return cp.checkpoint(closure, *x) @torch.jit._overload_method def forward(self, x): pass @torch.jit._overload_method def forward(self, x): pass def forward(self, x): if isinstance(x, torch.Tensor): prev_features = [x] else: prev_features = x if self.grad_checkpointing and self.any_requires_grad(prev_features): if torch.jit.is_scripting(): raise Exception('Memory Efficient not supported in JIT') bottleneck_output = self.call_checkpoint_bottleneck(prev_features) else: bottleneck_output = self.bottleneck_fn(prev_features) new_features = self.conv2(self.norm2(bottleneck_output)) if self.drop_rate > 0: new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) return new_features class DenseBlock(nn.ModuleDict): _version = 2 def __init__(self, num_layers, num_input_features, bn_size, growth_rate, norm_layer=BatchNormAct2d, drop_rate=0.0, grad_checkpointing=False): super(DenseBlock, self).__init__() for i in range(num_layers): layer = DenseLayer(num_input_features + i * growth_rate, growth_rate=growth_rate, bn_size=bn_size, norm_layer=norm_layer, drop_rate=drop_rate, grad_checkpointing=grad_checkpointing) self.add_module('denselayer%d' % (i + 1), layer) def forward(self, init_features): features = [init_features] for (name, layer) in self.items(): new_features = layer(features) features.append(new_features) return torch.cat(features, 1) class DenseTransition(nn.Sequential): def __init__(self, num_input_features, num_output_features, norm_layer=BatchNormAct2d, aa_layer=None): super(DenseTransition, self).__init__() self.add_module('norm', norm_layer(num_input_features)) self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) if aa_layer is not None: self.add_module('pool', aa_layer(num_output_features, stride=2)) else: self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) class DenseNet(nn.Module): def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), num_classes=1000, in_chans=3, global_pool='avg', bn_size=4, stem_type='', act_layer='relu', 
norm_layer='batchnorm2d', aa_layer=None, drop_rate=0.0, proj_drop_rate=0.0, memory_efficient=False, aa_stem_only=True): self.num_classes = num_classes super(DenseNet, self).__init__() norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer) deep_stem = 'deep' in stem_type num_init_features = growth_rate * 2 if aa_layer is None: stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) else: stem_pool = nn.Sequential(*[nn.MaxPool2d(kernel_size=3, stride=1, padding=1), aa_layer(channels=num_init_features, stride=2)]) if deep_stem: stem_chs_1 = stem_chs_2 = growth_rate if 'tiered' in stem_type: stem_chs_1 = 3 * (growth_rate // 4) stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4) self.features = nn.Sequential(OrderedDict([('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)), ('norm0', norm_layer(stem_chs_1)), ('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)), ('norm1', norm_layer(stem_chs_2)), ('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)), ('norm2', norm_layer(num_init_features)), ('pool0', stem_pool)])) else: self.features = nn.Sequential(OrderedDict([('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), ('norm0', norm_layer(num_init_features)), ('pool0', stem_pool)])) self.feature_info = [dict(num_chs=num_init_features, reduction=2, module=f'features.norm{(2 if deep_stem else 0)}')] current_stride = 4 num_features = num_init_features for (i, num_layers) in enumerate(block_config): block = DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, norm_layer=norm_layer, drop_rate=proj_drop_rate, grad_checkpointing=memory_efficient) module_name = f'denseblock{i + 1}' self.features.add_module(module_name, block) num_features = num_features + num_layers * growth_rate transition_aa_layer = None if aa_stem_only else aa_layer if i != len(block_config) - 1: self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.' 
+ module_name)] current_stride *= 2 trans = DenseTransition(num_input_features=num_features, num_output_features=num_features // 2, norm_layer=norm_layer, aa_layer=transition_aa_layer) self.features.add_module(f'transition{i + 1}', trans) num_features = num_features // 2 self.features.add_module('norm5', norm_layer(num_features)) self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')] self.num_features = self.head_hidden_size = num_features (global_pool, classifier) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) self.global_pool = global_pool self.head_drop = nn.Dropout(drop_rate) self.classifier = classifier for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^features\\.conv[012]|features\\.norm[012]|features\\.pool[012]', blocks='^features\\.(?:denseblock|transition)(\\d+)' if coarse else [('^features\\.denseblock(\\d+)\\.denselayer(\\d+)', None), ('^features\\.transition(\\d+)', MATCH_PREV_GROUP)]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for b in self.features.modules(): if isinstance(b, DenseLayer): b.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classifier def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.classifier) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): return self.features(x) def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.classifier(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _filter_torchvision_pretrained(state_dict): pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$') for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = res.group(1) + res.group(2) state_dict[new_key] = state_dict[key] del state_dict[key] return state_dict def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs): kwargs['growth_rate'] = growth_rate kwargs['block_config'] = block_config return build_model_with_cfg(DenseNet, variant, pretrained, feature_cfg=dict(flatten_sequential=True), pretrained_filter_fn=_filter_torchvision_pretrained, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'features.conv0', 'classifier': 'classifier', **kwargs} default_cfgs = generate_default_cfgs({'densenet121.ra_in1k': _cfg(hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'densenetblur121d.ra_in1k': _cfg(hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'densenet264d.untrained': _cfg(), 'densenet121.tv_in1k': _cfg(hf_hub_id='timm/'), 'densenet169.tv_in1k': _cfg(hf_hub_id='timm/'), 'densenet201.tv_in1k': _cfg(hf_hub_id='timm/'), 'densenet161.tv_in1k': _cfg(hf_hub_id='timm/')}) @register_model def densenet121(pretrained=False, **kwargs) -> DenseNet: model_args = 
dict(growth_rate=32, block_config=(6, 12, 24, 16)) model = _create_densenet('densenet121', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def densenetblur121d(pretrained=False, **kwargs) -> DenseNet: model_args = dict(growth_rate=32, block_config=(6, 12, 24, 16), stem_type='deep', aa_layer=BlurPool2d) model = _create_densenet('densenetblur121d', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def densenet169(pretrained=False, **kwargs) -> DenseNet: model_args = dict(growth_rate=32, block_config=(6, 12, 32, 32)) model = _create_densenet('densenet169', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def densenet201(pretrained=False, **kwargs) -> DenseNet: model_args = dict(growth_rate=32, block_config=(6, 12, 48, 32)) model = _create_densenet('densenet201', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def densenet161(pretrained=False, **kwargs) -> DenseNet: model_args = dict(growth_rate=48, block_config=(6, 12, 36, 24)) model = _create_densenet('densenet161', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def densenet264d(pretrained=False, **kwargs) -> DenseNet: model_args = dict(growth_rate=48, block_config=(6, 12, 64, 48), stem_type='deep') model = _create_densenet('densenet264d', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, {'tv_densenet121': 'densenet121.tv_in1k'}) # File: pytorch-image-models-main/timm/models/dla.py """""" import math from typing import List, Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['DLA'] class DlaBasic(nn.Module): def __init__(self, inplanes, planes, stride=1, dilation=1, **_): super(DlaBasic, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation) self.bn2 = nn.BatchNorm2d(planes) self.stride = stride def forward(self, x, shortcut: Optional[torch.Tensor]=None, children: Optional[List[torch.Tensor]]=None): if shortcut is None: shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out += shortcut out = self.relu(out) return out class DlaBottleneck(nn.Module): expansion = 2 def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64): super(DlaBottleneck, self).__init__() self.stride = stride mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) mid_planes = mid_planes // self.expansion self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(mid_planes) self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation, groups=cardinality) self.bn2 = nn.BatchNorm2d(mid_planes) self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(outplanes) self.relu = nn.ReLU(inplace=True) def forward(self, x, shortcut: Optional[torch.Tensor]=None, children: Optional[List[torch.Tensor]]=None): 
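# Plain 1x1-reduce -> 3x3 (grouped, optionally dilated) -> 1x1-expand bottleneck,
# each conv followed by BN, with ReLU between. Unlike a self-contained ResNet
# block, the residual ('shortcut') may be supplied by the caller -- DlaTree passes
# its projected/downsampled input so aggregation nodes can reuse it. 'children'
# is accepted only to keep a uniform forward signature across DLA block types
# for TorchScript; this block does not use it.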
if shortcut is None: shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) out += shortcut out = self.relu(out) return out class DlaBottle2neck(nn.Module): expansion = 2 def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4): super(DlaBottle2neck, self).__init__() self.is_first = stride > 1 self.scale = scale mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) mid_planes = mid_planes // self.expansion self.width = mid_planes self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(mid_planes * scale) num_scale_convs = max(1, scale - 1) convs = [] bns = [] for _ in range(num_scale_convs): convs.append(nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=cardinality, bias=False)) bns.append(nn.BatchNorm2d(mid_planes)) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) if self.is_first else None self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(outplanes) self.relu = nn.ReLU(inplace=True) def forward(self, x, shortcut: Optional[torch.Tensor]=None, children: Optional[List[torch.Tensor]]=None): if shortcut is None: shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) spx = torch.split(out, self.width, 1) spo = [] sp = spx[0] for (i, (conv, bn)) in enumerate(zip(self.convs, self.bns)): if i == 0 or self.is_first: sp = spx[i] else: sp = sp + spx[i] sp = conv(sp) sp = bn(sp) sp = self.relu(sp) spo.append(sp) if self.scale > 1: if self.pool is not None: spo.append(self.pool(spx[-1])) else: spo.append(spx[-1]) out = torch.cat(spo, 1) out = self.conv3(out) out = self.bn3(out) out += shortcut out = self.relu(out) return out class DlaRoot(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, shortcut): super(DlaRoot, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2) self.bn = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU(inplace=True) self.shortcut = shortcut def forward(self, x_children: List[torch.Tensor]): x = self.conv(torch.cat(x_children, 1)) x = self.bn(x) if self.shortcut: x += x_children[0] x = self.relu(x) return x class DlaTree(nn.Module): def __init__(self, levels, block, in_channels, out_channels, stride=1, dilation=1, cardinality=1, base_width=64, level_root=False, root_dim=0, root_kernel_size=1, root_shortcut=False): super(DlaTree, self).__init__() if root_dim == 0: root_dim = 2 * out_channels if level_root: root_dim += in_channels self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else nn.Identity() self.project = nn.Identity() cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width) if levels == 1: self.tree1 = block(in_channels, out_channels, stride, **cargs) self.tree2 = block(out_channels, out_channels, 1, **cargs) if in_channels != out_channels: self.project = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(out_channels)) self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_shortcut) else: cargs.update(dict(root_kernel_size=root_kernel_size, root_shortcut=root_shortcut)) self.tree1 = DlaTree(levels - 1, block, in_channels, out_channels, 
stride, root_dim=0, **cargs) self.tree2 = DlaTree(levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs) self.root = None self.level_root = level_root self.root_dim = root_dim self.levels = levels def forward(self, x, shortcut: Optional[torch.Tensor]=None, children: Optional[List[torch.Tensor]]=None): if children is None: children = [] bottom = self.downsample(x) shortcut = self.project(bottom) if self.level_root: children.append(bottom) x1 = self.tree1(x, shortcut) if self.root is not None: x2 = self.tree2(x1) x = self.root([x2, x1] + children) else: children.append(x1) x = self.tree2(x1, None, children) return x class DLA(nn.Module): def __init__(self, levels, channels, output_stride=32, num_classes=1000, in_chans=3, global_pool='avg', cardinality=1, base_width=64, block=DlaBottle2neck, shortcut_root=False, drop_rate=0.0): super(DLA, self).__init__() self.channels = channels self.num_classes = num_classes self.cardinality = cardinality self.base_width = base_width assert output_stride == 32 self.base_layer = nn.Sequential(nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False), nn.BatchNorm2d(channels[0]), nn.ReLU(inplace=True)) self.level0 = self._make_conv_level(channels[0], channels[0], levels[0]) self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2) cargs = dict(cardinality=cardinality, base_width=base_width, root_shortcut=shortcut_root) self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs) self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs) self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs) self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs) self.feature_info = [dict(num_chs=channels[0], reduction=1, module='level0'), dict(num_chs=channels[1], reduction=2, module='level1'), dict(num_chs=channels[2], reduction=4, module='level2'), dict(num_chs=channels[3], reduction=8, module='level3'), dict(num_chs=channels[4], reduction=16, module='level4'), dict(num_chs=channels[5], reduction=32, module='level5')] self.num_features = self.head_hidden_size = channels[-1] (self.global_pool, self.head_drop, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool, use_conv=True, drop_rate=drop_rate) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): modules = [] for i in range(convs): modules.extend([nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1, padding=dilation, bias=False, dilation=dilation), nn.BatchNorm2d(planes), nn.ReLU(inplace=True)]) inplanes = planes return nn.Sequential(*modules) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^base_layer', blocks='^level(\\d+)' if coarse else [('^level(\\d+)\\.tree(\\d+)', None), ('^level(\\d+)\\.root', (2,)), ('^level(\\d+)', (1,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.fc def reset_classifier(self, num_classes: int, 
global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() def forward_features(self, x): x = self.base_layer(x) x = self.level0(x) x = self.level1(x) x = self.level2(x) x = self.level3(x) x = self.level4(x) x = self.level5(x) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.head_drop(x) if pre_logits: return self.flatten(x) x = self.fc(x) return self.flatten(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_dla(variant, pretrained=False, **kwargs): return build_model_with_cfg(DLA, variant, pretrained, pretrained_strict=False, feature_cfg=dict(out_indices=(1, 2, 3, 4, 5)), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'base_layer.0', 'classifier': 'fc', **kwargs} default_cfgs = generate_default_cfgs({'dla34.in1k': _cfg(hf_hub_id='timm/'), 'dla46_c.in1k': _cfg(hf_hub_id='timm/'), 'dla46x_c.in1k': _cfg(hf_hub_id='timm/'), 'dla60x_c.in1k': _cfg(hf_hub_id='timm/'), 'dla60.in1k': _cfg(hf_hub_id='timm/'), 'dla60x.in1k': _cfg(hf_hub_id='timm/'), 'dla102.in1k': _cfg(hf_hub_id='timm/'), 'dla102x.in1k': _cfg(hf_hub_id='timm/'), 'dla102x2.in1k': _cfg(hf_hub_id='timm/'), 'dla169.in1k': _cfg(hf_hub_id='timm/'), 'dla60_res2net.in1k': _cfg(hf_hub_id='timm/'), 'dla60_res2next.in1k': _cfg(hf_hub_id='timm/')}) @register_model def dla60_res2net(pretrained=False, **kwargs) -> DLA: model_args = dict(levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), block=DlaBottle2neck, cardinality=1, base_width=28) return _create_dla('dla60_res2net', pretrained, **dict(model_args, **kwargs)) @register_model def dla60_res2next(pretrained=False, **kwargs): model_args = dict(levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), block=DlaBottle2neck, cardinality=8, base_width=4) return _create_dla('dla60_res2next', pretrained, **dict(model_args, **kwargs)) @register_model def dla34(pretrained=False, **kwargs) -> DLA: model_args = dict(levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], block=DlaBasic) return _create_dla('dla34', pretrained, **dict(model_args, **kwargs)) @register_model def dla46_c(pretrained=False, **kwargs) -> DLA: model_args = dict(levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], block=DlaBottleneck) return _create_dla('dla46_c', pretrained, **dict(model_args, **kwargs)) @register_model def dla46x_c(pretrained=False, **kwargs) -> DLA: model_args = dict(levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], block=DlaBottleneck, cardinality=32, base_width=4) return _create_dla('dla46x_c', pretrained, **dict(model_args, **kwargs)) @register_model def dla60x_c(pretrained=False, **kwargs) -> DLA: model_args = dict(levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 64, 64, 128, 256], block=DlaBottleneck, cardinality=32, base_width=4) return _create_dla('dla60x_c', pretrained, **dict(model_args, **kwargs)) @register_model def dla60(pretrained=False, **kwargs) -> DLA: model_args = dict(levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck) return _create_dla('dla60', pretrained, **dict(model_args, **kwargs)) @register_model def dla60x(pretrained=False, 
**kwargs) -> DLA: model_args = dict(levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, cardinality=32, base_width=4) return _create_dla('dla60x', pretrained, **dict(model_args, **kwargs)) @register_model def dla102(pretrained=False, **kwargs) -> DLA: model_args = dict(levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, shortcut_root=True) return _create_dla('dla102', pretrained, **dict(model_args, **kwargs)) @register_model def dla102x(pretrained=False, **kwargs) -> DLA: model_args = dict(levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, cardinality=32, base_width=4, shortcut_root=True) return _create_dla('dla102x', pretrained, **dict(model_args, **kwargs)) @register_model def dla102x2(pretrained=False, **kwargs) -> DLA: model_args = dict(levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, cardinality=64, base_width=4, shortcut_root=True) return _create_dla('dla102x2', pretrained, **dict(model_args, **kwargs)) @register_model def dla169(pretrained=False, **kwargs) -> DLA: model_args = dict(levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, shortcut_root=True) return _create_dla('dla169', pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/dpn.py """""" from collections import OrderedDict from functools import partial from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DPN_MEAN, IMAGENET_DPN_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import BatchNormAct2d, ConvNormAct, create_conv2d, create_classifier, get_norm_act_layer from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['DPN'] class CatBnAct(nn.Module): def __init__(self, in_chs, norm_layer=BatchNormAct2d): super(CatBnAct, self).__init__() self.bn = norm_layer(in_chs, eps=0.001) @torch.jit._overload_method def forward(self, x): pass @torch.jit._overload_method def forward(self, x): pass def forward(self, x): if isinstance(x, tuple): x = torch.cat(x, dim=1) return self.bn(x) class BnActConv2d(nn.Module): def __init__(self, in_chs, out_chs, kernel_size, stride, groups=1, norm_layer=BatchNormAct2d): super(BnActConv2d, self).__init__() self.bn = norm_layer(in_chs, eps=0.001) self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, groups=groups) def forward(self, x): return self.conv(self.bn(x)) class DualPathBlock(nn.Module): def __init__(self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False): super(DualPathBlock, self).__init__() self.num_1x1_c = num_1x1_c self.inc = inc self.b = b if block_type == 'proj': self.key_stride = 1 self.has_proj = True elif block_type == 'down': self.key_stride = 2 self.has_proj = True else: assert block_type == 'normal' self.key_stride = 1 self.has_proj = False self.c1x1_w_s1 = None self.c1x1_w_s2 = None if self.has_proj: if self.key_stride == 2: self.c1x1_w_s2 = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2) else: self.c1x1_w_s1 = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1) self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1) self.c3x3_b = BnActConv2d(in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=self.key_stride, groups=groups) if b: self.c1x1_c = CatBnAct(in_chs=num_3x3_b) 
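# DPN-B variant (b=True): after the shared BN+act in CatBnAct, the residual and
# dense outputs come from two separate 1x1 convs -- c1x1_c1 projects to
# num_1x1_c channels for the residual path, c1x1_c2 to 'inc' channels for the
# densely concatenated path -- rather than slicing one combined projection as
# in the else-branch below.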
self.c1x1_c1 = create_conv2d(num_3x3_b, num_1x1_c, kernel_size=1) self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1) else: self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1) self.c1x1_c1 = None self.c1x1_c2 = None @torch.jit._overload_method def forward(self, x): pass @torch.jit._overload_method def forward(self, x): pass def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]: if isinstance(x, tuple): x_in = torch.cat(x, dim=1) else: x_in = x if self.c1x1_w_s1 is None and self.c1x1_w_s2 is None: x_s1 = x[0] x_s2 = x[1] else: if self.c1x1_w_s1 is not None: x_s = self.c1x1_w_s1(x_in) else: x_s = self.c1x1_w_s2(x_in) x_s1 = x_s[:, :self.num_1x1_c, :, :] x_s2 = x_s[:, self.num_1x1_c:, :, :] x_in = self.c1x1_a(x_in) x_in = self.c3x3_b(x_in) x_in = self.c1x1_c(x_in) if self.c1x1_c1 is not None: out1 = self.c1x1_c1(x_in) out2 = self.c1x1_c2(x_in) else: out1 = x_in[:, :self.num_1x1_c, :, :] out2 = x_in[:, self.num_1x1_c:, :, :] resid = x_s1 + out1 dense = torch.cat([x_s2, out2], dim=1) return (resid, dense) class DPN(nn.Module): def __init__(self, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), k_r=96, groups=32, num_classes=1000, in_chans=3, output_stride=32, global_pool='avg', small=False, num_init_features=64, b=False, drop_rate=0.0, norm_layer='batchnorm2d', act_layer='relu', fc_act_layer='elu'): super(DPN, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.b = b assert output_stride == 32 norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=act_layer), eps=0.001) fc_norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=fc_act_layer), eps=0.001, inplace=False) bw_factor = 1 if small else 4 blocks = OrderedDict() blocks['conv1_1'] = ConvNormAct(in_chans, num_init_features, kernel_size=3 if small else 7, stride=2, norm_layer=norm_layer) blocks['conv1_pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.feature_info = [dict(num_chs=num_init_features, reduction=2, module='features.conv1_1')] bw = 64 * bw_factor inc = inc_sec[0] r = k_r * bw // (64 * bw_factor) blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b) in_chs = bw + 3 * inc for i in range(2, k_sec[0] + 1): blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=4, module=f'features.conv2_{k_sec[0]}')] bw = 128 * bw_factor inc = inc_sec[1] r = k_r * bw // (64 * bw_factor) blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) in_chs = bw + 3 * inc for i in range(2, k_sec[1] + 1): blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=8, module=f'features.conv3_{k_sec[1]}')] bw = 256 * bw_factor inc = inc_sec[2] r = k_r * bw // (64 * bw_factor) blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) in_chs = bw + 3 * inc for i in range(2, k_sec[2] + 1): blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=16, module=f'features.conv4_{k_sec[2]}')] bw = 512 * bw_factor inc = inc_sec[3] r = k_r * bw // (64 * bw_factor) blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) in_chs = bw + 3 * inc for i in range(2, k_sec[3] + 1): blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += 
[dict(num_chs=in_chs, reduction=32, module=f'features.conv5_{k_sec[3]}')] blocks['conv5_bn_ac'] = CatBnAct(in_chs, norm_layer=fc_norm_layer) self.num_features = self.head_hidden_size = in_chs self.features = nn.Sequential(blocks) (self.global_pool, self.classifier) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^features\\.conv1', blocks=[('^features\\.conv(\\d+)' if coarse else '^features\\.conv(\\d+)_(\\d+)', None), ('^features\\.conv5_bn_ac', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classifier def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.classifier) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() def forward_features(self, x): return self.features(x) def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) if self.drop_rate > 0.0: x = F.dropout(x, p=self.drop_rate, training=self.training) if pre_logits: return self.flatten(x) x = self.classifier(x) return self.flatten(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_dpn(variant, pretrained=False, **kwargs): return build_model_with_cfg(DPN, variant, pretrained, feature_cfg=dict(feature_concat=True, flatten_sequential=True), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DPN_MEAN, 'std': IMAGENET_DPN_STD, 'first_conv': 'features.conv1_1.conv', 'classifier': 'classifier', **kwargs} default_cfgs = generate_default_cfgs({'dpn48b.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'dpn68.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn68b.ra_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'dpn68b.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn92.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn98.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn131.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn107.mx_in1k': _cfg(hf_hub_id='timm/')}) @register_model def dpn48b(pretrained=False, **kwargs) -> DPN: model_args = dict(small=True, num_init_features=10, k_r=128, groups=32, b=True, k_sec=(3, 4, 6, 3), inc_sec=(16, 32, 32, 64), act_layer='silu') return _create_dpn('dpn48b', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn68(pretrained=False, **kwargs) -> DPN: model_args = dict(small=True, num_init_features=10, k_r=128, groups=32, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64)) return _create_dpn('dpn68', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn68b(pretrained=False, **kwargs) -> DPN: model_args = dict(small=True, num_init_features=10, k_r=128, groups=32, b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64)) return _create_dpn('dpn68b', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn92(pretrained=False, **kwargs) -> DPN: model_args = dict(num_init_features=64, k_r=96, groups=32, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128)) 
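# As in the other registered entry points, per-variant defaults are merged with
# caller overrides via dict(model_args, **kwargs), so explicit kwargs win on
# conflicts. A hedged usage sketch (model name taken from the registry above):
#   import timm
#   m = timm.create_model('dpn92', pretrained=False, num_classes=10)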
return _create_dpn('dpn92', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn98(pretrained=False, **kwargs) -> DPN: model_args = dict(num_init_features=96, k_r=160, groups=40, k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128)) return _create_dpn('dpn98', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn131(pretrained=False, **kwargs) -> DPN: model_args = dict(num_init_features=128, k_r=160, groups=40, k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128)) return _create_dpn('dpn131', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn107(pretrained=False, **kwargs) -> DPN: model_args = dict(num_init_features=128, k_r=200, groups=50, k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128)) return _create_dpn('dpn107', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/edgenext.py """""" import math from functools import partial from typing import Optional, Tuple import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import trunc_normal_tf_, DropPath, LayerNorm2d, Mlp, create_conv2d, NormMlpClassifierHead, ClassifierHead from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._manipulate import named_apply, checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['EdgeNeXt'] @register_notrace_module class PositionalEncodingFourier(nn.Module): def __init__(self, hidden_dim=32, dim=768, temperature=10000): super().__init__() self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) self.scale = 2 * math.pi self.temperature = temperature self.hidden_dim = hidden_dim self.dim = dim def forward(self, shape: Tuple[int, int, int]): device = self.token_projection.weight.device dtype = self.token_projection.weight.dtype inv_mask = ~torch.zeros(shape).to(device=device, dtype=torch.bool) y_embed = inv_mask.cumsum(1, dtype=torch.float32) x_embed = inv_mask.cumsum(2, dtype=torch.float32) eps = 1e-06 y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.hidden_dim, dtype=torch.int64, device=device).to(torch.float32) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) pos = self.token_projection(pos.to(dtype)) return pos class ConvBlock(nn.Module): def __init__(self, dim, dim_out=None, kernel_size=7, stride=1, conv_bias=True, expand_ratio=4, ls_init_value=1e-06, norm_layer=partial(nn.LayerNorm, eps=1e-06), act_layer=nn.GELU, drop_path=0.0): super().__init__() dim_out = dim_out or dim self.shortcut_after_dw = stride > 1 or dim != dim_out self.conv_dw = create_conv2d(dim, dim_out, kernel_size=kernel_size, stride=stride, depthwise=True, bias=conv_bias) self.norm = norm_layer(dim_out) self.mlp = Mlp(dim_out, int(expand_ratio * dim_out), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(dim_out)) if ls_init_value > 0 else None self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): shortcut = x x = 
self.conv_dw(x) if self.shortcut_after_dw: shortcut = x x = x.permute(0, 2, 3, 1) x = self.norm(x) x = self.mlp(x) if self.gamma is not None: x = self.gamma * x x = x.permute(0, 3, 1, 2) x = shortcut + self.drop_path(x) return x class CrossCovarianceAttn(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): (B, N, C) = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 4, 1) (q, k, v) = qkv.unbind(0) attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1) * self.temperature attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.permute(0, 3, 1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x @torch.jit.ignore def no_weight_decay(self): return {'temperature'} class SplitTransposeBlock(nn.Module): def __init__(self, dim, num_scales=1, num_heads=8, expand_ratio=4, use_pos_emb=True, conv_bias=True, qkv_bias=True, ls_init_value=1e-06, norm_layer=partial(nn.LayerNorm, eps=1e-06), act_layer=nn.GELU, drop_path=0.0, attn_drop=0.0, proj_drop=0.0): super().__init__() width = max(int(math.ceil(dim / num_scales)), int(math.floor(dim // num_scales))) self.width = width self.num_scales = max(1, num_scales - 1) convs = [] for i in range(self.num_scales): convs.append(create_conv2d(width, width, kernel_size=3, depthwise=True, bias=conv_bias)) self.convs = nn.ModuleList(convs) self.pos_embd = None if use_pos_emb: self.pos_embd = PositionalEncodingFourier(dim=dim) self.norm_xca = norm_layer(dim) self.gamma_xca = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None self.xca = CrossCovarianceAttn(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.norm = norm_layer(dim, eps=1e-06) self.mlp = Mlp(dim, int(expand_ratio * dim), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): shortcut = x spx = x.chunk(len(self.convs) + 1, dim=1) spo = [] sp = spx[0] for (i, conv) in enumerate(self.convs): if i > 0: sp = sp + spx[i] sp = conv(sp) spo.append(sp) spo.append(spx[-1]) x = torch.cat(spo, 1) (B, C, H, W) = x.shape x = x.reshape(B, C, H * W).permute(0, 2, 1) if self.pos_embd is not None: pos_encoding = self.pos_embd((B, H, W)).reshape(B, -1, x.shape[1]).permute(0, 2, 1) x = x + pos_encoding x = x + self.drop_path(self.gamma_xca * self.xca(self.norm_xca(x))) x = x.reshape(B, H, W, C) x = self.norm(x) x = self.mlp(x) if self.gamma is not None: x = self.gamma * x x = x.permute(0, 3, 1, 2) x = shortcut + self.drop_path(x) return x class EdgeNeXtStage(nn.Module): def __init__(self, in_chs, out_chs, stride=2, depth=2, num_global_blocks=1, num_heads=4, scales=2, kernel_size=7, expand_ratio=4, use_pos_emb=False, downsample_block=False, conv_bias=True, ls_init_value=1.0, drop_path_rates=None, norm_layer=LayerNorm2d, norm_layer_cl=partial(nn.LayerNorm, eps=1e-06), act_layer=nn.GELU): super().__init__() self.grad_checkpointing = False if downsample_block or stride == 1: self.downsample = nn.Identity() else: self.downsample = nn.Sequential(norm_layer(in_chs), nn.Conv2d(in_chs, out_chs, kernel_size=2, 
stride=2, bias=conv_bias)) in_chs = out_chs stage_blocks = [] for i in range(depth): if i < depth - num_global_blocks: stage_blocks.append(ConvBlock(dim=in_chs, dim_out=out_chs, stride=stride if downsample_block and i == 0 else 1, conv_bias=conv_bias, kernel_size=kernel_size, expand_ratio=expand_ratio, ls_init_value=ls_init_value, drop_path=drop_path_rates[i], norm_layer=norm_layer_cl, act_layer=act_layer)) else: stage_blocks.append(SplitTransposeBlock(dim=in_chs, num_scales=scales, num_heads=num_heads, expand_ratio=expand_ratio, use_pos_emb=use_pos_emb, conv_bias=conv_bias, ls_init_value=ls_init_value, drop_path=drop_path_rates[i], norm_layer=norm_layer_cl, act_layer=act_layer)) in_chs = out_chs self.blocks = nn.Sequential(*stage_blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class EdgeNeXt(nn.Module): def __init__(self, in_chans=3, num_classes=1000, global_pool='avg', dims=(24, 48, 88, 168), depths=(3, 3, 9, 3), global_block_counts=(0, 1, 1, 1), kernel_sizes=(3, 5, 7, 9), heads=(8, 8, 8, 8), d2_scales=(2, 2, 3, 4), use_pos_emb=(False, True, False, False), ls_init_value=1e-06, head_init_scale=1.0, expand_ratio=4, downsample_block=False, conv_bias=True, stem_type='patch', head_norm_first=False, act_layer=nn.GELU, drop_path_rate=0.0, drop_rate=0.0): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.drop_rate = drop_rate norm_layer = partial(LayerNorm2d, eps=1e-06) norm_layer_cl = partial(nn.LayerNorm, eps=1e-06) self.feature_info = [] assert stem_type in ('patch', 'overlap') if stem_type == 'patch': self.stem = nn.Sequential(nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4, bias=conv_bias), norm_layer(dims[0])) else: self.stem = nn.Sequential(nn.Conv2d(in_chans, dims[0], kernel_size=9, stride=4, padding=9 // 2, bias=conv_bias), norm_layer(dims[0])) curr_stride = 4 stages = [] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] in_chs = dims[0] for i in range(4): stride = 2 if curr_stride == 2 or i > 0 else 1 curr_stride *= stride stages.append(EdgeNeXtStage(in_chs=in_chs, out_chs=dims[i], stride=stride, depth=depths[i], num_global_blocks=global_block_counts[i], num_heads=heads[i], drop_path_rates=dp_rates[i], scales=d2_scales[i], expand_ratio=expand_ratio, kernel_size=kernel_sizes[i], use_pos_emb=use_pos_emb[i], ls_init_value=ls_init_value, downsample_block=downsample_block, conv_bias=conv_bias, norm_layer=norm_layer, norm_layer_cl=norm_layer_cl, act_layer=act_layer)) in_chs = dims[i] self.feature_info += [dict(num_chs=in_chs, reduction=curr_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_features = self.head_hidden_size = dims[-1] if head_norm_first: self.norm_pre = norm_layer(self.num_features) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) else: self.norm_pre = nn.Identity() self.head = NormMlpClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, norm_layer=norm_layer) named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks='^stages\\.(\\d+)' if coarse else [('^stages\\.(\\d+)\\.downsample', (0,)), ('^stages\\.(\\d+)\\.blocks\\.(\\d+)', None), ('^norm_pre', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): 
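# Toggles activation (gradient) checkpointing per stage: when a stage's
# grad_checkpointing flag is set, EdgeNeXtStage.forward routes its blocks
# through checkpoint_seq (skipped under torch.jit scripting), trading extra
# forward compute for reduced activation memory during training.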
for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm_pre(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): trunc_normal_tf_(module.weight, std=0.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): trunc_normal_tf_(module.weight, std=0.02) nn.init.zeros_(module.bias) if name and 'head.' in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def checkpoint_filter_fn(state_dict, model): if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict: return state_dict if 'model_ema' in state_dict: state_dict = state_dict['model_ema'] elif 'model' in state_dict: state_dict = state_dict['model'] elif 'state_dict' in state_dict: state_dict = state_dict['state_dict'] out_dict = {} import re for (k, v) in state_dict.items(): k = k.replace('downsample_layers.0.', 'stem.') k = re.sub('stages.([0-9]+).([0-9]+)', 'stages.\\1.blocks.\\2', k) k = re.sub('downsample_layers.([0-9]+).([0-9]+)', 'stages.\\1.downsample.\\2', k) k = k.replace('dwconv', 'conv_dw') k = k.replace('pwconv', 'mlp.fc') k = k.replace('head.', 'head.fc.') if k.startswith('norm.'): k = k.replace('norm', 'head.norm') if v.ndim == 2 and 'head' not in k: model_shape = model.state_dict()[k].shape v = v.reshape(model_shape) out_dict[k] = v return out_dict def _create_edgenext(variant, pretrained=False, **kwargs): model = build_model_with_cfg(EdgeNeXt, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'edgenext_xx_small.in1k': _cfg(hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'edgenext_x_small.in1k': _cfg(hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'edgenext_small.usi_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'edgenext_base.usi_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'edgenext_base.in21k_ft_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'edgenext_small_rw.sw_in1k': _cfg(hf_hub_id='timm/', test_input_size=(3, 320, 320), test_crop_pct=1.0)}) @register_model def edgenext_xx_small(pretrained=False, **kwargs) -> EdgeNeXt: model_args = dict(depths=(2, 2, 6, 2), dims=(24, 48, 88, 168), heads=(4, 4, 4, 4)) return _create_edgenext('edgenext_xx_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_x_small(pretrained=False, **kwargs) -> EdgeNeXt: model_args = dict(depths=(3, 3, 9, 3), dims=(32, 64, 100, 192), heads=(4, 4, 4, 4)) return 
_create_edgenext('edgenext_x_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_small(pretrained=False, **kwargs) -> EdgeNeXt: model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 160, 304)) return _create_edgenext('edgenext_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_base(pretrained=False, **kwargs) -> EdgeNeXt: model_args = dict(depths=[3, 3, 9, 3], dims=[80, 160, 288, 584]) return _create_edgenext('edgenext_base', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_small_rw(pretrained=False, **kwargs) -> EdgeNeXt: model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 192, 384), downsample_block=True, conv_bias=False, stem_type='overlap') return _create_edgenext('edgenext_small_rw', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/efficientformer.py """""" from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, to_2tuple, Mlp, ndgrid from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['EfficientFormer'] EfficientFormer_width = {'l1': (48, 96, 224, 448), 'l3': (64, 128, 320, 512), 'l7': (96, 192, 384, 768)} EfficientFormer_depth = {'l1': (3, 2, 6, 4), 'l3': (4, 4, 12, 6), 'l7': (6, 6, 18, 8)} class Attention(torch.nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__(self, dim=384, key_dim=32, num_heads=8, attn_ratio=4, resolution=7): super().__init__() self.num_heads = num_heads self.scale = key_dim ** (-0.5) self.key_dim = key_dim self.key_attn_dim = key_dim * num_heads self.val_dim = int(attn_ratio * key_dim) self.val_attn_dim = self.val_dim * num_heads self.attn_ratio = attn_ratio self.qkv = nn.Linear(dim, self.key_attn_dim * 2 + self.val_attn_dim) self.proj = nn.Linear(self.val_attn_dim, dim) resolution = to_2tuple(resolution) pos = torch.stack(ndgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1) rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() rel_pos = rel_pos[0] * resolution[1] + rel_pos[1] self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1])) self.register_buffer('attention_bias_idxs', rel_pos) self.attention_bias_cache = {} @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): (B, N, C) = x.shape qkv = self.qkv(x) qkv = qkv.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) (q, k, v) = qkv.split([self.key_dim, self.key_dim, self.val_dim], dim=3) attn = q @ k.transpose(-2, -1) * self.scale attn = attn + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim) x = self.proj(x) return x class Stem4(nn.Sequential): def __init__(self, in_chs, out_chs, act_layer=nn.ReLU, 
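# --- Editor's sketch (not part of the original file) -----------------------
# The relative-position bias table built in Attention above: every (query, key)
# pair on a 7x7 token grid maps to one of 49 learned bias values per head.
# torch.meshgrid(indexing='ij') stands in for timm's ndgrid helper.
import torch

res = (7, 7)
pos = torch.stack(torch.meshgrid(torch.arange(res[0]), torch.arange(res[1]), indexing='ij')).flatten(1)
rel_pos = (pos[..., :, None] - pos[..., None, :]).abs()
rel_pos = rel_pos[0] * res[1] + rel_pos[1]
print(rel_pos.shape)           # torch.Size([49, 49])
print(int(rel_pos.max()) + 1)  # 49 distinct bias slots, matching attention_biases' last dim
# ---------------------------------------------------------------------------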
norm_layer=nn.BatchNorm2d): super().__init__() self.stride = 4 self.add_module('conv1', nn.Conv2d(in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1)) self.add_module('norm1', norm_layer(out_chs // 2)) self.add_module('act1', act_layer()) self.add_module('conv2', nn.Conv2d(out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1)) self.add_module('norm2', norm_layer(out_chs)) self.add_module('act2', act_layer()) class Downsample(nn.Module): def __init__(self, in_chs, out_chs, kernel_size=3, stride=2, padding=None, norm_layer=nn.BatchNorm2d): super().__init__() if padding is None: padding = kernel_size // 2 self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding) self.norm = norm_layer(out_chs) def forward(self, x): x = self.conv(x) x = self.norm(x) return x class Flat(nn.Module): def __init__(self): super().__init__() def forward(self, x): x = x.flatten(2).transpose(1, 2) return x class Pooling(nn.Module): def __init__(self, pool_size=3): super().__init__() self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) def forward(self, x): return self.pool(x) - x class ConvMlpWithNorm(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Conv2d(in_features, hidden_features, 1) self.norm1 = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.act = act_layer() self.fc2 = nn.Conv2d(hidden_features, out_features, 1) self.norm2 = norm_layer(out_features) if norm_layer is not None else nn.Identity() self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.norm1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.norm2(x) x = self.drop(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-05, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class MetaBlock1d(nn.Module): def __init__(self, dim, mlp_ratio=4.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, proj_drop=0.0, drop_path=0.0, layer_scale_init_value=1e-05): super().__init__() self.norm1 = norm_layer(dim) self.token_mixer = Attention(dim) self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.ls1 = LayerScale(dim, layer_scale_init_value) self.ls2 = LayerScale(dim, layer_scale_init_value) def forward(self, x): x = x + self.drop_path(self.ls1(self.token_mixer(self.norm1(x)))) x = x + self.drop_path(self.ls2(self.mlp(self.norm2(x)))) return x class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-05, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class MetaBlock2d(nn.Module): def __init__(self, dim, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, proj_drop=0.0, drop_path=0.0, layer_scale_init_value=1e-05): super().__init__() self.token_mixer = Pooling(pool_size=pool_size) self.ls1 = LayerScale2d(dim, layer_scale_init_value) self.drop_path1 = DropPath(drop_path) 
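# --- Editor's sketch (not part of the original file) -----------------------
# Pooling above returns pool(x) - x, so after the residual add in MetaBlock2d
# the net update is x + ls * (pool(x) - x): a learned blend toward the local
# average (the PoolFormer-style token mixer). Quick numeric check:
import torch
import torch.nn as nn

pool = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
x = torch.randn(1, 8, 14, 14)
assert torch.allclose(x + (pool(x) - x), pool(x), atol=1e-6)
# ---------------------------------------------------------------------------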
if drop_path > 0.0 else nn.Identity() self.mlp = ConvMlpWithNorm(dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, norm_layer=norm_layer, drop=proj_drop) self.ls2 = LayerScale2d(dim, layer_scale_init_value) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): x = x + self.drop_path1(self.ls1(self.token_mixer(x))) x = x + self.drop_path2(self.ls2(self.mlp(x))) return x class EfficientFormerStage(nn.Module): def __init__(self, dim, dim_out, depth, downsample=True, num_vit=1, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, norm_layer_cl=nn.LayerNorm, proj_drop=0.0, drop_path=0.0, layer_scale_init_value=1e-05): super().__init__() self.grad_checkpointing = False if downsample: self.downsample = Downsample(in_chs=dim, out_chs=dim_out, norm_layer=norm_layer) dim = dim_out else: assert dim == dim_out self.downsample = nn.Identity() blocks = [] if num_vit and num_vit >= depth: blocks.append(Flat()) for block_idx in range(depth): remain_idx = depth - block_idx - 1 if num_vit and num_vit > remain_idx: blocks.append(MetaBlock1d(dim, mlp_ratio=mlp_ratio, act_layer=act_layer, norm_layer=norm_layer_cl, proj_drop=proj_drop, drop_path=drop_path[block_idx], layer_scale_init_value=layer_scale_init_value)) else: blocks.append(MetaBlock2d(dim, pool_size=pool_size, mlp_ratio=mlp_ratio, act_layer=act_layer, norm_layer=norm_layer, proj_drop=proj_drop, drop_path=drop_path[block_idx], layer_scale_init_value=layer_scale_init_value)) if num_vit and num_vit == remain_idx: blocks.append(Flat()) self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class EfficientFormer(nn.Module): def __init__(self, depths, embed_dims=None, in_chans=3, num_classes=1000, global_pool='avg', downsamples=None, num_vit=0, mlp_ratios=4, pool_size=3, layer_scale_init_value=1e-05, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, norm_layer_cl=nn.LayerNorm, drop_rate=0.0, proj_drop_rate=0.0, drop_path_rate=0.0, **kwargs): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.stem = Stem4(in_chans, embed_dims[0], norm_layer=norm_layer) prev_dim = embed_dims[0] self.num_stages = len(depths) last_stage = self.num_stages - 1 dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] downsamples = downsamples or (False,) + (True,) * (self.num_stages - 1) stages = [] self.feature_info = [] for i in range(self.num_stages): stage = EfficientFormerStage(prev_dim, embed_dims[i], depths[i], downsample=downsamples[i], num_vit=num_vit if i == last_stage else 0, pool_size=pool_size, mlp_ratio=mlp_ratios, act_layer=act_layer, norm_layer_cl=norm_layer_cl, norm_layer=norm_layer, proj_drop=proj_drop_rate, drop_path=dpr[i], layer_scale_init_value=layer_scale_init_value) prev_dim = embed_dims[i] stages.append(stage) self.feature_info += [dict(num_chs=embed_dims[i], reduction=2 ** (1 + i), module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_features = self.head_hidden_size = embed_dims[-1] self.norm = norm_layer_cl(self.num_features) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() self.distilled_training = False self.apply(self._init_weights) def _init_weights(self, m): if 
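# --- Editor's sketch (not part of the original file) -----------------------
# Block layout produced by EfficientFormerStage above for an illustrative
# depth=6, num_vit=2: the final num_vit blocks are 1d token blocks, with a
# Flat() (NCHW -> NLC) inserted exactly at the 2d -> 1d transition.
depth, num_vit = 6, 2
layout = []
for block_idx in range(depth):
    remain_idx = depth - block_idx - 1
    layout.append('MetaBlock1d' if num_vit > remain_idx else 'MetaBlock2d')
    if num_vit == remain_idx:
        layout.append('Flat')
print(layout)  # 4x 'MetaBlock2d', then 'Flat', then 2x 'MetaBlock1d'
# ---------------------------------------------------------------------------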
isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {k for (k, _) in self.named_parameters() if 'attention_biases' in k} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^stem', blocks=[('^stages\\.(\\d+)', None), ('^norm', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return (self.head, self.head_dist) def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.stages), indices) x = self.stem(x) (B, C, H, W) = x.shape last_idx = self.num_stages - 1 if torch.jit.is_scripting() or not stop_early: stages = self.stages else: stages = self.stages[:max_index + 1] feat_idx = 0 for (feat_idx, stage) in enumerate(stages): x = stage(x) if feat_idx < last_idx: (B, C, H, W) = x.shape if feat_idx in take_indices: if feat_idx == last_idx: x_inter = self.norm(x) if norm else x intermediates.append(x_inter.reshape(B, H // 2, W // 2, -1).permute(0, 3, 1, 2)) else: intermediates.append(x) if intermediates_only: return intermediates if feat_idx == last_idx: x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool == 'avg': x = x.mean(dim=1) x = self.head_drop(x) if pre_logits: return x (x, x_dist) = (self.head(x), self.head_dist(x)) if self.distilled_training and self.training and (not torch.jit.is_scripting()): return (x, x_dist) else: return (x + x_dist) / 2 def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'stem.0.weight' in state_dict: return state_dict out_dict = {} import re stage_idx = 0 for (k, v) in state_dict.items(): if k.startswith('patch_embed'): k = k.replace('patch_embed.0', 'stem.conv1') k = k.replace('patch_embed.1', 'stem.norm1') k = k.replace('patch_embed.3', 'stem.conv2') k = k.replace('patch_embed.4', 'stem.norm2') if re.match('network\\.(\\d+)\\.proj\\.weight', k): stage_idx += 1 k = re.sub('network.(\\d+).(\\d+)', f'stages.{stage_idx}.blocks.\\2', k) k = re.sub('network.(\\d+).proj', 
f'stages.{stage_idx}.downsample.conv', k) k = re.sub('network.(\\d+).norm', f'stages.{stage_idx}.downsample.norm', k) k = re.sub('layer_scale_([0-9])', 'ls\\1.gamma', k) k = k.replace('dist_head', 'head_dist') out_dict[k] = v return out_dict def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True, 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1', 'classifier': ('head', 'head_dist'), **kwargs} default_cfgs = generate_default_cfgs({'efficientformer_l1.snap_dist_in1k': _cfg(hf_hub_id='timm/'), 'efficientformer_l3.snap_dist_in1k': _cfg(hf_hub_id='timm/'), 'efficientformer_l7.snap_dist_in1k': _cfg(hf_hub_id='timm/')}) def _create_efficientformer(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 4) model = build_model_with_cfg(EfficientFormer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) return model @register_model def efficientformer_l1(pretrained=False, **kwargs) -> EfficientFormer: model_args = dict(depths=EfficientFormer_depth['l1'], embed_dims=EfficientFormer_width['l1'], num_vit=1) return _create_efficientformer('efficientformer_l1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformer_l3(pretrained=False, **kwargs) -> EfficientFormer: model_args = dict(depths=EfficientFormer_depth['l3'], embed_dims=EfficientFormer_width['l3'], num_vit=4) return _create_efficientformer('efficientformer_l3', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformer_l7(pretrained=False, **kwargs) -> EfficientFormer: model_args = dict(depths=EfficientFormer_depth['l7'], embed_dims=EfficientFormer_width['l7'], num_vit=8) return _create_efficientformer('efficientformer_l7', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/efficientformer_v2.py """""" import math from functools import partial from typing import Dict, Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_conv2d, create_norm_layer, get_act_layer, get_norm_layer, ConvNormAct from timm.layers import DropPath, trunc_normal_, to_2tuple, to_ntuple, ndgrid from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['EfficientFormerV2'] EfficientFormer_width = {'L': (40, 80, 192, 384), 'S2': (32, 64, 144, 288), 'S1': (32, 48, 120, 224), 'S0': (32, 48, 96, 176)} EfficientFormer_depth = {'L': (5, 5, 15, 10), 'S2': (4, 4, 12, 8), 'S1': (3, 3, 9, 6), 'S0': (2, 2, 6, 4)} EfficientFormer_expansion_ratios = {'L': (4, 4, (4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4), (4, 4, 4, 3, 3, 3, 3, 4, 4, 4)), 'S2': (4, 4, (4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4), (4, 4, 3, 3, 3, 3, 4, 4)), 'S1': (4, 4, (4, 4, 3, 3, 3, 3, 4, 4, 4), (4, 4, 3, 3, 4, 4)), 'S0': (4, 4, (4, 3, 3, 3, 4, 4), (4, 3, 3, 4))} class ConvNorm(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, bias=True, norm_layer='batchnorm2d', norm_kwargs=None): norm_kwargs = norm_kwargs or {} super(ConvNorm, self).__init__() self.conv = create_conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.bn 
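# --- Editor's usage sketch (not part of the original file; assumes a recent
# timm install where these models and features_only extraction are available)
import timm
import torch

model = timm.create_model('efficientformer_l1', pretrained=False, features_only=True)
feats = model(torch.randn(1, 3, 224, 224))
print([tuple(f.shape) for f in feats])  # per-stage NCHW feature maps
# ---------------------------------------------------------------------------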
= create_norm_layer(norm_layer, out_channels, **norm_kwargs) def forward(self, x): x = self.conv(x) x = self.bn(x) return x class Attention2d(torch.nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__(self, dim=384, key_dim=32, num_heads=8, attn_ratio=4, resolution=7, act_layer=nn.GELU, stride=None): super().__init__() self.num_heads = num_heads self.scale = key_dim ** (-0.5) self.key_dim = key_dim resolution = to_2tuple(resolution) if stride is not None: resolution = tuple([math.ceil(r / stride) for r in resolution]) self.stride_conv = ConvNorm(dim, dim, kernel_size=3, stride=stride, groups=dim) self.upsample = nn.Upsample(scale_factor=stride, mode='bilinear') else: self.stride_conv = None self.upsample = None self.resolution = resolution self.N = self.resolution[0] * self.resolution[1] self.d = int(attn_ratio * key_dim) self.dh = int(attn_ratio * key_dim) * num_heads self.attn_ratio = attn_ratio kh = self.key_dim * self.num_heads self.q = ConvNorm(dim, kh) self.k = ConvNorm(dim, kh) self.v = ConvNorm(dim, self.dh) self.v_local = ConvNorm(self.dh, self.dh, kernel_size=3, groups=self.dh) self.talking_head1 = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1) self.talking_head2 = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1) self.act = act_layer() self.proj = ConvNorm(self.dh, dim, 1) pos = torch.stack(ndgrid(torch.arange(self.resolution[0]), torch.arange(self.resolution[1]))).flatten(1) rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() rel_pos = rel_pos[0] * self.resolution[1] + rel_pos[1] self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, self.N)) self.register_buffer('attention_bias_idxs', torch.LongTensor(rel_pos), persistent=False) self.attention_bias_cache = {} @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): (B, C, H, W) = x.shape if self.stride_conv is not None: x = self.stride_conv(x) q = self.q(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) k = self.k(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 2, 3) v = self.v(x) v_local = self.v_local(v) v = v.reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) attn = q @ k * self.scale attn = attn + self.get_attention_biases(x.device) attn = self.talking_head1(attn) attn = attn.softmax(dim=-1) attn = self.talking_head2(attn) x = (attn @ v).transpose(2, 3) x = x.reshape(B, self.dh, self.resolution[0], self.resolution[1]) + v_local if self.upsample is not None: x = self.upsample(x) x = self.act(x) x = self.proj(x) return x class LocalGlobalQuery(torch.nn.Module): def __init__(self, in_dim, out_dim): super().__init__() self.pool = nn.AvgPool2d(1, 2, 0) self.local = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=2, padding=1, groups=in_dim) self.proj = ConvNorm(in_dim, out_dim, 1) def forward(self, x): local_q = self.local(x) pool_q = self.pool(x) q = local_q + pool_q q = self.proj(q) return q class Attention2dDownsample(torch.nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__(self, dim=384, key_dim=16, num_heads=8, attn_ratio=4, resolution=7, 
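# --- Editor's sketch (not part of the original file) -----------------------
# Resolution bookkeeping in Attention2d above when stride is set: attention
# runs on a ceil-divided token grid, then bilinear upsampling restores the
# input resolution before the output projection.
import math
import torch
import torch.nn as nn

resolution, stride = (14, 14), 2
attn_res = tuple(math.ceil(r / stride) for r in resolution)
print(attn_res, attn_res[0] * attn_res[1])  # (7, 7) -> 49 tokens attended over
up = nn.Upsample(scale_factor=stride, mode='bilinear')
print(tuple(up(torch.zeros(1, 8, *attn_res)).shape))  # back to (1, 8, 14, 14)
# ---------------------------------------------------------------------------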
out_dim=None, act_layer=nn.GELU): super().__init__() self.num_heads = num_heads self.scale = key_dim ** (-0.5) self.key_dim = key_dim self.resolution = to_2tuple(resolution) self.resolution2 = tuple([math.ceil(r / 2) for r in self.resolution]) self.N = self.resolution[0] * self.resolution[1] self.N2 = self.resolution2[0] * self.resolution2[1] self.d = int(attn_ratio * key_dim) self.dh = int(attn_ratio * key_dim) * num_heads self.attn_ratio = attn_ratio self.out_dim = out_dim or dim kh = self.key_dim * self.num_heads self.q = LocalGlobalQuery(dim, kh) self.k = ConvNorm(dim, kh, 1) self.v = ConvNorm(dim, self.dh, 1) self.v_local = ConvNorm(self.dh, self.dh, kernel_size=3, stride=2, groups=self.dh) self.act = act_layer() self.proj = ConvNorm(self.dh, self.out_dim, 1) self.attention_biases = nn.Parameter(torch.zeros(num_heads, self.N)) k_pos = torch.stack(ndgrid(torch.arange(self.resolution[0]), torch.arange(self.resolution[1]))).flatten(1) q_pos = torch.stack(ndgrid(torch.arange(0, self.resolution[0], step=2), torch.arange(0, self.resolution[1], step=2))).flatten(1) rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs() rel_pos = rel_pos[0] * self.resolution[1] + rel_pos[1] self.register_buffer('attention_bias_idxs', rel_pos, persistent=False) self.attention_bias_cache = {} @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): (B, C, H, W) = x.shape q = self.q(x).reshape(B, self.num_heads, -1, self.N2).permute(0, 1, 3, 2) k = self.k(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 2, 3) v = self.v(x) v_local = self.v_local(v) v = v.reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) attn = q @ k * self.scale attn = attn + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (attn @ v).transpose(2, 3) x = x.reshape(B, self.dh, self.resolution2[0], self.resolution2[1]) + v_local x = self.act(x) x = self.proj(x) return x class Downsample(nn.Module): def __init__(self, in_chs, out_chs, kernel_size=3, stride=2, padding=1, resolution=7, use_attn=False, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d): super().__init__() kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) padding = to_2tuple(padding) norm_layer = norm_layer or nn.Identity() self.conv = ConvNorm(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding, norm_layer=norm_layer) if use_attn: self.attn = Attention2dDownsample(dim=in_chs, out_dim=out_chs, resolution=resolution, act_layer=act_layer) else: self.attn = None def forward(self, x): out = self.conv(x) if self.attn is not None: return self.attn(x) + out return out class ConvMlpWithNorm(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, drop=0.0, mid_conv=False): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = ConvNormAct(in_features, hidden_features, 1, bias=True, norm_layer=norm_layer, act_layer=act_layer) if mid_conv: self.mid = ConvNormAct(hidden_features, hidden_features, 3, 
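# --- Editor's sketch (not part of the original file) -----------------------
# Bias indexing in Attention2dDownsample above: queries live on the stride-2
# grid (N2 tokens) while keys cover the full grid (N tokens), so the bias
# index table is rectangular rather than square.
import torch

res = (8, 8)
k_pos = torch.stack(torch.meshgrid(torch.arange(res[0]), torch.arange(res[1]), indexing='ij')).flatten(1)
q_pos = torch.stack(torch.meshgrid(torch.arange(0, res[0], 2), torch.arange(0, res[1], 2), indexing='ij')).flatten(1)
rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs()
rel_pos = rel_pos[0] * res[1] + rel_pos[1]
print(tuple(rel_pos.shape))  # (16, 64): one bias slot per (query, key) pair
# ---------------------------------------------------------------------------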
groups=hidden_features, bias=True, norm_layer=norm_layer, act_layer=act_layer) else: self.mid = nn.Identity() self.drop1 = nn.Dropout(drop) self.fc2 = ConvNorm(hidden_features, out_features, 1, norm_layer=norm_layer) self.drop2 = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.mid(x) x = self.drop1(x) x = self.fc2(x) x = self.drop2(x) return x class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-05, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class EfficientFormerV2Block(nn.Module): def __init__(self, dim, mlp_ratio=4.0, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, proj_drop=0.0, drop_path=0.0, layer_scale_init_value=1e-05, resolution=7, stride=None, use_attn=True): super().__init__() if use_attn: self.token_mixer = Attention2d(dim, resolution=resolution, act_layer=act_layer, stride=stride) self.ls1 = LayerScale2d(dim, layer_scale_init_value) if layer_scale_init_value is not None else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() else: self.token_mixer = None self.ls1 = None self.drop_path1 = None self.mlp = ConvMlpWithNorm(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, norm_layer=norm_layer, drop=proj_drop, mid_conv=True) self.ls2 = LayerScale2d(dim, layer_scale_init_value) if layer_scale_init_value is not None else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): if self.token_mixer is not None: x = x + self.drop_path1(self.ls1(self.token_mixer(x))) x = x + self.drop_path2(self.ls2(self.mlp(x))) return x class Stem4(nn.Sequential): def __init__(self, in_chs, out_chs, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d): super().__init__() self.stride = 4 self.conv1 = ConvNormAct(in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1, bias=True, norm_layer=norm_layer, act_layer=act_layer) self.conv2 = ConvNormAct(out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1, bias=True, norm_layer=norm_layer, act_layer=act_layer) class EfficientFormerV2Stage(nn.Module): def __init__(self, dim, dim_out, depth, resolution=7, downsample=True, block_stride=None, downsample_use_attn=False, block_use_attn=False, num_vit=1, mlp_ratio=4.0, proj_drop=0.0, drop_path=0.0, layer_scale_init_value=1e-05, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d): super().__init__() self.grad_checkpointing = False mlp_ratio = to_ntuple(depth)(mlp_ratio) resolution = to_2tuple(resolution) if downsample: self.downsample = Downsample(dim, dim_out, use_attn=downsample_use_attn, resolution=resolution, norm_layer=norm_layer, act_layer=act_layer) dim = dim_out resolution = tuple([math.ceil(r / 2) for r in resolution]) else: assert dim == dim_out self.downsample = nn.Identity() blocks = [] for block_idx in range(depth): remain_idx = depth - num_vit - 1 b = EfficientFormerV2Block(dim, resolution=resolution, stride=block_stride, mlp_ratio=mlp_ratio[block_idx], use_attn=block_use_attn and block_idx > remain_idx, proj_drop=proj_drop, drop_path=drop_path[block_idx], layer_scale_init_value=layer_scale_init_value, act_layer=act_layer, norm_layer=norm_layer) blocks += [b] self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class 
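# --- Editor's sketch (not part of the original file) -----------------------
# Attention placement rule in EfficientFormerV2Stage above: with
# remain_idx = depth - num_vit - 1, only the last num_vit blocks of an
# attention-enabled stage receive an Attention2d token mixer.
depth, num_vit = 8, 2
remain_idx = depth - num_vit - 1
print([i > remain_idx for i in range(depth)])  # six False, then True, True
# ---------------------------------------------------------------------------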
EfficientFormerV2(nn.Module): def __init__(self, depths, in_chans=3, img_size=224, global_pool='avg', embed_dims=None, downsamples=None, mlp_ratios=4, norm_layer='batchnorm2d', norm_eps=1e-05, act_layer='gelu', num_classes=1000, drop_rate=0.0, proj_drop_rate=0.0, drop_path_rate=0.0, layer_scale_init_value=1e-05, num_vit=0, distillation=True): super().__init__() assert global_pool in ('avg', '') self.num_classes = num_classes self.global_pool = global_pool self.feature_info = [] img_size = to_2tuple(img_size) norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps) act_layer = get_act_layer(act_layer) self.stem = Stem4(in_chans, embed_dims[0], act_layer=act_layer, norm_layer=norm_layer) prev_dim = embed_dims[0] stride = 4 num_stages = len(depths) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] downsamples = downsamples or (False,) + (True,) * (len(depths) - 1) mlp_ratios = to_ntuple(num_stages)(mlp_ratios) stages = [] for i in range(num_stages): curr_resolution = tuple([math.ceil(s / stride) for s in img_size]) stage = EfficientFormerV2Stage(prev_dim, embed_dims[i], depth=depths[i], resolution=curr_resolution, downsample=downsamples[i], block_stride=2 if i == 2 else None, downsample_use_attn=i >= 3, block_use_attn=i >= 2, num_vit=num_vit, mlp_ratio=mlp_ratios[i], proj_drop=proj_drop_rate, drop_path=dpr[i], layer_scale_init_value=layer_scale_init_value, act_layer=act_layer, norm_layer=norm_layer) if downsamples[i]: stride *= 2 prev_dim = embed_dims[i] self.feature_info += [dict(num_chs=prev_dim, reduction=stride, module=f'stages.{i}')] stages.append(stage) self.stages = nn.Sequential(*stages) self.num_features = self.head_hidden_size = embed_dims[-1] self.norm = norm_layer(embed_dims[-1]) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() self.dist = distillation if self.dist: self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() else: self.head_dist = None self.apply(self.init_weights) self.distilled_training = False def init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {k for (k, _) in self.named_parameters() if 'attention_biases' in k} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^stem', blocks=[('^stages\\.(\\d+)', None), ('^norm', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return (self.head, self.head_dist) def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool == 'avg': x = x.mean(dim=(2, 3)) x = self.head_drop(x) if pre_logits: return x (x, x_dist) = (self.head(x), self.head_dist(x)) if self.distilled_training and self.training and (not 
torch.jit.is_scripting()): return (x, x_dist) else: return (x + x_dist) / 2 def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True, 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': ('head', 'head_dist'), 'first_conv': 'stem.conv1.conv', **kwargs} default_cfgs = generate_default_cfgs({'efficientformerv2_s0.snap_dist_in1k': _cfg(hf_hub_id='timm/'), 'efficientformerv2_s1.snap_dist_in1k': _cfg(hf_hub_id='timm/'), 'efficientformerv2_s2.snap_dist_in1k': _cfg(hf_hub_id='timm/'), 'efficientformerv2_l.snap_dist_in1k': _cfg(hf_hub_id='timm/')}) def _create_efficientformerv2(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg(EfficientFormerV2, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model @register_model def efficientformerv2_s0(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict(depths=EfficientFormer_depth['S0'], embed_dims=EfficientFormer_width['S0'], num_vit=2, drop_path_rate=0.0, mlp_ratios=EfficientFormer_expansion_ratios['S0']) return _create_efficientformerv2('efficientformerv2_s0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformerv2_s1(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict(depths=EfficientFormer_depth['S1'], embed_dims=EfficientFormer_width['S1'], num_vit=2, drop_path_rate=0.0, mlp_ratios=EfficientFormer_expansion_ratios['S1']) return _create_efficientformerv2('efficientformerv2_s1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformerv2_s2(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict(depths=EfficientFormer_depth['S2'], embed_dims=EfficientFormer_width['S2'], num_vit=4, drop_path_rate=0.02, mlp_ratios=EfficientFormer_expansion_ratios['S2']) return _create_efficientformerv2('efficientformerv2_s2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformerv2_l(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict(depths=EfficientFormer_depth['L'], embed_dims=EfficientFormer_width['L'], num_vit=6, drop_path_rate=0.1, mlp_ratios=EfficientFormer_expansion_ratios['L']) return _create_efficientformerv2('efficientformerv2_l', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/efficientnet.py """""" from functools import partial from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import create_conv2d, create_classifier, get_norm_act_layer, GroupNormAct, LayerType from ._builder import build_model_with_cfg, pretrained_cfg_for_features from ._efficientnet_blocks import SqueezeExcite from ._efficientnet_builder import BlockArgs, EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT from ._features import FeatureInfo, FeatureHooks, feature_take_indices from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model, 
register_model_deprecations __all__ = ['EfficientNet', 'EfficientNetFeatures'] class EfficientNet(nn.Module): def __init__(self, block_args: BlockArgs, num_classes: int=1000, num_features: int=1280, in_chans: int=3, stem_size: int=32, stem_kernel_size: int=3, fix_stem: bool=False, output_stride: int=32, pad_type: str='', act_layer: Optional[LayerType]=None, norm_layer: Optional[LayerType]=None, aa_layer: Optional[LayerType]=None, se_layer: Optional[LayerType]=None, round_chs_fn: Callable=round_channels, drop_rate: float=0.0, drop_path_rate: float=0.0, global_pool: str='avg'): super(EfficientNet, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d norm_act_layer = get_norm_act_layer(norm_layer, act_layer) se_layer = se_layer or SqueezeExcite self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, stem_kernel_size, stride=2, padding=pad_type) self.bn1 = norm_act_layer(stem_size, inplace=True) builder = EfficientNetBuilder(output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, se_layer=se_layer, drop_path_rate=drop_path_rate) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = builder.features self.stage_ends = [f['stage'] for f in self.feature_info] head_chs = builder.in_chs if num_features > 0: self.conv_head = create_conv2d(head_chs, num_features, 1, padding=pad_type) self.bn2 = norm_act_layer(num_features, inplace=True) self.num_features = self.head_hidden_size = num_features else: self.conv_head = nn.Identity() self.bn2 = nn.Identity() self.num_features = self.head_hidden_size = head_chs (self.global_pool, self.classifier) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) efficientnet_init_weights(self) def as_sequential(self): layers = [self.conv_stem, self.bn1] layers.extend(self.blocks) layers.extend([self.conv_head, self.bn2, self.global_pool]) layers.extend([nn.Dropout(self.drop_rate), self.classifier]) return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^conv_stem|bn1', blocks=[('^blocks\\.(\\d+)' if coarse else '^blocks\\.(\\d+)\\.(\\d+)', None), ('conv_head|bn2', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classifier def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.classifier) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False, extra_blocks: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
intermediates = [] if extra_blocks: (take_indices, max_index) = feature_take_indices(len(self.blocks) + 1, indices) else: (take_indices, max_index) = feature_take_indices(len(self.stage_ends), indices) take_indices = [self.stage_ends[i] for i in take_indices] max_index = self.stage_ends[max_index] feat_idx = 0 x = self.conv_stem(x) x = self.bn1(x) if feat_idx in take_indices: intermediates.append(x) if torch.jit.is_scripting() or not stop_early: blocks = self.blocks else: blocks = self.blocks[:max_index] for blk in blocks: feat_idx += 1 x = blk(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates if feat_idx == self.stage_ends[-1]: x = self.conv_head(x) x = self.bn2(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True, extra_blocks: bool=False): if extra_blocks: (take_indices, max_index) = feature_take_indices(len(self.blocks) + 1, indices) else: (take_indices, max_index) = feature_take_indices(len(self.stage_ends), indices) max_index = self.stage_ends[max_index] self.blocks = self.blocks[:max_index] if prune_norm or max_index < len(self.blocks): self.conv_head = nn.Identity() self.bn2 = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.conv_stem(x) x = self.bn1(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) x = self.conv_head(x) x = self.bn2(x) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) if self.drop_rate > 0.0: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.classifier(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x class EfficientNetFeatures(nn.Module): def __init__(self, block_args: BlockArgs, out_indices: Tuple[int, ...]=(0, 1, 2, 3, 4), feature_location: str='bottleneck', in_chans: int=3, stem_size: int=32, stem_kernel_size: int=3, fix_stem: bool=False, output_stride: int=32, pad_type: str='', act_layer: Optional[LayerType]=None, norm_layer: Optional[LayerType]=None, aa_layer: Optional[LayerType]=None, se_layer: Optional[LayerType]=None, round_chs_fn: Callable=round_channels, drop_rate: float=0.0, drop_path_rate: float=0.0): super(EfficientNetFeatures, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d norm_act_layer = get_norm_act_layer(norm_layer, act_layer) se_layer = se_layer or SqueezeExcite self.drop_rate = drop_rate self.grad_checkpointing = False if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, stem_kernel_size, stride=2, padding=pad_type) self.bn1 = norm_act_layer(stem_size, inplace=True) builder = EfficientNetBuilder(output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, feature_location=feature_location) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = FeatureInfo(builder.features, out_indices) self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()} efficientnet_init_weights(self) self.feature_hooks = None if feature_location != 'bottleneck': hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) self.feature_hooks = FeatureHooks(hooks, self.named_modules()) 
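# --- Editor's usage sketch (not part of the original file; assumes a recent
# timm install exposing the forward_intermediates API defined above) --------
import timm
import torch

model = timm.create_model('efficientnet_b0', pretrained=False)
x = torch.randn(1, 3, 224, 224)
final, intermediates = model.forward_intermediates(x, indices=(1, 2, 3))
print([tuple(t.shape) for t in intermediates])  # three selected stage outputs
print(tuple(final.shape))                       # features after conv_head/bn2
# ---------------------------------------------------------------------------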
@torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x) -> List[torch.Tensor]: x = self.conv_stem(x) x = self.bn1(x) if self.feature_hooks is None: features = [] if 0 in self._stage_out_idx: features.append(x) for (i, b) in enumerate(self.blocks): if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint(b, x) else: x = b(x) if i + 1 in self._stage_out_idx: features.append(x) return features else: self.blocks(x) out = self.feature_hooks.get_output(x.device) return list(out.values()) def _create_effnet(variant, pretrained=False, **kwargs): features_mode = '' model_cls = EfficientNet kwargs_filter = None if kwargs.pop('features_only', False): if 'feature_cfg' in kwargs or 'feature_cls' in kwargs: features_mode = 'cfg' else: kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool') model_cls = EfficientNetFeatures features_mode = 'cls' model = build_model_with_cfg(model_cls, variant, pretrained, features_only=features_mode == 'cfg', pretrained_strict=features_mode != 'cls', kwargs_filter=kwargs_filter, **kwargs) if features_mode == 'cls': model.pretrained_cfg = model.default_cfg = pretrained_cfg_for_features(model.pretrained_cfg) return model def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c16_noskip'], ['ir_r2_k3_s2_e6_c24'], ['ir_r3_k5_s2_e3_c40_se0.25'], ['ir_r4_k3_s2_e6_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['ir_r1_k3_s1_e6_c320']] model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_c16_noskip'], ['ir_r3_k3_s2_e3_c24'], ['ir_r3_k5_s2_e3_c40'], ['ir_r3_k5_s2_e6_c80'], ['ir_r2_k3_s1_e6_c96'], ['ir_r4_k5_s2_e6_c192'], ['ir_r1_k3_s1_e6_c320_noskip']] model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_c8'], ['ir_r1_k3_s2_e3_c16'], ['ir_r2_k3_s2_e6_c16'], ['ir_r4_k5_s2_e6_c32_se0.25'], ['ir_r3_k3_s1_e6_c32_se0.25'], ['ir_r3_k5_s2_e6_c88_se0.25'], ['ir_r1_k3_s1_e6_c144']] model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=8, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_v1(variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, fix_stem_head=False, head_conv=False, pretrained=False, **kwargs): arch_def = [['dsa_r1_k3_s1_c64'], ['dsa_r2_k3_s2_c128'], ['dsa_r2_k3_s2_c256'], ['dsa_r6_k3_s2_c512'], ['dsa_r2_k3_s2_c1024']] round_chs_fn = partial(round_channels, multiplier=channel_multiplier) head_features = (1024 if fix_stem_head else max(1024, round_chs_fn(1024))) if head_conv else 0 model_kwargs = 
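# --- Editor's sketch (not part of the original file) -----------------------
# Decoding the block-definition strings used by the _gen_* functions in this
# file: e.g. 'ir_r2_k3_s2_e6_c24' = inverted-residual block, repeated 2x,
# 3x3 kernel, stride 2, expansion 6, 24 output channels; suffixes like
# '_se0.25' add squeeze-excite and '_nsw'/'_nre' pick per-block activations.
from timm.models._efficientnet_builder import decode_arch_def

block_args = decode_arch_def([['ds_r1_k3_s1_e1_c16'], ['ir_r2_k3_s2_e6_c24']])
print([len(stage) for stage in block_args])  # [1, 2]: per-stage lists of block kwargs
# ---------------------------------------------------------------------------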
dict(block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head, group_size=group_size), num_features=head_features, stem_size=32, fix_stem=fix_stem_head, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'relu6'), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_v2(variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, fix_stem_head=False, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_c16'], ['ir_r2_k3_s2_e6_c24'], ['ir_r3_k3_s2_e6_c32'], ['ir_r4_k3_s2_e6_c64'], ['ir_r3_k3_s1_e6_c96'], ['ir_r3_k3_s2_e6_c160'], ['ir_r1_k3_s1_e6_c320']] round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head, group_size=group_size), num_features=1280 if fix_stem_head else max(1280, round_chs_fn(1280)), stem_size=32, fix_stem=fix_stem_head, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'relu6'), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ir_r1_k3_s1_e1_c16'], ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'], ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], ['ir_r4_k5_s2_e6_c184'], ['ir_r1_k3_s1_e6_c352']] model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=16, num_features=1984, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_c16_noskip'], ['ir_r3_k3_s2_e3_c24'], ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], ['ir_r4_k5_s2_e6_c192'], ['ir_r1_k3_s1_e6_c320_noskip']] model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8, group_size=None, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25']] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor) model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, act_layer=resolve_act_layer(kwargs, 'swish'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), 
**kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_edge(variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs): arch_def = [['er_r1_k3_s1_e4_c24_fc24_noskip'], ['er_r2_k3_s2_e8_c32'], ['er_r4_k3_s2_e8_c48'], ['ir_r5_k5_s2_e8_c96'], ['ir_r4_k5_s1_e8_c144'], ['ir_r2_k5_s2_e8_c192']] round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'relu'), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_condconv(variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], ['ir_r1_k3_s1_e6_c320_se0.25_cc4']] round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'swish'), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c16'], ['ir_r2_k3_s2_e6_c24'], ['ir_r2_k5_s2_e6_c40'], ['ir_r3_k3_s2_e6_c80'], ['ir_r3_k5_s1_e6_c112'], ['ir_r4_k5_s2_e6_c192'], ['ir_r1_k3_s1_e6_c320']] model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), num_features=1280, stem_size=32, fix_stem=True, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), act_layer=resolve_act_layer(kwargs, 'relu6'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_base(variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs): arch_def = [['cn_r1_k3_s1_e1_c16_skip'], ['er_r2_k3_s2_e4_c32'], ['er_r2_k3_s2_e4_c48'], ['ir_r3_k3_s2_e4_c96_se0.25'], ['ir_r5_k3_s1_e6_c112_se0.25'], ['ir_r8_k3_s2_e6_c192_se0.25']] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.0) model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_s(variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, rw=False, pretrained=False, **kwargs): arch_def = [['cn_r2_k3_s1_e1_c24_skip'], ['er_r4_k3_s2_e4_c48'], ['er_r4_k3_s2_e4_c64'], ['ir_r6_k3_s2_e4_c128_se0.25'], ['ir_r9_k3_s1_e6_c160_se0.25'], 
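# --- Editor's sketch (not part of the original file) -----------------------
# What round_chs_fn does throughout these generators: scale a width by the
# channel multiplier, then round to the divisor (8 by default) without losing
# more than ~10% of the scaled value (round_limit=0.9); round_limit=0.0, as
# passed in the efficientnetv2 generators above, disables that guard.
from timm.models._efficientnet_builder import round_channels

print(round_channels(32, multiplier=0.5))   # 16
print(round_channels(40, multiplier=1.1))   # 44 -> rounds up to 48
print(round_channels(24, multiplier=1.05))  # 25.2 -> rounds down to 24
# ---------------------------------------------------------------------------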
['ir_r15_k3_s2_e6_c256_se0.25']] num_features = 1280 if rw: arch_def[0] = ['er_r2_k3_s1_e1_c24'] arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25'] num_features = 1792 round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(num_features), stem_size=24, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs): arch_def = [['cn_r3_k3_s1_e1_c24_skip'], ['er_r5_k3_s2_e4_c48'], ['er_r5_k3_s2_e4_c80'], ['ir_r7_k3_s2_e4_c160_se0.25'], ['ir_r14_k3_s1_e6_c176_se0.25'], ['ir_r18_k3_s2_e6_c304_se0.25'], ['ir_r5_k3_s1_e6_c512_se0.25']] model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=1280, stem_size=24, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_l(variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs): arch_def = [['cn_r4_k3_s1_e1_c32_skip'], ['er_r7_k3_s2_e4_c64'], ['er_r7_k3_s2_e4_c96'], ['ir_r10_k3_s2_e4_c192_se0.25'], ['ir_r19_k3_s1_e6_c224_se0.25'], ['ir_r25_k3_s2_e6_c384_se0.25'], ['ir_r7_k3_s1_e6_c640_se0.25']] model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=1280, stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_xl(variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs): arch_def = [['cn_r4_k3_s1_e1_c32_skip'], ['er_r8_k3_s2_e4_c64'], ['er_r8_k3_s2_e4_c96'], ['ir_r16_k3_s2_e4_c192_se0.25'], ['ir_r24_k3_s1_e6_c256_se0.25'], ['ir_r32_k3_s2_e6_c512_se0.25'], ['ir_r8_k3_s1_e6_c640_se0.25']] model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=1280, stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_x(variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8, group_size=None, version=1, pretrained=False, **kwargs): """""" if version == 1: arch_def = [['ds_r1_k3_s1_e1_c16_se0.25_d1'], ['er_r2_k3_s2_e6_c24_se0.25_nre'], ['er_r2_k5_s2_e6_c40_se0.25_nre'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25']] else: arch_def = [['ds_r1_k3_s1_e1_c16_se0.25_d1'], ['er_r2_k3_s2_e4_c24_se0.25_nre'], ['er_r2_k5_s2_e4_c40_se0.25_nre'], ['ir_r3_k3_s2_e4_c80_se0.25'], 
['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25']] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor) model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, act_layer=resolve_act_layer(kwargs, 'silu'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c16'], ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw']] model_kwargs = dict(block_args=decode_arch_def(arch_def), num_features=1536, stem_size=16, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c24'], ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw']] model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), num_features=1536, stem_size=24, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_tinynet(variant, model_width=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25']] model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), num_features=max(1280, round_channels(1280, model_width, 8, None)), stem_size=32, fix_stem=True, round_chs_fn=partial(round_channels, multiplier=model_width), act_layer=resolve_act_layer(kwargs, 'swish'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_edgetpu(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): if 'edgetpu_v2' in variant: stem_size = 64 stem_kernel_size = 5 group_size = 64 num_features = 1280 act_layer = resolve_act_layer(kwargs, 'relu') def _arch_def(chs: List[int], group_size: int): return [[f'cn_r1_k1_s1_c{chs[0]}'], 
[f'er_r1_k3_s2_e8_c{chs[1]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[1]}'], [f'er_r1_k3_s2_e8_c{chs[2]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}', f'er_r1_k3_s1_e4_c{chs[2]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}'], [f'er_r1_k3_s2_e8_c{chs[3]}', f'ir_r3_k3_s1_e4_c{chs[3]}'], [f'ir_r1_k3_s1_e8_c{chs[4]}', f'ir_r3_k3_s1_e4_c{chs[4]}'], [f'ir_r1_k3_s2_e8_c{chs[5]}', f'ir_r3_k3_s1_e4_c{chs[5]}'], [f'ir_r1_k3_s1_e8_c{chs[6]}']] if 'edgetpu_v2_xs' in variant: stem_size = 32 stem_kernel_size = 3 channels = [16, 32, 48, 96, 144, 160, 192] elif 'edgetpu_v2_s' in variant: channels = [24, 48, 64, 128, 160, 192, 256] elif 'edgetpu_v2_m' in variant: channels = [32, 64, 80, 160, 192, 240, 320] num_features = 1344 elif 'edgetpu_v2_l' in variant: stem_kernel_size = 7 group_size = 128 channels = [32, 64, 96, 192, 240, 256, 384] num_features = 1408 else: assert False arch_def = _arch_def(channels, group_size) else: stem_size = 32 stem_kernel_size = 3 num_features = 1280 act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [['cn_r1_k1_s1_c16'], ['er_r1_k3_s2_e8_c32', 'er_r3_k3_s1_e4_c32'], ['er_r1_k3_s2_e8_c48', 'er_r3_k3_s1_e4_c48'], ['ir_r1_k3_s2_e8_c96', 'ir_r3_k3_s1_e4_c96'], ['ir_r1_k3_s1_e8_c96_noskip', 'ir_r3_k3_s1_e4_c96'], ['ir_r1_k5_s2_e8_c160', 'ir_r3_k5_s1_e4_c160'], ['ir_r1_k3_s1_e8_c192']] model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier), num_features=num_features, stem_size=stem_size, stem_kernel_size=stem_kernel_size, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=act_layer, **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_test_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['cn_r1_k3_s1_e1_c16_skip'], ['er_r1_k3_s2_e4_c24'], ['er_r1_k3_s2_e4_c32'], ['ir_r1_k3_s2_e4_c48_se0.25'], ['ir_r1_k3_s2_e4_c64_se0.25']] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.0) model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier), num_features=round_chs_fn(256), stem_size=24, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs} default_cfgs = generate_default_cfgs({'mnasnet_050.untrained': _cfg(), 'mnasnet_075.untrained': _cfg(), 'mnasnet_100.rmsp_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth', hf_hub_id='timm/'), 'mnasnet_140.untrained': _cfg(), 'semnasnet_050.untrained': _cfg(), 'semnasnet_075.rmsp_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/semnasnet_075-18710866.pth', hf_hub_id='timm/'), 'semnasnet_100.rmsp_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth', hf_hub_id='timm/'), 'semnasnet_140.untrained': _cfg(), 'mnasnet_small.lamb_in1k': 
_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_small_lamb-aff75073.pth', hf_hub_id='timm/'), 'mobilenetv1_100.ra4_e3600_r224_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, test_input_size=(3, 256, 256), test_crop_pct=0.95), 'mobilenetv1_100h.ra4_e3600_r224_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, test_input_size=(3, 256, 256), test_crop_pct=0.95), 'mobilenetv1_125.ra4_e3600_r224_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=1.0), 'mobilenetv2_035.untrained': _cfg(), 'mobilenetv2_050.lamb_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_050-3d30d450.pth', hf_hub_id='timm/', interpolation='bicubic'), 'mobilenetv2_075.untrained': _cfg(), 'mobilenetv2_100.ra_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth', hf_hub_id='timm/'), 'mobilenetv2_110d.ra_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth', hf_hub_id='timm/'), 'mobilenetv2_120d.ra_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth', hf_hub_id='timm/'), 'mobilenetv2_140.ra_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth', hf_hub_id='timm/'), 'fbnetc_100.rmsp_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', hf_hub_id='timm/', interpolation='bilinear'), 'spnasnet_100.rmsp_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', hf_hub_id='timm/', interpolation='bilinear'), 'efficientnet_b0.ra_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth', hf_hub_id='timm/'), 'efficientnet_b0.ra4_e3600_r224_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=1.0), 'efficientnet_b1.ra4_e3600_r240_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), crop_pct=0.9, pool_size=(8, 8), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'efficientnet_b1.ft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', hf_hub_id='timm/', test_input_size=(3, 256, 256), test_crop_pct=1.0), 'efficientnet_b2.ra_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'efficientnet_b3.ra2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth', hf_hub_id='timm/', input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), test_crop_pct=1.0), 'efficientnet_b4.ra2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth', hf_hub_id='timm/', input_size=(3, 
320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), test_crop_pct=1.0), 'efficientnet_b5.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, crop_mode='squash'), 'efficientnet_b5.sw_in12k': _cfg(hf_hub_id='timm/', input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.95, num_classes=11821), 'efficientnet_b6.untrained': _cfg(url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'efficientnet_b7.untrained': _cfg(url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'efficientnet_b8.untrained': _cfg(url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), 'efficientnet_l2.untrained': _cfg(url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961), 'efficientnet_b0_gn.untrained': _cfg(), 'efficientnet_b0_g8_gn.untrained': _cfg(), 'efficientnet_b0_g16_evos.untrained': _cfg(), 'efficientnet_b3_gn.untrained': _cfg(input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), 'efficientnet_b3_g8_gn.untrained': _cfg(input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), 'efficientnet_blur_b0.untrained': _cfg(), 'efficientnet_es.ra_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth', hf_hub_id='timm/'), 'efficientnet_em.ra2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'efficientnet_el.ra_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el-3b455510.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'efficientnet_es_pruned.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_pruned75-1b7248cf.pth', hf_hub_id='timm/'), 'efficientnet_el_pruned.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el_pruned70-ef2a2ccf.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'efficientnet_cc_b0_4e.untrained': _cfg(), 'efficientnet_cc_b0_8e.untrained': _cfg(), 'efficientnet_cc_b1_8e.untrained': _cfg(input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'efficientnet_lite0.ra_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth', hf_hub_id='timm/'), 'efficientnet_lite1.untrained': _cfg(input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'efficientnet_lite2.untrained': _cfg(input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.89), 'efficientnet_lite3.untrained': _cfg(input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'efficientnet_lite4.untrained': _cfg(input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'efficientnet_b1_pruned.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb1_pruned-bea43a3a.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'efficientnet_b2_pruned.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb2_pruned-08c1b27c.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.89, 
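# Illustrative usage sketch (not part of the original file; assumes a recent timm
# install with these hub weights published). The 'model.tag' form selects one
# pretrained entry from this default_cfgs table:
#   import timm
#   m = timm.create_model('efficientnet_b3.ra2_in1k', pretrained=True).eval()
#   print(m.pretrained_cfg['input_size'])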
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'efficientnet_b3_pruned.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb3_pruned-59ecf72d.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'efficientnetv2_rw_t.ra2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth', hf_hub_id='timm/', input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), 'gc_efficientnetv2_rw_t.agc_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth', hf_hub_id='timm/', input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), 'efficientnetv2_rw_s.ra2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth', hf_hub_id='timm/', input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), 'efficientnetv2_rw_m.agc_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth', hf_hub_id='timm/', input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), 'efficientnetv2_s.untrained': _cfg(input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), 'efficientnetv2_m.untrained': _cfg(input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), 'efficientnetv2_l.untrained': _cfg(input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), 'efficientnetv2_xl.untrained': _cfg(input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), 'tf_efficientnet_b0.ns_jft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', hf_hub_id='timm/', input_size=(3, 224, 224)), 'tf_efficientnet_b1.ns_jft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.ns_jft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.89), 'tf_efficientnet_b3.ns_jft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.ns_jft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', hf_hub_id='timm/', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.ns_jft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b6.ns_jft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', hf_hub_id='timm/', 
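# Entries carrying test_input_size/test_crop_pct were trained at input_size but
# evaluate better at the larger size. Hedged sketch of resolving eval-time transforms
# (the use_test_size argument is assumed available in this timm version):
#   from timm.data import resolve_data_config, create_transform
#   cfg = resolve_data_config({}, model=m, use_test_size=True)
#   transform = create_transform(**cfg)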
input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'tf_efficientnet_b7.ns_jft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', hf_hub_id='timm/', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_l2.ns_jft_in1k_475': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', hf_hub_id='timm/', input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936), 'tf_efficientnet_l2.ns_jft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', hf_hub_id='timm/', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96), 'tf_efficientnet_b0.ap_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)), 'tf_efficientnet_b1.ap_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.ap_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.89), 'tf_efficientnet_b3.ap_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.ap_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.ap_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b6.ap_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'tf_efficientnet_b7.ap_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_b8.ap_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), 'tf_efficientnet_b5.ra_in1k': 
_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b7.ra_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', hf_hub_id='timm/', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_b8.ra_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', hf_hub_id='timm/', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), 'tf_efficientnet_b0.aa_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', hf_hub_id='timm/', input_size=(3, 224, 224)), 'tf_efficientnet_b1.aa_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.aa_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.89), 'tf_efficientnet_b3.aa_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.aa_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', hf_hub_id='timm/', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.aa_in1k': _cfg(url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_aa-99018a74.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b6.aa_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', hf_hub_id='timm/', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'tf_efficientnet_b7.aa_in1k': _cfg(url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_aa-076e3472.pth', hf_hub_id='timm/', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_b0.in1k': _cfg(url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0-0af12548.pth', hf_hub_id='timm/', input_size=(3, 224, 224)), 'tf_efficientnet_b1.in1k': _cfg(url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1-5c1377c4.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.in1k': _cfg(url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2-e393ef04.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.89), 'tf_efficientnet_b3.in1k': _cfg(url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3-e3bd6955.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.in1k': 
_cfg(url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4-74ee3bed.pth', hf_hub_id='timm/', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.in1k': _cfg(url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5-c6949ce9.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_es.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 224, 224)), 'tf_efficientnet_em.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_el.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_cc_b0_4e.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_efficientnet_cc_b0_8e.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_efficientnet_cc_b1_8e.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_lite0.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), interpolation='bicubic'), 'tf_efficientnet_lite1.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, interpolation='bicubic'), 'tf_efficientnet_lite2.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.89, interpolation='bicubic'), 'tf_efficientnet_lite3.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'), 'tf_efficientnet_lite4.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.92, interpolation='bilinear'), 'tf_efficientnetv2_s.in21k_ft_in1k': 
_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), 'tf_efficientnetv2_m.in21k_ft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_l.in21k_ft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_xl.in21k_ft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_s.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), 'tf_efficientnetv2_m.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_l.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_s.in21k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), 'tf_efficientnetv2_m.in21k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_l.in21k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_xl.in21k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth', 
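# The .in21k cfgs keep the full 21843-class ImageNet-21k head; the matching
# .in21k_ft_in1k tags are the same backbones fine-tuned back to 1000 classes.
# Illustrative head replacement for transfer learning (a sketch, not upstream code):
#   m = timm.create_model('tf_efficientnetv2_s.in21k', pretrained=True, num_classes=10)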
hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_b0.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth', hf_hub_id='timm/', input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)), 'tf_efficientnetv2_b1.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth', hf_hub_id='timm/', input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882), 'tf_efficientnetv2_b2.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth', hf_hub_id='timm/', input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.89), 'tf_efficientnetv2_b3.in21k_ft_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.9, crop_mode='squash'), 'tf_efficientnetv2_b3.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth', hf_hub_id='timm/', input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), 'tf_efficientnetv2_b3.in21k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, num_classes=21843, input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), 'mixnet_s.ft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth', hf_hub_id='timm/'), 'mixnet_m.ft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth', hf_hub_id='timm/'), 'mixnet_l.ft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth', hf_hub_id='timm/'), 'mixnet_xl.ra_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth', hf_hub_id='timm/'), 'mixnet_xxl.untrained': _cfg(), 'tf_mixnet_s.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth', hf_hub_id='timm/'), 'tf_mixnet_m.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth', hf_hub_id='timm/'), 'tf_mixnet_l.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth', hf_hub_id='timm/'), 'tinynet_a.in1k': _cfg(input_size=(3, 192, 192), pool_size=(6, 6), url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_a.pth', hf_hub_id='timm/'), 'tinynet_b.in1k': _cfg(input_size=(3, 188, 188), pool_size=(6, 6), url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_b.pth', hf_hub_id='timm/'), 'tinynet_c.in1k': _cfg(input_size=(3, 184, 184), pool_size=(6, 6), url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_c.pth', hf_hub_id='timm/'), 'tinynet_d.in1k': _cfg(input_size=(3, 152, 152), pool_size=(5, 5), url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_d.pth', 
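# The TinyNet cfgs shrink train resolution together with model width/depth (192px
# for tinynet_a down to 106px for tinynet_e), following the joint resolution/width/
# depth scaling rule of the TinyNet paper (Han et al., NeurIPS 2020).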
hf_hub_id='timm/'), 'tinynet_e.in1k': _cfg(input_size=(3, 106, 106), pool_size=(4, 4), url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth', hf_hub_id='timm/'), 'mobilenet_edgetpu_100.untrained': _cfg(input_size=(3, 224, 224), crop_pct=0.9), 'mobilenet_edgetpu_v2_xs.untrained': _cfg(input_size=(3, 224, 224), crop_pct=0.9), 'mobilenet_edgetpu_v2_s.untrained': _cfg(input_size=(3, 224, 224), crop_pct=0.9), 'mobilenet_edgetpu_v2_m.ra4_e3600_r224_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=0.95), 'mobilenet_edgetpu_v2_l.untrained': _cfg(input_size=(3, 224, 224), crop_pct=0.9), 'test_efficientnet.r160_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 160, 160), pool_size=(5, 5))}) @register_model def mnasnet_050(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_075(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_100(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_140(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_050(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_075(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_100(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_140(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_small(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv1_100(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_v1('mobilenetv1_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv1_100h(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_v1('mobilenetv1_100h', 1.0, head_conv=True, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv1_125(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_v1('mobilenetv1_125', 1.25, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_035(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_v2('mobilenetv2_035', 0.35, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_050(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_v2('mobilenetv2_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_075(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_v2('mobilenetv2_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_100(pretrained=False, **kwargs) -> EfficientNet: model = 
_gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_140(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_110d(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_v2('mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_120d(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_v2('mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) return model @register_model def fbnetc_100(pretrained=False, **kwargs) -> EfficientNet: if pretrained: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def spnasnet_100(pretrained=False, **kwargs) -> EfficientNet: model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b0_gn(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b0_gn', norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs) return model @register_model def 
efficientnet_b0_g8_gn(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b0_g8_gn', group_size=8, norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b0_g16_evos(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b0_g16_evos', group_size=16, channel_divisor=16, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3_gn(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b3_gn', channel_multiplier=1.2, depth_multiplier=1.4, channel_divisor=16, norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3_g8_gn(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_b3_g8_gn', channel_multiplier=1.2, depth_multiplier=1.4, group_size=8, channel_divisor=16, norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_blur_b0(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet('efficientnet_blur_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, aa_layer='blurpc', **kwargs) return model @register_model def efficientnet_es(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_edge('efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_es_pruned(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_edge('efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_em(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_edge('efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_el(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_edge('efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_el_pruned(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_edge('efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_condconv('efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_condconv('efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_condconv('efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_lite('efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet: model = 
_gen_efficientnet_lite('efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_lite('efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_lite('efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_lite('efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b1_pruned(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') variant = 'efficientnet_b1_pruned' model = _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b2_pruned(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet('efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3_pruned(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet('efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnetv2_s('efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs) return model @register_model def gc_efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnetv2_s('gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, se_layer='gc', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_s(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_m(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnetv2_s('efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs) return model @register_model def 
tf_efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet('tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet('tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet('tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet('tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet('tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet('tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet('tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet('tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet('tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet('tf_efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_es(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_edge('tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_em(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') 
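# Pattern note: every tf_* entrypoint pins TensorFlow-compatible defaults before
# building -- BN epsilon via BN_EPS_TF_DEFAULT (1e-3, vs PyTorch's 1e-5 default)
# and dynamic 'same' padding -- so weights ported from the official TF checkpoints
# reproduce their reference outputs.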
model = _gen_efficientnet_edge('tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_el(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_edge('tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_condconv('tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_condconv('tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_condconv('tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite('tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite('tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite('tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite('tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite('tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = 
_gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b0(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b1(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base('tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b2(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base('tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b3(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base('tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_x_b3(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_x('efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_x_b5(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_x('efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_h_b5(pretrained=False, **kwargs) -> EfficientNet: model = _gen_efficientnet_x('efficientnet_b5', channel_multiplier=1.92, depth_multiplier=2.2, version=2, pretrained=pretrained, **kwargs) return model @register_model def mixnet_s(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mixnet_s('mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def mixnet_m(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mixnet_m('mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def mixnet_l(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mixnet_m('mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def mixnet_xl(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mixnet_m('mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def mixnet_xxl(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mixnet_m('mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_s(pretrained=False, **kwargs) -> 
EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mixnet_s('tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_m(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mixnet_m('tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_l(pretrained=False, **kwargs) -> EfficientNet: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mixnet_m('tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def tinynet_a(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_a', 1.0, 1.2, pretrained=pretrained, **kwargs) return model @register_model def tinynet_b(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_b', 0.75, 1.1, pretrained=pretrained, **kwargs) return model @register_model def tinynet_c(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_c', 0.54, 0.85, pretrained=pretrained, **kwargs) return model @register_model def tinynet_d(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_d', 0.54, 0.695, pretrained=pretrained, **kwargs) return model @register_model def tinynet_e(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_e', 0.51, 0.6, pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_100(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_100', pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_v2_xs(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_xs', pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_v2_s(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_s', pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_v2_m(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_m', pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_v2_l(pretrained=False, **kwargs) -> EfficientNet: model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_l', pretrained=pretrained, **kwargs) return model @register_model def test_efficientnet(pretrained=False, **kwargs) -> EfficientNet: model = _gen_test_efficientnet('test_efficientnet', pretrained=pretrained, **kwargs) return model register_model_deprecations(__name__, {'tf_efficientnet_b0_ap': 'tf_efficientnet_b0.ap_in1k', 'tf_efficientnet_b1_ap': 'tf_efficientnet_b1.ap_in1k', 'tf_efficientnet_b2_ap': 'tf_efficientnet_b2.ap_in1k', 'tf_efficientnet_b3_ap': 'tf_efficientnet_b3.ap_in1k', 'tf_efficientnet_b4_ap': 'tf_efficientnet_b4.ap_in1k', 'tf_efficientnet_b5_ap': 'tf_efficientnet_b5.ap_in1k', 'tf_efficientnet_b6_ap': 'tf_efficientnet_b6.ap_in1k', 'tf_efficientnet_b7_ap': 'tf_efficientnet_b7.ap_in1k', 'tf_efficientnet_b8_ap': 'tf_efficientnet_b8.ap_in1k', 'tf_efficientnet_b0_ns': 'tf_efficientnet_b0.ns_jft_in1k', 'tf_efficientnet_b1_ns': 'tf_efficientnet_b1.ns_jft_in1k', 'tf_efficientnet_b2_ns': 'tf_efficientnet_b2.ns_jft_in1k', 'tf_efficientnet_b3_ns': 'tf_efficientnet_b3.ns_jft_in1k', 'tf_efficientnet_b4_ns': 'tf_efficientnet_b4.ns_jft_in1k', 
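# register_model_deprecations keeps the old pre-tag names loadable; e.g. (sketch)
#   timm.create_model('tf_efficientnet_b4_ns')
# resolves to 'tf_efficientnet_b4.ns_jft_in1k' via this mapping and emits a
# deprecation warning.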
'tf_efficientnet_b5_ns': 'tf_efficientnet_b5.ns_jft_in1k', 'tf_efficientnet_b6_ns': 'tf_efficientnet_b6.ns_jft_in1k', 'tf_efficientnet_b7_ns': 'tf_efficientnet_b7.ns_jft_in1k', 'tf_efficientnet_l2_ns_475': 'tf_efficientnet_l2.ns_jft_in1k_475', 'tf_efficientnet_l2_ns': 'tf_efficientnet_l2.ns_jft_in1k', 'tf_efficientnetv2_s_in21ft1k': 'tf_efficientnetv2_s.in21k_ft_in1k', 'tf_efficientnetv2_m_in21ft1k': 'tf_efficientnetv2_m.in21k_ft_in1k', 'tf_efficientnetv2_l_in21ft1k': 'tf_efficientnetv2_l.in21k_ft_in1k', 'tf_efficientnetv2_xl_in21ft1k': 'tf_efficientnetv2_xl.in21k_ft_in1k', 'tf_efficientnetv2_s_in21k': 'tf_efficientnetv2_s.in21k', 'tf_efficientnetv2_m_in21k': 'tf_efficientnetv2_m.in21k', 'tf_efficientnetv2_l_in21k': 'tf_efficientnetv2_l.in21k', 'tf_efficientnetv2_xl_in21k': 'tf_efficientnetv2_xl.in21k', 'efficientnet_b2a': 'efficientnet_b2', 'efficientnet_b3a': 'efficientnet_b3', 'mnasnet_a1': 'semnasnet_100', 'mnasnet_b1': 'mnasnet_100'}) # File: pytorch-image-models-main/timm/models/efficientvit_mit.py """""" __all__ = ['EfficientVit', 'EfficientVitLarge'] from typing import Any, List, Optional, Tuple, Union from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d, create_conv2d, GELUTanh from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs def val2list(x: Union[list, tuple, Any], repeat_time=1): if isinstance(x, (list, tuple)): return list(x) return [x for _ in range(repeat_time)] def val2tuple(x: Union[list, tuple, Any], min_len: int=1, idx_repeat: int=-1): x = val2list(x) if len(x) > 0: x[idx_repeat:idx_repeat] = [x[idx_repeat] for _ in range(min_len - len(x))] return tuple(x) def get_same_padding(kernel_size: Union[int, Tuple[int, ...]]) -> Union[int, Tuple[int, ...]]: if isinstance(kernel_size, tuple): return tuple([get_same_padding(ks) for ks in kernel_size]) else: assert kernel_size % 2 > 0, 'kernel size should be an odd number' return kernel_size // 2 class ConvNormAct(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size=3, stride=1, dilation=1, groups=1, bias=False, dropout=0.0, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU): super(ConvNormAct, self).__init__() self.dropout = nn.Dropout(dropout, inplace=False) self.conv = create_conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=groups, bias=bias) self.norm = norm_layer(num_features=out_channels) if norm_layer else nn.Identity() self.act = act_layer(inplace=True) if act_layer is not None else nn.Identity() def forward(self, x): x = self.dropout(x) x = self.conv(x) x = self.norm(x) x = self.act(x) return x class DSConv(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size=3, stride=1, use_bias=False, norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d), act_layer=(nn.ReLU6, None)): super(DSConv, self).__init__() use_bias = val2tuple(use_bias, 2) norm_layer = val2tuple(norm_layer, 2) act_layer = val2tuple(act_layer, 2) self.depth_conv = ConvNormAct(in_channels, in_channels, kernel_size, stride, groups=in_channels, norm_layer=norm_layer[0], act_layer=act_layer[0], bias=use_bias[0]) self.point_conv = ConvNormAct(in_channels, out_channels, 1, norm_layer=norm_layer[1], act_layer=act_layer[1], bias=use_bias[1]) def forward(self, x): x = self.depth_conv(x) x = self.point_conv(x) return x class
ConvBlock(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size=3, stride=1, mid_channels=None, expand_ratio=1, use_bias=False, norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d), act_layer=(nn.ReLU6, None)): super(ConvBlock, self).__init__() use_bias = val2tuple(use_bias, 2) norm_layer = val2tuple(norm_layer, 2) act_layer = val2tuple(act_layer, 2) mid_channels = mid_channels or round(in_channels * expand_ratio) self.conv1 = ConvNormAct(in_channels, mid_channels, kernel_size, stride, norm_layer=norm_layer[0], act_layer=act_layer[0], bias=use_bias[0]) self.conv2 = ConvNormAct(mid_channels, out_channels, kernel_size, 1, norm_layer=norm_layer[1], act_layer=act_layer[1], bias=use_bias[1]) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class MBConv(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size=3, stride=1, mid_channels=None, expand_ratio=6, use_bias=False, norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d, nn.BatchNorm2d), act_layer=(nn.ReLU6, nn.ReLU6, None)): super(MBConv, self).__init__() use_bias = val2tuple(use_bias, 3) norm_layer = val2tuple(norm_layer, 3) act_layer = val2tuple(act_layer, 3) mid_channels = mid_channels or round(in_channels * expand_ratio) self.inverted_conv = ConvNormAct(in_channels, mid_channels, 1, stride=1, norm_layer=norm_layer[0], act_layer=act_layer[0], bias=use_bias[0]) self.depth_conv = ConvNormAct(mid_channels, mid_channels, kernel_size, stride=stride, groups=mid_channels, norm_layer=norm_layer[1], act_layer=act_layer[1], bias=use_bias[1]) self.point_conv = ConvNormAct(mid_channels, out_channels, 1, norm_layer=norm_layer[2], act_layer=act_layer[2], bias=use_bias[2]) def forward(self, x): x = self.inverted_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class FusedMBConv(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size=3, stride=1, mid_channels=None, expand_ratio=6, groups=1, use_bias=False, norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d), act_layer=(nn.ReLU6, None)): super(FusedMBConv, self).__init__() use_bias = val2tuple(use_bias, 2) norm_layer = val2tuple(norm_layer, 2) act_layer = val2tuple(act_layer, 2) mid_channels = mid_channels or round(in_channels * expand_ratio) self.spatial_conv = ConvNormAct(in_channels, mid_channels, kernel_size, stride=stride, groups=groups, norm_layer=norm_layer[0], act_layer=act_layer[0], bias=use_bias[0]) self.point_conv = ConvNormAct(mid_channels, out_channels, 1, norm_layer=norm_layer[1], act_layer=act_layer[1], bias=use_bias[1]) def forward(self, x): x = self.spatial_conv(x) x = self.point_conv(x) return x class LiteMLA(nn.Module): def __init__(self, in_channels: int, out_channels: int, heads: Optional[int]=None, heads_ratio: float=1.0, dim=8, use_bias=False, norm_layer=(None, nn.BatchNorm2d), act_layer=(None, None), kernel_func=nn.ReLU, scales=(5,), eps=1e-05): super(LiteMLA, self).__init__() self.eps = eps heads = heads or int(in_channels // dim * heads_ratio) total_dim = heads * dim use_bias = val2tuple(use_bias, 2) norm_layer = val2tuple(norm_layer, 2) act_layer = val2tuple(act_layer, 2) self.dim = dim self.qkv = ConvNormAct(in_channels, 3 * total_dim, 1, bias=use_bias[0], norm_layer=norm_layer[0], act_layer=act_layer[0]) self.aggreg = nn.ModuleList([nn.Sequential(nn.Conv2d(3 * total_dim, 3 * total_dim, scale, padding=get_same_padding(scale), groups=3 * total_dim, bias=use_bias[0]), nn.Conv2d(3 * total_dim, 3 * total_dim, 1, groups=3 * heads, bias=use_bias[0])) for scale in scales]) self.kernel_func =
kernel_func(inplace=False) self.proj = ConvNormAct(total_dim * (1 + len(scales)), out_channels, 1, bias=use_bias[1], norm_layer=norm_layer[1], act_layer=act_layer[1]) def _attn(self, q, k, v): dtype = v.dtype (q, k, v) = (q.float(), k.float(), v.float()) kv = k.transpose(-1, -2) @ v out = q @ kv out = out[..., :-1] / (out[..., -1:] + self.eps) return out.to(dtype) def forward(self, x): (B, _, H, W) = x.shape qkv = self.qkv(x) multi_scale_qkv = [qkv] for op in self.aggreg: multi_scale_qkv.append(op(qkv)) multi_scale_qkv = torch.cat(multi_scale_qkv, dim=1) multi_scale_qkv = multi_scale_qkv.reshape(B, -1, 3 * self.dim, H * W).transpose(-1, -2) (q, k, v) = multi_scale_qkv.chunk(3, dim=-1) q = self.kernel_func(q) k = self.kernel_func(k) v = F.pad(v, (0, 1), mode='constant', value=1.0) if not torch.jit.is_scripting(): with torch.autocast(device_type=v.device.type, enabled=False): out = self._attn(q, k, v) else: out = self._attn(q, k, v) out = out.transpose(-1, -2).reshape(B, -1, H, W) out = self.proj(out) return out register_notrace_module(LiteMLA) class EfficientVitBlock(nn.Module): def __init__(self, in_channels, heads_ratio=1.0, head_dim=32, expand_ratio=4, norm_layer=nn.BatchNorm2d, act_layer=nn.Hardswish): super(EfficientVitBlock, self).__init__() self.context_module = ResidualBlock(LiteMLA(in_channels=in_channels, out_channels=in_channels, heads_ratio=heads_ratio, dim=head_dim, norm_layer=(None, norm_layer)), nn.Identity()) self.local_module = ResidualBlock(MBConv(in_channels=in_channels, out_channels=in_channels, expand_ratio=expand_ratio, use_bias=(True, True, False), norm_layer=(None, None, norm_layer), act_layer=(act_layer, act_layer, None)), nn.Identity()) def forward(self, x): x = self.context_module(x) x = self.local_module(x) return x class ResidualBlock(nn.Module): def __init__(self, main: Optional[nn.Module], shortcut: Optional[nn.Module]=None, pre_norm: Optional[nn.Module]=None): super(ResidualBlock, self).__init__() self.pre_norm = pre_norm if pre_norm is not None else nn.Identity() self.main = main self.shortcut = shortcut def forward(self, x): res = self.main(self.pre_norm(x)) if self.shortcut is not None: res = res + self.shortcut(x) return res def build_local_block(in_channels: int, out_channels: int, stride: int, expand_ratio: float, norm_layer: str, act_layer: str, fewer_norm: bool=False, block_type: str='default'): assert block_type in ['default', 'large', 'fused'] if expand_ratio == 1: if block_type == 'default': block = DSConv(in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=(True, False) if fewer_norm else False, norm_layer=(None, norm_layer) if fewer_norm else norm_layer, act_layer=(act_layer, None)) else: block = ConvBlock(in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=(True, False) if fewer_norm else False, norm_layer=(None, norm_layer) if fewer_norm else norm_layer, act_layer=(act_layer, None)) elif block_type == 'default': block = MBConv(in_channels=in_channels, out_channels=out_channels, stride=stride, expand_ratio=expand_ratio, use_bias=(True, True, False) if fewer_norm else False, norm_layer=(None, None, norm_layer) if fewer_norm else norm_layer, act_layer=(act_layer, act_layer, None)) else: block = FusedMBConv(in_channels=in_channels, out_channels=out_channels, stride=stride, expand_ratio=expand_ratio, use_bias=(True, False) if fewer_norm else False, norm_layer=(None, norm_layer) if fewer_norm else norm_layer, act_layer=(act_layer, None)) return block class Stem(nn.Sequential): def __init__(self, in_chs, 
out_chs, depth, norm_layer, act_layer, block_type='default'): super().__init__() self.stride = 2 self.add_module('in_conv', ConvNormAct(in_chs, out_chs, kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer)) stem_block = 0 for _ in range(depth): self.add_module(f'res{stem_block}', ResidualBlock(build_local_block(in_channels=out_chs, out_channels=out_chs, stride=1, expand_ratio=1, norm_layer=norm_layer, act_layer=act_layer, block_type=block_type), nn.Identity())) stem_block += 1 class EfficientVitStage(nn.Module): def __init__(self, in_chs, out_chs, depth, norm_layer, act_layer, expand_ratio, head_dim, vit_stage=False): super(EfficientVitStage, self).__init__() blocks = [ResidualBlock(build_local_block(in_channels=in_chs, out_channels=out_chs, stride=2, expand_ratio=expand_ratio, norm_layer=norm_layer, act_layer=act_layer, fewer_norm=vit_stage), None)] in_chs = out_chs if vit_stage: for _ in range(depth): blocks.append(EfficientVitBlock(in_channels=in_chs, head_dim=head_dim, expand_ratio=expand_ratio, norm_layer=norm_layer, act_layer=act_layer)) else: for i in range(1, depth): blocks.append(ResidualBlock(build_local_block(in_channels=in_chs, out_channels=out_chs, stride=1, expand_ratio=expand_ratio, norm_layer=norm_layer, act_layer=act_layer), nn.Identity())) self.blocks = nn.Sequential(*blocks) def forward(self, x): return self.blocks(x) class EfficientVitLargeStage(nn.Module): def __init__(self, in_chs, out_chs, depth, norm_layer, act_layer, head_dim, vit_stage=False, fewer_norm=False): super(EfficientVitLargeStage, self).__init__() blocks = [ResidualBlock(build_local_block(in_channels=in_chs, out_channels=out_chs, stride=2, expand_ratio=24 if vit_stage else 16, norm_layer=norm_layer, act_layer=act_layer, fewer_norm=vit_stage or fewer_norm, block_type='default' if fewer_norm else 'fused'), None)] in_chs = out_chs if vit_stage: for _ in range(depth): blocks.append(EfficientVitBlock(in_channels=in_chs, head_dim=head_dim, expand_ratio=6, norm_layer=norm_layer, act_layer=act_layer)) else: for i in range(depth): blocks.append(ResidualBlock(build_local_block(in_channels=in_chs, out_channels=out_chs, stride=1, expand_ratio=4, norm_layer=norm_layer, act_layer=act_layer, fewer_norm=fewer_norm, block_type='default' if fewer_norm else 'fused'), nn.Identity())) self.blocks = nn.Sequential(*blocks) def forward(self, x): return self.blocks(x) class ClassifierHead(nn.Module): def __init__(self, in_channels: int, widths: List[int], num_classes: int=1000, dropout: float=0.0, norm_layer=nn.BatchNorm2d, act_layer=nn.Hardswish, pool_type: str='avg', norm_eps: float=1e-05): super(ClassifierHead, self).__init__() self.widths = widths self.num_features = widths[-1] assert pool_type, 'Cannot disable pooling' self.in_conv = ConvNormAct(in_channels, widths[0], 1, norm_layer=norm_layer, act_layer=act_layer) self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True) self.classifier = nn.Sequential(nn.Linear(widths[0], widths[1], bias=False), nn.LayerNorm(widths[1], eps=norm_eps), act_layer(inplace=True) if act_layer is not None else nn.Identity(), nn.Dropout(dropout, inplace=False), nn.Linear(widths[1], num_classes, bias=True) if num_classes > 0 else nn.Identity()) def reset(self, num_classes: int, pool_type: Optional[str]=None): if pool_type is not None: assert pool_type, 'Cannot disable pooling' self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True) if num_classes > 0: self.classifier[-1] = nn.Linear(self.num_features, num_classes, bias=True) else: 
self.classifier[-1] = nn.Identity() def forward(self, x, pre_logits: bool=False): x = self.in_conv(x) x = self.global_pool(x) if pre_logits: x = self.classifier[0](x) x = self.classifier[1](x) x = self.classifier[2](x) x = self.classifier[3](x) else: x = self.classifier(x) return x class EfficientVit(nn.Module): def __init__(self, in_chans=3, widths=(), depths=(), head_dim=32, expand_ratio=4, norm_layer=nn.BatchNorm2d, act_layer=nn.Hardswish, global_pool='avg', head_widths=(), drop_rate=0.0, num_classes=1000): super(EfficientVit, self).__init__() self.grad_checkpointing = False self.global_pool = global_pool self.num_classes = num_classes self.stem = Stem(in_chans, widths[0], depths[0], norm_layer, act_layer) stride = self.stem.stride self.feature_info = [] self.stages = nn.Sequential() in_channels = widths[0] for (i, (w, d)) in enumerate(zip(widths[1:], depths[1:])): self.stages.append(EfficientVitStage(in_channels, w, depth=d, norm_layer=norm_layer, act_layer=act_layer, expand_ratio=expand_ratio, head_dim=head_dim, vit_stage=i >= 2)) stride *= 2 in_channels = w self.feature_info += [dict(num_chs=in_channels, reduction=stride, module=f'stages.{i}')] self.num_features = in_channels self.head = ClassifierHead(self.num_features, widths=head_widths, num_classes=num_classes, dropout=drop_rate, pool_type=self.global_pool) self.head_hidden_size = self.head.num_features @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^stem', blocks='^stages\\.(\\d+)' if coarse else [('^stages\\.(\\d+).downsample', (0,)), ('^stages\\.(\\d+)\\.\\w+\\.(\\d+)', None)]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.classifier[-1] def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x class EfficientVitLarge(nn.Module): def __init__(self, in_chans=3, widths=(), depths=(), head_dim=32, norm_layer=nn.BatchNorm2d, act_layer=GELUTanh, global_pool='avg', head_widths=(), drop_rate=0.0, num_classes=1000, norm_eps=1e-07): super(EfficientVitLarge, self).__init__() self.grad_checkpointing = False self.global_pool = global_pool self.num_classes = num_classes self.norm_eps = norm_eps norm_layer = partial(norm_layer, eps=self.norm_eps) self.stem = Stem(in_chans, widths[0], depths[0], norm_layer, act_layer, block_type='large') stride = self.stem.stride self.feature_info = [] self.stages = nn.Sequential() in_channels = widths[0] for (i, (w, d)) in enumerate(zip(widths[1:], depths[1:])): self.stages.append(EfficientVitLargeStage(in_channels, w, depth=d, norm_layer=norm_layer, act_layer=act_layer, head_dim=head_dim, vit_stage=i >= 3, fewer_norm=i >= 2)) stride *= 2 in_channels = w self.feature_info += [dict(num_chs=in_channels, reduction=stride, module=f'stages.{i}')] self.num_features = in_channels self.head = ClassifierHead(self.num_features, widths=head_widths, num_classes=num_classes, dropout=drop_rate, pool_type=self.global_pool, act_layer=act_layer, norm_eps=self.norm_eps) 
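# NOTE (editorial sketch): each stage in the loop above doubles the stride and records a
# feature_info entry, so a 224x224 input yields feature maps at reductions 4/8/16/32.
# A hypothetical feature-extraction usage via the standard timm features_only API:
#   import timm, torch
#   m = timm.create_model('efficientvit_l2', features_only=True)
#   # [f.shape for f in m(torch.randn(1, 3, 224, 224))]
#   # -> channel counts follow widths[1:], strides [4, 8, 16, 32]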
self.head_hidden_size = self.head.num_features @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^stem', blocks='^stages\\.(\\d+)' if coarse else [('^stages\\.(\\d+).downsample', (0,)), ('^stages\\.(\\d+)\\.\\w+\\.(\\d+)', None)]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.classifier[-1] def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.in_conv.conv', 'classifier': 'head.classifier.4', 'crop_pct': 0.95, 'input_size': (3, 224, 224), 'pool_size': (7, 7), **kwargs} default_cfgs = generate_default_cfgs({'efficientvit_b0.r224_in1k': _cfg(hf_hub_id='timm/'), 'efficientvit_b1.r224_in1k': _cfg(hf_hub_id='timm/'), 'efficientvit_b1.r256_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'efficientvit_b1.r288_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0), 'efficientvit_b2.r224_in1k': _cfg(hf_hub_id='timm/'), 'efficientvit_b2.r256_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'efficientvit_b2.r288_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0), 'efficientvit_b3.r224_in1k': _cfg(hf_hub_id='timm/'), 'efficientvit_b3.r256_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'efficientvit_b3.r288_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0), 'efficientvit_l1.r224_in1k': _cfg(hf_hub_id='timm/', crop_pct=1.0), 'efficientvit_l2.r224_in1k': _cfg(hf_hub_id='timm/', crop_pct=1.0), 'efficientvit_l2.r256_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'efficientvit_l2.r288_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0), 'efficientvit_l2.r384_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'efficientvit_l3.r224_in1k': _cfg(hf_hub_id='timm/', crop_pct=1.0), 'efficientvit_l3.r256_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'efficientvit_l3.r320_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), 'efficientvit_l3.r384_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0)}) def _create_efficientvit(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg(EfficientVit, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _create_efficientvit_large(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg(EfficientVitLarge, variant, pretrained, 
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model @register_model def efficientvit_b0(pretrained=False, **kwargs): model_args = dict(widths=(8, 16, 32, 64, 128), depths=(1, 2, 2, 2, 2), head_dim=16, head_widths=(1024, 1280)) return _create_efficientvit('efficientvit_b0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_b1(pretrained=False, **kwargs): model_args = dict(widths=(16, 32, 64, 128, 256), depths=(1, 2, 3, 3, 4), head_dim=16, head_widths=(1536, 1600)) return _create_efficientvit('efficientvit_b1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_b2(pretrained=False, **kwargs): model_args = dict(widths=(24, 48, 96, 192, 384), depths=(1, 3, 4, 4, 6), head_dim=32, head_widths=(2304, 2560)) return _create_efficientvit('efficientvit_b2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_b3(pretrained=False, **kwargs): model_args = dict(widths=(32, 64, 128, 256, 512), depths=(1, 4, 6, 6, 9), head_dim=32, head_widths=(2304, 2560)) return _create_efficientvit('efficientvit_b3', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_l1(pretrained=False, **kwargs): model_args = dict(widths=(32, 64, 128, 256, 512), depths=(1, 1, 1, 6, 6), head_dim=32, head_widths=(3072, 3200)) return _create_efficientvit_large('efficientvit_l1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_l2(pretrained=False, **kwargs): model_args = dict(widths=(32, 64, 128, 256, 512), depths=(1, 2, 2, 8, 8), head_dim=32, head_widths=(3072, 3200)) return _create_efficientvit_large('efficientvit_l2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_l3(pretrained=False, **kwargs): model_args = dict(widths=(64, 128, 256, 512, 1024), depths=(1, 2, 2, 8, 8), head_dim=32, head_widths=(6144, 6400)) return _create_efficientvit_large('efficientvit_l3', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/efficientvit_msra.py """""" __all__ = ['EfficientVitMsra'] import itertools from collections import OrderedDict from typing import Dict, Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SqueezeExcite, SelectAdaptivePool2d, trunc_normal_, _assert from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs class ConvNorm(torch.nn.Sequential): def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): super().__init__() self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False) self.bn = nn.BatchNorm2d(out_chs) torch.nn.init.constant_(self.bn.weight, bn_weight_init) torch.nn.init.constant_(self.bn.bias, 0) @torch.no_grad() def fuse(self): (c, bn) = (self.conv, self.bn) w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = c.weight * w[:, None, None, None] b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 m = torch.nn.Conv2d(w.size(1) * self.conv.groups, w.size(0), w.shape[2:], stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class NormLinear(torch.nn.Sequential): def __init__(self, in_features, out_features, bias=True, std=0.02, drop=0.0): super().__init__() self.bn = 
nn.BatchNorm1d(in_features) self.drop = nn.Dropout(drop) self.linear = nn.Linear(in_features, out_features, bias=bias) trunc_normal_(self.linear.weight, std=std) if self.linear.bias is not None: nn.init.constant_(self.linear.bias, 0) @torch.no_grad() def fuse(self): (bn, linear) = (self.bn, self.linear) w = bn.weight / (bn.running_var + bn.eps) ** 0.5 b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 w = linear.weight * w[None, :] if linear.bias is None: b = b @ self.linear.weight.T else: b = (linear.weight @ b[:, None]).view(-1) + self.linear.bias m = torch.nn.Linear(w.size(1), w.size(0)) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class PatchMerging(torch.nn.Module): def __init__(self, dim, out_dim): super().__init__() hid_dim = int(dim * 4) self.conv1 = ConvNorm(dim, hid_dim, 1, 1, 0) self.act = torch.nn.ReLU() self.conv2 = ConvNorm(hid_dim, hid_dim, 3, 2, 1, groups=hid_dim) self.se = SqueezeExcite(hid_dim, 0.25) self.conv3 = ConvNorm(hid_dim, out_dim, 1, 1, 0) def forward(self, x): x = self.conv3(self.se(self.act(self.conv2(self.act(self.conv1(x)))))) return x class ResidualDrop(torch.nn.Module): def __init__(self, m, drop=0.0): super().__init__() self.m = m self.drop = drop def forward(self, x): if self.training and self.drop > 0: return x + self.m(x) * torch.rand(x.size(0), 1, 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach() else: return x + self.m(x) class ConvMlp(torch.nn.Module): def __init__(self, ed, h): super().__init__() self.pw1 = ConvNorm(ed, h) self.act = torch.nn.ReLU() self.pw2 = ConvNorm(h, ed, bn_weight_init=0) def forward(self, x): x = self.pw2(self.act(self.pw1(x))) return x class CascadedGroupAttention(torch.nn.Module): attention_bias_cache: Dict[str, torch.Tensor] '' def __init__(self, dim, key_dim, num_heads=8, attn_ratio=4, resolution=14, kernels=(5, 5, 5, 5)): super().__init__() self.num_heads = num_heads self.scale = key_dim ** (-0.5) self.key_dim = key_dim self.val_dim = int(attn_ratio * key_dim) self.attn_ratio = attn_ratio qkvs = [] dws = [] for i in range(num_heads): qkvs.append(ConvNorm(dim // num_heads, self.key_dim * 2 + self.val_dim)) dws.append(ConvNorm(self.key_dim, self.key_dim, kernels[i], 1, kernels[i] // 2, groups=self.key_dim)) self.qkvs = torch.nn.ModuleList(qkvs) self.dws = torch.nn.ModuleList(dws) self.proj = torch.nn.Sequential(torch.nn.ReLU(), ConvNorm(self.val_dim * num_heads, dim, bn_weight_init=0)) points = list(itertools.product(range(resolution), range(resolution))) N = len(points) attention_offsets = {} idxs = [] for p1 in points: for p2 in points: offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) if offset not in attention_offsets: attention_offsets[offset] = len(attention_offsets) idxs.append(attention_offsets[offset]) self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False) self.attention_bias_cache = {} @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): (B, 
C, H, W) = x.shape feats_in = x.chunk(len(self.qkvs), dim=1) feats_out = [] feat = feats_in[0] attn_bias = self.get_attention_biases(x.device) for (head_idx, (qkv, dws)) in enumerate(zip(self.qkvs, self.dws)): if head_idx > 0: feat = feat + feats_in[head_idx] feat = qkv(feat) (q, k, v) = feat.view(B, -1, H, W).split([self.key_dim, self.key_dim, self.val_dim], dim=1) q = dws(q) (q, k, v) = (q.flatten(2), k.flatten(2), v.flatten(2)) q = q * self.scale attn = q.transpose(-2, -1) @ k attn = attn + attn_bias[head_idx] attn = attn.softmax(dim=-1) feat = v @ attn.transpose(-2, -1) feat = feat.view(B, self.val_dim, H, W) feats_out.append(feat) x = self.proj(torch.cat(feats_out, 1)) return x class LocalWindowAttention(torch.nn.Module): def __init__(self, dim, key_dim, num_heads=8, attn_ratio=4, resolution=14, window_resolution=7, kernels=(5, 5, 5, 5)): super().__init__() self.dim = dim self.num_heads = num_heads self.resolution = resolution assert window_resolution > 0, 'window_size must be greater than 0' self.window_resolution = window_resolution window_resolution = min(window_resolution, resolution) self.attn = CascadedGroupAttention(dim, key_dim, num_heads, attn_ratio=attn_ratio, resolution=window_resolution, kernels=kernels) def forward(self, x): H = W = self.resolution (B, C, H_, W_) = x.shape _assert(H == H_, f'input feature has wrong size, expect {(H, W)}, got {(H_, W_)}') _assert(W == W_, f'input feature has wrong size, expect {(H, W)}, got {(H_, W_)}') if H <= self.window_resolution and W <= self.window_resolution: x = self.attn(x) else: x = x.permute(0, 2, 3, 1) pad_b = (self.window_resolution - H % self.window_resolution) % self.window_resolution pad_r = (self.window_resolution - W % self.window_resolution) % self.window_resolution x = torch.nn.functional.pad(x, (0, 0, 0, pad_r, 0, pad_b)) (pH, pW) = (H + pad_b, W + pad_r) nH = pH // self.window_resolution nW = pW // self.window_resolution x = x.view(B, nH, self.window_resolution, nW, self.window_resolution, C).transpose(2, 3) x = x.reshape(B * nH * nW, self.window_resolution, self.window_resolution, C).permute(0, 3, 1, 2) x = self.attn(x) x = x.permute(0, 2, 3, 1).view(B, nH, nW, self.window_resolution, self.window_resolution, C) x = x.transpose(2, 3).reshape(B, pH, pW, C) x = x[:, :H, :W].contiguous() x = x.permute(0, 3, 1, 2) return x class EfficientVitBlock(torch.nn.Module): def __init__(self, dim, key_dim, num_heads=8, attn_ratio=4, resolution=14, window_resolution=7, kernels=[5, 5, 5, 5]): super().__init__() self.dw0 = ResidualDrop(ConvNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0.0)) self.ffn0 = ResidualDrop(ConvMlp(dim, int(dim * 2))) self.mixer = ResidualDrop(LocalWindowAttention(dim, key_dim, num_heads, attn_ratio=attn_ratio, resolution=resolution, window_resolution=window_resolution, kernels=kernels)) self.dw1 = ResidualDrop(ConvNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0.0)) self.ffn1 = ResidualDrop(ConvMlp(dim, int(dim * 2))) def forward(self, x): return self.ffn1(self.dw1(self.mixer(self.ffn0(self.dw0(x))))) class EfficientVitStage(torch.nn.Module): def __init__(self, in_dim, out_dim, key_dim, downsample=('', 1), num_heads=8, attn_ratio=4, resolution=14, window_resolution=7, kernels=[5, 5, 5, 5], depth=1): super().__init__() if downsample[0] == 'subsample': self.resolution = (resolution - 1) // downsample[1] + 1 down_blocks = [] down_blocks.append(('res1', torch.nn.Sequential(ResidualDrop(ConvNorm(in_dim, in_dim, 3, 1, 1, groups=in_dim)), ResidualDrop(ConvMlp(in_dim, int(in_dim * 2)))))) 
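# Editorial note: the 'subsample' downsample path sandwiches PatchMerging between two
# residual conv-MLP blocks ('res1' before, 'res2' after), and the new token grid follows
# ceil-division: self.resolution = (resolution - 1) // downsample[1] + 1, e.g. 14 -> 7
# for a subsample factor of 2.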
down_blocks.append(('patchmerge', PatchMerging(in_dim, out_dim))) down_blocks.append(('res2', torch.nn.Sequential(ResidualDrop(ConvNorm(out_dim, out_dim, 3, 1, 1, groups=out_dim)), ResidualDrop(ConvMlp(out_dim, int(out_dim * 2)))))) self.downsample = nn.Sequential(OrderedDict(down_blocks)) else: assert in_dim == out_dim self.downsample = nn.Identity() self.resolution = resolution blocks = [] for d in range(depth): blocks.append(EfficientVitBlock(out_dim, key_dim, num_heads, attn_ratio, self.resolution, window_resolution, kernels)) self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class PatchEmbedding(torch.nn.Sequential): def __init__(self, in_chans, dim): super().__init__() self.add_module('conv1', ConvNorm(in_chans, dim // 8, 3, 2, 1)) self.add_module('relu1', torch.nn.ReLU()) self.add_module('conv2', ConvNorm(dim // 8, dim // 4, 3, 2, 1)) self.add_module('relu2', torch.nn.ReLU()) self.add_module('conv3', ConvNorm(dim // 4, dim // 2, 3, 2, 1)) self.add_module('relu3', torch.nn.ReLU()) self.add_module('conv4', ConvNorm(dim // 2, dim, 3, 2, 1)) self.patch_size = 16 class EfficientVitMsra(nn.Module): def __init__(self, img_size=224, in_chans=3, num_classes=1000, embed_dim=(64, 128, 192), key_dim=(16, 16, 16), depth=(1, 2, 3), num_heads=(4, 4, 4), window_size=(7, 7, 7), kernels=(5, 5, 5, 5), down_ops=(('', 1), ('subsample', 2), ('subsample', 2)), global_pool='avg', drop_rate=0.0): super(EfficientVitMsra, self).__init__() self.grad_checkpointing = False self.num_classes = num_classes self.drop_rate = drop_rate self.patch_embed = PatchEmbedding(in_chans, embed_dim[0]) stride = self.patch_embed.patch_size resolution = img_size // self.patch_embed.patch_size attn_ratio = [embed_dim[i] / (key_dim[i] * num_heads[i]) for i in range(len(embed_dim))] self.feature_info = [] stages = [] pre_ed = embed_dim[0] for (i, (ed, kd, dpth, nh, ar, wd, do)) in enumerate(zip(embed_dim, key_dim, depth, num_heads, attn_ratio, window_size, down_ops)): stage = EfficientVitStage(in_dim=pre_ed, out_dim=ed, key_dim=kd, downsample=do, num_heads=nh, attn_ratio=ar, resolution=resolution, window_resolution=wd, kernels=kernels, depth=dpth) pre_ed = ed if do[0] == 'subsample' and i != 0: stride *= do[1] resolution = stage.resolution stages.append(stage) self.feature_info += [dict(num_chs=ed, reduction=stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) if global_pool == 'avg': self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) else: assert num_classes == 0 self.global_pool = nn.Identity() self.num_features = self.head_hidden_size = embed_dim[-1] self.head = NormLinear(self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else torch.nn.Identity() @torch.jit.ignore def no_weight_decay(self): return {x for x in self.state_dict().keys() if 'attention_biases' in x} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^patch_embed', blocks='^stages\\.(\\d+)' if coarse else [('^stages\\.(\\d+).downsample', (0,)), ('^stages\\.(\\d+)\\.\\w+\\.(\\d+)', None)]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.linear def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: if global_pool == 'avg': self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) else: 
assert num_classes == 0 self.global_pool = nn.Identity() self.head = NormLinear(self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else torch.nn.Identity() def forward_features(self, x): x = self.patch_embed(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.conv1.conv', 'classifier': 'head.linear', 'fixed_input_size': True, 'pool_size': (4, 4), **kwargs} default_cfgs = generate_default_cfgs({'efficientvit_m0.r224_in1k': _cfg(hf_hub_id='timm/'), 'efficientvit_m1.r224_in1k': _cfg(hf_hub_id='timm/'), 'efficientvit_m2.r224_in1k': _cfg(hf_hub_id='timm/'), 'efficientvit_m3.r224_in1k': _cfg(hf_hub_id='timm/'), 'efficientvit_m4.r224_in1k': _cfg(hf_hub_id='timm/'), 'efficientvit_m5.r224_in1k': _cfg(hf_hub_id='timm/')}) def _create_efficientvit_msra(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2)) model = build_model_with_cfg(EfficientVitMsra, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model @register_model def efficientvit_m0(pretrained=False, **kwargs): model_args = dict(img_size=224, embed_dim=[64, 128, 192], depth=[1, 2, 3], num_heads=[4, 4, 4], window_size=[7, 7, 7], kernels=[5, 5, 5, 5]) return _create_efficientvit_msra('efficientvit_m0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_m1(pretrained=False, **kwargs): model_args = dict(img_size=224, embed_dim=[128, 144, 192], depth=[1, 2, 3], num_heads=[2, 3, 3], window_size=[7, 7, 7], kernels=[7, 5, 3, 3]) return _create_efficientvit_msra('efficientvit_m1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_m2(pretrained=False, **kwargs): model_args = dict(img_size=224, embed_dim=[128, 192, 224], depth=[1, 2, 3], num_heads=[4, 3, 2], window_size=[7, 7, 7], kernels=[7, 5, 3, 3]) return _create_efficientvit_msra('efficientvit_m2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_m3(pretrained=False, **kwargs): model_args = dict(img_size=224, embed_dim=[128, 240, 320], depth=[1, 2, 3], num_heads=[4, 3, 4], window_size=[7, 7, 7], kernels=[5, 5, 5, 5]) return _create_efficientvit_msra('efficientvit_m3', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_m4(pretrained=False, **kwargs): model_args = dict(img_size=224, embed_dim=[128, 256, 384], depth=[1, 2, 3], num_heads=[4, 4, 4], window_size=[7, 7, 7], kernels=[7, 5, 3, 3]) return _create_efficientvit_msra('efficientvit_m4', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_m5(pretrained=False, **kwargs): model_args = dict(img_size=224, embed_dim=[192, 288, 384], depth=[1, 3, 4], num_heads=[3, 3, 4], window_size=[7, 7, 7], kernels=[7, 5, 3, 3]) return _create_efficientvit_msra('efficientvit_m5', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/eva.py """""" import math from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from 
torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import PatchEmbed, Mlp, GluMlp, SwiGLU, LayerNorm, DropPath, PatchDropout, RotaryEmbeddingCat, apply_rot_embed_cat, apply_keep_indices_nlc, trunc_normal_, resample_patch_embed, resample_abs_pos_embed, to_2tuple, use_fused_attn from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._registry import generate_default_cfgs, register_model __all__ = ['Eva'] class EvaAttention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim: int, num_heads: int=8, qkv_bias: bool=True, qkv_fused: bool=True, num_prefix_tokens: int=1, qkv_bias_separate: bool=False, attn_drop: float=0.0, proj_drop: float=0.0, attn_head_dim: Optional[int]=None, norm_layer: Optional[Callable]=None): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads if attn_head_dim is not None: head_dim = attn_head_dim all_head_dim = head_dim * self.num_heads self.scale = head_dim ** (-0.5) self.num_prefix_tokens = num_prefix_tokens self.fused_attn = use_fused_attn() self.qkv_bias_separate = qkv_bias_separate if qkv_fused: self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) self.q_proj = self.k_proj = self.v_proj = None if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False) self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) else: self.q_bias = self.k_bias = self.v_bias = None else: self.q_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias) self.k_proj = nn.Linear(dim, all_head_dim, bias=False) self.v_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias) self.qkv = None self.q_bias = self.k_bias = self.v_bias = None self.attn_drop = nn.Dropout(attn_drop) self.norm = norm_layer(all_head_dim) if norm_layer is not None else nn.Identity() self.proj = nn.Linear(all_head_dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, rope: Optional[torch.Tensor]=None, attn_mask: Optional[torch.Tensor]=None): (B, N, C) = x.shape if self.qkv is not None: if self.q_bias is None: qkv = self.qkv(x) else: qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.qkv_bias_separate: qkv = self.qkv(x) qkv += qkv_bias else: qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) else: q = self.q_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) k = self.k_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) v = self.v_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) if rope is not None: npt = self.num_prefix_tokens q = torch.cat([q[:, :, :npt, :], apply_rot_embed_cat(q[:, :, npt:, :], rope)], dim=2).type_as(v) k = torch.cat([k[:, :, :npt, :], apply_rot_embed_cat(k[:, :, npt:, :], rope)], dim=2).type_as(v) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) if attn_mask is not None: attn_mask = attn_mask.to(torch.bool) attn = attn.masked_fill(~attn_mask[:, None, None, :], float('-inf')) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.norm(x) x = self.proj(x) x = self.proj_drop(x) return x class EvaBlock(nn.Module): def __init__(self, dim: int, num_heads: int, qkv_bias: bool=True, qkv_fused: 
bool=True, mlp_ratio: float=4.0, swiglu_mlp: bool=False, scale_mlp: bool=False, scale_attn_inner: bool=False, num_prefix_tokens: int=1, proj_drop: float=0.0, attn_drop: float=0.0, drop_path: float=0.0, init_values: Optional[float]=None, act_layer: Callable=nn.GELU, norm_layer: Callable=LayerNorm, attn_head_dim: Optional[int]=None): super().__init__() self.norm1 = norm_layer(dim) self.attn = EvaAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, num_prefix_tokens=num_prefix_tokens, attn_drop=attn_drop, proj_drop=proj_drop, attn_head_dim=attn_head_dim, norm_layer=norm_layer if scale_attn_inner else None) self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) hidden_features = int(dim * mlp_ratio) if swiglu_mlp: if scale_mlp: self.mlp = SwiGLU(in_features=dim, hidden_features=hidden_features, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop) else: self.mlp = GluMlp(in_features=dim, hidden_features=hidden_features * 2, norm_layer=norm_layer if scale_mlp else None, act_layer=nn.SiLU, gate_last=False, drop=proj_drop) else: self.mlp = Mlp(in_features=dim, hidden_features=hidden_features, act_layer=act_layer, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop) self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x, rope: Optional[torch.Tensor]=None, attn_mask: Optional[torch.Tensor]=None): if self.gamma_1 is None: x = x + self.drop_path1(self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask)) x = x + self.drop_path2(self.mlp(self.norm2(x))) else: x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask)) x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x))) return x class EvaBlockPostNorm(nn.Module): def __init__(self, dim: int, num_heads: int, qkv_bias: bool=True, qkv_fused: bool=True, mlp_ratio: float=4.0, swiglu_mlp: bool=False, scale_mlp: bool=False, scale_attn_inner: bool=False, num_prefix_tokens: int=1, proj_drop: float=0.0, attn_drop: float=0.0, drop_path: float=0.0, init_values: Optional[float]=None, act_layer: Callable=nn.GELU, norm_layer: Callable=nn.LayerNorm, attn_head_dim: Optional[int]=None): super().__init__() self.attn = EvaAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, num_prefix_tokens=num_prefix_tokens, attn_drop=attn_drop, proj_drop=proj_drop, attn_head_dim=attn_head_dim, norm_layer=norm_layer if scale_attn_inner else None) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() hidden_features = int(dim * mlp_ratio) if swiglu_mlp: if scale_mlp: self.mlp = SwiGLU(in_features=dim, hidden_features=hidden_features, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop) else: self.mlp = GluMlp(in_features=dim, hidden_features=hidden_features * 2, norm_layer=norm_layer if scale_mlp else None, act_layer=nn.SiLU, gate_last=False, drop=proj_drop) else: self.mlp = Mlp(in_features=dim, hidden_features=hidden_features, act_layer=act_layer, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x, rope: Optional[torch.Tensor]=None, attn_mask: Optional[torch.Tensor]=None): x = x + 
self.drop_path1(self.norm1(self.attn(x, rope=rope, attn_mask=attn_mask))) x = x + self.drop_path2(self.norm2(self.mlp(x))) return x class Eva(nn.Module): def __init__(self, img_size: Union[int, Tuple[int, int]]=224, patch_size: Union[int, Tuple[int, int]]=16, in_chans: int=3, num_classes: int=1000, global_pool: str='avg', embed_dim: int=768, depth: int=12, num_heads: int=12, qkv_bias: bool=True, qkv_fused: bool=True, mlp_ratio: float=4.0, swiglu_mlp: bool=False, scale_mlp: bool=False, scale_attn_inner: bool=False, drop_rate: float=0.0, pos_drop_rate: float=0.0, patch_drop_rate: float=0.0, proj_drop_rate: float=0.0, attn_drop_rate: float=0.0, drop_path_rate: float=0.0, norm_layer: Callable=LayerNorm, init_values: Optional[float]=None, class_token: bool=True, num_reg_tokens: int=0, use_abs_pos_emb: bool=True, use_rot_pos_emb: bool=False, use_post_norm: bool=False, dynamic_img_size: bool=False, dynamic_img_pad: bool=False, ref_feat_shape: Optional[Union[Tuple[int, int], int]]=None, head_init_scale: float=0.001): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = self.embed_dim = embed_dim self.num_prefix_tokens = (1 if class_token else 0) + num_reg_tokens self.dynamic_img_size = dynamic_img_size self.grad_checkpointing = False embed_args = {} if dynamic_img_size: embed_args.update(dict(strict_img_size=False, output_fmt='NHWC')) self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, dynamic_img_pad=dynamic_img_pad, **embed_args) num_patches = self.patch_embed.num_patches r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None self.reg_token = nn.Parameter(torch.zeros(1, num_reg_tokens, embed_dim)) if num_reg_tokens else None self.cls_embed = class_token and self.reg_token is None self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_prefix_tokens, embed_dim)) if use_abs_pos_emb else None self.pos_drop = nn.Dropout(p=pos_drop_rate) if patch_drop_rate > 0: self.patch_drop = PatchDropout(patch_drop_rate, num_prefix_tokens=self.num_prefix_tokens, return_indices=True) else: self.patch_drop = None if use_rot_pos_emb: ref_feat_shape = to_2tuple(ref_feat_shape) if ref_feat_shape is not None else None self.rope = RotaryEmbeddingCat(embed_dim // num_heads, in_pixels=False, feat_shape=None if dynamic_img_size else self.patch_embed.grid_size, ref_feat_shape=ref_feat_shape) else: self.rope = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] block_fn = EvaBlockPostNorm if use_post_norm else EvaBlock self.blocks = nn.ModuleList([block_fn(dim=embed_dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, mlp_ratio=mlp_ratio, swiglu_mlp=swiglu_mlp, scale_mlp=scale_mlp, scale_attn_inner=scale_attn_inner, num_prefix_tokens=self.num_prefix_tokens, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values) for i in range(depth)]) self.feature_info = [dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)] use_fc_norm = self.global_pool == 'avg' self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim) self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity() self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) if 
self.pos_embed is not None: trunc_normal_(self.pos_embed, std=0.02) if self.cls_token is not None: trunc_normal_(self.cls_token, std=0.02) if self.reg_token is not None: trunc_normal_(self.reg_token, std=0.02) self.fix_init_weight() if isinstance(self.head, nn.Linear): trunc_normal_(self.head.weight, std=0.02) self.head.weight.data.mul_(head_init_scale) self.head.bias.data.mul_(head_init_scale) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for (layer_id, layer) in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.zeros_(m.bias) @torch.jit.ignore def no_weight_decay(self): nwd = {'pos_embed', 'cls_token'} return nwd @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^cls_token|pos_embed|patch_embed', blocks=[('^blocks\\.(\\d+)', None), ('^norm', (99999,))]) return matcher @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def _pos_embed(self, x) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if self.dynamic_img_size: (B, H, W, C) = x.shape if self.pos_embed is not None: pos_embed = resample_abs_pos_embed(self.pos_embed, (H, W), num_prefix_tokens=self.num_prefix_tokens) else: pos_embed = None x = x.view(B, -1, C) rot_pos_embed = self.rope.get_embed(shape=(H, W)) if self.rope is not None else None else: pos_embed = self.pos_embed rot_pos_embed = self.rope.get_embed() if self.rope is not None else None if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) if pos_embed is not None: x = x + pos_embed if self.reg_token is not None: to_cat = [] if self.cls_token is not None: to_cat.append(self.cls_token.expand(x.shape[0], -1, -1)) to_cat.append(self.reg_token.expand(x.shape[0], -1, -1)) x = torch.cat(to_cat + [x], dim=1) x = self.pos_drop(x) if self.patch_drop is not None: (x, keep_indices) = self.patch_drop(x) if rot_pos_embed is not None and keep_indices is not None: rot_pos_embed = apply_keep_indices_nlc(x, rot_pos_embed, keep_indices) return (x, rot_pos_embed) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, return_prefix_tokens: bool=False, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW', 'NLC'), 'Output format for EVA-ViT features must be one of NCHW or NLC.' 
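# Editorial sketch: forward_intermediates collects per-block features (optionally normalized,
# optionally reshaped to NCHW with prefix tokens split off) without a separate hook mechanism.
# A hypothetical call, assuming an Eva instance `model` and an input batch `x`:
#   feats = model.forward_intermediates(x, indices=[3, 7, 11], intermediates_only=True)
#   # each element: (B, embed_dim, H/patch, W/patch) in the default 'NCHW' format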
reshape = output_fmt == 'NCHW' intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) (B, _, height, width) = x.shape x = self.patch_embed(x) (x, rot_pos_embed) = self._pos_embed(x) if torch.jit.is_scripting() or not stop_early: blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for (i, blk) in enumerate(blocks): x = blk(x, rope=rot_pos_embed) if i in take_indices: intermediates.append(self.norm(x) if norm else x) if self.num_prefix_tokens: prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates] intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates] if reshape: (H, W) = self.patch_embed.dynamic_feat_size((height, width)) intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] if not torch.jit.is_scripting() and return_prefix_tokens: intermediates = list(zip(intermediates, prefix_tokens)) if intermediates_only: return intermediates x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] if prune_norm: self.norm = nn.Identity() if prune_head: self.fc_norm = nn.Identity() self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x) (x, rot_pos_embed) = self._pos_embed(x) for blk in self.blocks: if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint(blk, x, rope=rot_pos_embed) else: x = blk(x, rope=rot_pos_embed) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool: x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.fc_norm(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model, interpolation='bicubic', antialias=True): out_dict = {} state_dict = state_dict.get('model_ema', state_dict) state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('module', state_dict) state_dict = state_dict.get('state_dict', state_dict) if 'visual.trunk.pos_embed' in state_dict: prefix = 'visual.trunk.' elif 'visual.pos_embed' in state_dict: prefix = 'visual.' 
else: prefix = '' mim_weights = prefix + 'mask_token' in state_dict no_qkv = prefix + 'blocks.0.attn.q_proj.weight' in state_dict len_prefix = len(prefix) for (k, v) in state_dict.items(): if prefix: if k.startswith(prefix): k = k[len_prefix:] else: continue if 'rope' in k: continue if 'patch_embed.proj.weight' in k: (_, _, H, W) = model.patch_embed.proj.weight.shape if v.shape[-1] != W or v.shape[-2] != H: v = resample_patch_embed(v, (H, W), interpolation=interpolation, antialias=antialias, verbose=True) elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) v = resample_abs_pos_embed(v, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True) k = k.replace('mlp.ffn_ln', 'mlp.norm') k = k.replace('attn.inner_attn_ln', 'attn.norm') k = k.replace('mlp.w12', 'mlp.fc1') k = k.replace('mlp.w1', 'mlp.fc1_g') k = k.replace('mlp.w2', 'mlp.fc1_x') k = k.replace('mlp.w3', 'mlp.fc2') if no_qkv: k = k.replace('q_bias', 'q_proj.bias') k = k.replace('v_bias', 'v_proj.bias') if mim_weights and k in ('mask_token', 'lm_head.weight', 'lm_head.bias', 'norm.weight', 'norm.bias'): if k == 'norm.weight' or k == 'norm.bias': k = k.replace('norm', 'fc_norm') else: continue out_dict[k] = v return out_dict def _create_eva(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) model = build_model_with_cfg(Eva, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': OPENAI_CLIP_MEAN, 'std': OPENAI_CLIP_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', 'license': 'mit', **kwargs} default_cfgs = generate_default_cfgs({'eva_giant_patch14_224.clip_ft_in1k': _cfg(hf_hub_id='timm/'), 'eva_giant_patch14_336.clip_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'eva_giant_patch14_336.m30m_ft_in22k_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'eva_giant_patch14_560.m30m_ft_in22k_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 560, 560), crop_pct=1.0, crop_mode='squash'), 'eva02_base_patch14_448.mim_in22k_ft_in22k_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash'), 'eva02_large_patch14_448.mim_in22k_ft_in22k_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash'), 'eva02_large_patch14_448.mim_m38m_ft_in22k_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash'), 'eva02_tiny_patch14_336.mim_in22k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0), 'eva02_small_patch14_336.mim_in22k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0), 'eva02_base_patch14_448.mim_in22k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0), 'eva02_large_patch14_448.mim_in22k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0), 'eva02_large_patch14_448.mim_m38m_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0), 
'eva02_base_patch14_448.mim_in22k_ft_in22k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841), 'eva02_large_patch14_448.mim_in22k_ft_in22k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841), 'eva02_large_patch14_448.mim_m38m_ft_in22k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841), 'eva02_tiny_patch14_224.mim_in22k': _cfg(hf_hub_id='timm/', num_classes=0), 'eva02_small_patch14_224.mim_in22k': _cfg(hf_hub_id='timm/', num_classes=0), 'eva02_base_patch14_224.mim_in22k': _cfg(hf_hub_id='timm/', num_classes=0), 'eva02_large_patch14_224.mim_in22k': _cfg(hf_hub_id='timm/', num_classes=0), 'eva02_large_patch14_224.mim_m38m': _cfg(hf_hub_id='timm/', num_classes=0), 'eva_giant_patch14_clip_224.laion400m': _cfg(hf_hub_id='timm/eva_giant_patch14_clip_224.laion400m_s11b_b41k', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024), 'eva_giant_patch14_clip_224.merged2b': _cfg(hf_hub_id='timm/eva_giant_patch14_plus_clip_224.merged2b_s11b_b114k', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024), 'eva02_base_patch16_clip_224.merged2b': _cfg(hf_hub_id='timm/eva02_base_patch16_clip_224.merged2b_s8b_b131k', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=512), 'eva02_large_patch14_clip_224.merged2b': _cfg(hf_hub_id='timm/eva02_large_patch14_clip_224.merged2b_s4b_b131k', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=768), 'eva02_large_patch14_clip_336.merged2b': _cfg(hf_hub_id='timm/eva02_large_patch14_clip_336.merged2b_s6b_b61k', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 336, 336), crop_pct=1.0, num_classes=768), 'eva02_enormous_patch14_clip_224.laion2b': _cfg(hf_hub_id='timm/eva02_enormous_patch14_clip_224.laion2b_s4b_b115k', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024), 'eva02_enormous_patch14_clip_224.laion2b_plus': _cfg(hf_hub_id='timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024), 'eva02_enormous_patch14_clip_224.pretrain': _cfg(num_classes=0), 'vit_medium_patch16_rope_reg1_gap_256.sbb_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)), 'vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)), 'vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_base_patch16_rope_reg1_gap_256.sbb_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))}) @register_model def eva_giant_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_336(pretrained=False, **kwargs) -> Eva: model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_560(pretrained=False, **kwargs) -> Eva: model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = 
_create_eva('eva_giant_patch14_560', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_tiny_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=224, patch_size=14, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16)) model = _create_eva('eva02_tiny_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_small_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=224, patch_size=14, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16)) model = _create_eva('eva02_small_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_base_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=224, patch_size=14, embed_dim=768, depth=12, num_heads=12, qkv_fused=False, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16)) model = _create_eva('eva02_base_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=224, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16)) model = _create_eva('eva02_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_tiny_patch14_336(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=336, patch_size=14, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16)) model = _create_eva('eva02_tiny_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_small_patch14_336(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=336, patch_size=14, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16)) model = _create_eva('eva02_small_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_base_patch14_448(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=448, patch_size=14, embed_dim=768, depth=12, num_heads=12, qkv_fused=False, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16)) model = _create_eva('eva02_base_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_448(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=448, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16)) model = _create_eva('eva02_large_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_clip_224(pretrained=False, **kwargs) -> Eva: model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408, global_pool=kwargs.pop('global_pool', 'token')) model = _create_eva('eva_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_base_patch16_clip_224(pretrained=False, 
**kwargs) -> Eva: model_args = dict(img_size=224, patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_fused=False, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, scale_mlp=True, scale_attn_inner=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), global_pool=kwargs.pop('global_pool', 'token')) model = _create_eva('eva02_base_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_clip_224(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=224, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, scale_attn_inner=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), global_pool=kwargs.pop('global_pool', 'token')) model = _create_eva('eva02_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_clip_336(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=336, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, scale_attn_inner=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), global_pool=kwargs.pop('global_pool', 'token')) model = _create_eva('eva02_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_enormous_patch14_clip_224(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=224, patch_size=14, embed_dim=1792, depth=64, num_heads=16, mlp_ratio=15360 / 1792, use_post_norm=True, global_pool=kwargs.pop('global_pool', 'token')) model = _create_eva('eva02_enormous_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_medium_patch16_rope_reg1_gap_256(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=256, patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_fused=True, qkv_bias=True, init_values=1e-05, class_token=False, num_reg_tokens=1, use_rot_pos_emb=True, use_abs_pos_emb=False, ref_feat_shape=(16, 16)) model = _create_eva('vit_medium_patch16_rope_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_mediumd_patch16_rope_reg1_gap_256(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=256, patch_size=16, embed_dim=512, depth=20, num_heads=8, qkv_fused=True, qkv_bias=False, init_values=1e-05, class_token=False, num_reg_tokens=1, use_rot_pos_emb=True, use_abs_pos_emb=False, ref_feat_shape=(16, 16)) model = _create_eva('vit_mediumd_patch16_rope_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_betwixt_patch16_rope_reg4_gap_256(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=256, patch_size=16, embed_dim=640, depth=12, num_heads=10, qkv_fused=True, qkv_bias=True, init_values=1e-05, class_token=False, num_reg_tokens=4, use_rot_pos_emb=True, use_abs_pos_emb=False, ref_feat_shape=(16, 16)) model = _create_eva('vit_betwixt_patch16_rope_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_rope_reg1_gap_256(pretrained=False, **kwargs) -> Eva: model_args = dict(img_size=256, patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_fused=True, qkv_bias=True, init_values=1e-05, class_token=False, num_reg_tokens=1, use_rot_pos_emb=True, use_abs_pos_emb=False, ref_feat_shape=(16, 16)) model = _create_eva('vit_base_patch16_rope_reg1_gap_256', 
pretrained=pretrained, **dict(model_args, **kwargs)) return model # File: pytorch-image-models-main/timm/models/fastvit.py import os from functools import partial from typing import List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, create_conv2d, ConvNormAct, SqueezeExcite, use_fused_attn, ClassifierHead from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['FastVit'] def num_groups(group_size, channels): if not group_size: return 1 else: assert channels % group_size == 0 return channels // group_size class MobileOneBlock(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int, stride: int=1, dilation: int=1, group_size: int=0, inference_mode: bool=False, use_se: bool=False, use_act: bool=True, use_scale_branch: bool=True, num_conv_branches: int=1, act_layer: nn.Module=nn.GELU) -> None: super(MobileOneBlock, self).__init__() self.inference_mode = inference_mode self.groups = num_groups(group_size, in_chs) self.stride = stride self.dilation = dilation self.kernel_size = kernel_size self.in_chs = in_chs self.out_chs = out_chs self.num_conv_branches = num_conv_branches self.se = SqueezeExcite(out_chs, rd_divisor=1) if use_se else nn.Identity() if inference_mode: self.reparam_conv = create_conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=self.groups, bias=True) else: self.reparam_conv = None self.identity = nn.BatchNorm2d(num_features=in_chs) if out_chs == in_chs and stride == 1 else None if num_conv_branches > 0: self.conv_kxk = nn.ModuleList([ConvNormAct(self.in_chs, self.out_chs, kernel_size=kernel_size, stride=self.stride, groups=self.groups, apply_act=False) for _ in range(self.num_conv_branches)]) else: self.conv_kxk = None self.conv_scale = None if kernel_size > 1 and use_scale_branch: self.conv_scale = ConvNormAct(self.in_chs, self.out_chs, kernel_size=1, stride=self.stride, groups=self.groups, apply_act=False) self.act = act_layer() if use_act else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: if self.reparam_conv is not None: return self.act(self.se(self.reparam_conv(x))) identity_out = 0 if self.identity is not None: identity_out = self.identity(x) scale_out = 0 if self.conv_scale is not None: scale_out = self.conv_scale(x) out = scale_out + identity_out if self.conv_kxk is not None: for rc in self.conv_kxk: out += rc(x) return self.act(self.se(out)) def reparameterize(self): if self.reparam_conv is not None: return (kernel, bias) = self._get_kernel_bias() self.reparam_conv = create_conv2d(in_channels=self.in_chs, out_channels=self.out_chs, kernel_size=self.kernel_size, stride=self.stride, dilation=self.dilation, groups=self.groups, bias=True) self.reparam_conv.weight.data = kernel self.reparam_conv.bias.data = bias for (name, para) in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__('conv_kxk') self.__delattr__('conv_scale') if hasattr(self, 'identity'): self.__delattr__('identity') self.inference_mode = True def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: kernel_scale = 0 bias_scale = 0 if self.conv_scale is not None: (kernel_scale, bias_scale) = self._fuse_bn_tensor(self.conv_scale) pad = self.kernel_size // 2 kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, 
pad, pad]) kernel_identity = 0 bias_identity = 0 if self.identity is not None: (kernel_identity, bias_identity) = self._fuse_bn_tensor(self.identity) kernel_conv = 0 bias_conv = 0 if self.conv_kxk is not None: for ix in range(self.num_conv_branches): (_kernel, _bias) = self._fuse_bn_tensor(self.conv_kxk[ix]) kernel_conv += _kernel bias_conv += _bias kernel_final = kernel_conv + kernel_scale + kernel_identity bias_final = bias_conv + bias_scale + bias_identity return (kernel_final, bias_final) def _fuse_bn_tensor(self, branch: Union[nn.Sequential, nn.BatchNorm2d]) -> Tuple[torch.Tensor, torch.Tensor]: if isinstance(branch, ConvNormAct): kernel = branch.conv.weight running_mean = branch.bn.running_mean running_var = branch.bn.running_var gamma = branch.bn.weight beta = branch.bn.bias eps = branch.bn.eps else: assert isinstance(branch, nn.BatchNorm2d) if not hasattr(self, 'id_tensor'): input_dim = self.in_chs // self.groups kernel_value = torch.zeros((self.in_chs, input_dim, self.kernel_size, self.kernel_size), dtype=branch.weight.dtype, device=branch.weight.device) for i in range(self.in_chs): kernel_value[i, i % input_dim, self.kernel_size // 2, self.kernel_size // 2] = 1 self.id_tensor = kernel_value kernel = self.id_tensor running_mean = branch.running_mean running_var = branch.running_var gamma = branch.weight beta = branch.bias eps = branch.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return (kernel * t, beta - running_mean * gamma / std) class ReparamLargeKernelConv(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int, stride: int, group_size: int, small_kernel: Optional[int]=None, use_se: bool=False, act_layer: Optional[nn.Module]=None, inference_mode: bool=False) -> None: super(ReparamLargeKernelConv, self).__init__() self.stride = stride self.groups = num_groups(group_size, in_chs) self.in_chs = in_chs self.out_chs = out_chs self.kernel_size = kernel_size self.small_kernel = small_kernel if inference_mode: self.reparam_conv = create_conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, dilation=1, groups=self.groups, bias=True) else: self.reparam_conv = None self.large_conv = ConvNormAct(in_chs, out_chs, kernel_size=kernel_size, stride=self.stride, groups=self.groups, apply_act=False) if small_kernel is not None: assert small_kernel <= kernel_size, 'The kernel size for re-param cannot be larger than the large kernel!' 
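# Note (added commentary, not original code): the small-kernel branch can be folded into the
# large-kernel conv because get_kernel_bias() below zero-pads the fused small kernel by
# (kernel_size - small_kernel) // 2 on every side, after which the two weight tensors simply add.
# The BN fusion itself uses the standard identity
#   w' = w * gamma / sqrt(var + eps),  b' = beta - mean * gamma / sqrt(var + eps),
# as implemented in the _fuse_bn helpers above.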
self.small_conv = ConvNormAct(in_chs, out_chs, kernel_size=small_kernel, stride=self.stride, groups=self.groups, apply_act=False) self.se = SqueezeExcite(out_chs, rd_ratio=0.25) if use_se else nn.Identity() self.act = act_layer() if act_layer is not None else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: if self.reparam_conv is not None: out = self.reparam_conv(x) else: out = self.large_conv(x) if self.small_conv is not None: out = out + self.small_conv(x) out = self.se(out) out = self.act(out) return out def get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: (eq_k, eq_b) = self._fuse_bn(self.large_conv.conv, self.large_conv.bn) if hasattr(self, 'small_conv'): (small_k, small_b) = self._fuse_bn(self.small_conv.conv, self.small_conv.bn) eq_b += small_b eq_k += nn.functional.pad(small_k, [(self.kernel_size - self.small_kernel) // 2] * 4) return (eq_k, eq_b) def reparameterize(self) -> None: (eq_k, eq_b) = self.get_kernel_bias() self.reparam_conv = create_conv2d(self.in_chs, self.out_chs, kernel_size=self.kernel_size, stride=self.stride, groups=self.groups, bias=True) self.reparam_conv.weight.data = eq_k self.reparam_conv.bias.data = eq_b self.__delattr__('large_conv') if hasattr(self, 'small_conv'): self.__delattr__('small_conv') @staticmethod def _fuse_bn(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> Tuple[torch.Tensor, torch.Tensor]: kernel = conv.weight running_mean = bn.running_mean running_var = bn.running_var gamma = bn.weight beta = bn.bias eps = bn.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return (kernel * t, beta - running_mean * gamma / std) def convolutional_stem(in_chs: int, out_chs: int, act_layer: nn.Module=nn.GELU, inference_mode: bool=False) -> nn.Sequential: return nn.Sequential(MobileOneBlock(in_chs=in_chs, out_chs=out_chs, kernel_size=3, stride=2, act_layer=act_layer, inference_mode=inference_mode), MobileOneBlock(in_chs=out_chs, out_chs=out_chs, kernel_size=3, stride=2, group_size=1, act_layer=act_layer, inference_mode=inference_mode), MobileOneBlock(in_chs=out_chs, out_chs=out_chs, kernel_size=1, stride=1, act_layer=act_layer, inference_mode=inference_mode)) class Attention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim: int, head_dim: int=32, qkv_bias: bool=False, attn_drop: float=0.0, proj_drop: float=0.0) -> None: super().__init__() assert dim % head_dim == 0, 'dim should be divisible by head_dim' self.head_dim = head_dim self.num_heads = dim // head_dim self.scale = head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x: torch.Tensor) -> torch.Tensor: (B, C, H, W) = x.shape N = H * W x = x.flatten(2).transpose(-2, -1) qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) x = x.transpose(-2, -1).reshape(B, C, H, W) return x class PatchEmbed(nn.Module): def __init__(self, patch_size: int, stride: int, in_chs: int, embed_dim: int, act_layer: nn.Module=nn.GELU, lkc_use_act: bool=False, use_se: bool=False, inference_mode: 
bool=False) -> None: super().__init__() self.proj = nn.Sequential(ReparamLargeKernelConv(in_chs=in_chs, out_chs=embed_dim, kernel_size=patch_size, stride=stride, group_size=1, small_kernel=3, use_se=use_se, act_layer=act_layer if lkc_use_act else None, inference_mode=inference_mode), MobileOneBlock(in_chs=embed_dim, out_chs=embed_dim, kernel_size=1, stride=1, use_se=False, act_layer=act_layer, inference_mode=inference_mode)) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.proj(x) return x class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-05, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim, 1, 1)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class RepMixer(nn.Module): def __init__(self, dim, kernel_size=3, layer_scale_init_value=1e-05, inference_mode: bool=False): super().__init__() self.dim = dim self.kernel_size = kernel_size self.inference_mode = inference_mode if inference_mode: self.reparam_conv = nn.Conv2d(self.dim, self.dim, kernel_size=self.kernel_size, stride=1, padding=self.kernel_size // 2, groups=self.dim, bias=True) else: self.reparam_conv = None self.norm = MobileOneBlock(dim, dim, kernel_size, group_size=1, use_act=False, use_scale_branch=False, num_conv_branches=0) self.mixer = MobileOneBlock(dim, dim, kernel_size, group_size=1, use_act=False) if layer_scale_init_value is not None: self.layer_scale = LayerScale2d(dim, layer_scale_init_value) else: self.layer_scale = nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: if self.reparam_conv is not None: x = self.reparam_conv(x) else: x = x + self.layer_scale(self.mixer(x) - self.norm(x)) return x def reparameterize(self) -> None: if self.inference_mode: return self.mixer.reparameterize() self.norm.reparameterize() if isinstance(self.layer_scale, LayerScale2d): w = self.mixer.id_tensor + self.layer_scale.gamma.unsqueeze(-1) * (self.mixer.reparam_conv.weight - self.norm.reparam_conv.weight) b = torch.squeeze(self.layer_scale.gamma) * (self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias) else: w = self.mixer.id_tensor + self.mixer.reparam_conv.weight - self.norm.reparam_conv.weight b = self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias self.reparam_conv = create_conv2d(self.dim, self.dim, kernel_size=self.kernel_size, stride=1, groups=self.dim, bias=True) self.reparam_conv.weight.data = w self.reparam_conv.bias.data = b for (name, para) in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__('mixer') self.__delattr__('norm') self.__delattr__('layer_scale') class ConvMlp(nn.Module): def __init__(self, in_chs: int, hidden_channels: Optional[int]=None, out_chs: Optional[int]=None, act_layer: nn.Module=nn.GELU, drop: float=0.0) -> None: super().__init__() out_chs = out_chs or in_chs hidden_channels = hidden_channels or in_chs self.conv = ConvNormAct(in_chs, out_chs, kernel_size=7, groups=in_chs, apply_act=False) self.fc1 = nn.Conv2d(in_chs, hidden_channels, kernel_size=1) self.act = act_layer() self.fc2 = nn.Conv2d(hidden_channels, out_chs, kernel_size=1) self.drop = nn.Dropout(drop) self.apply(self._init_weights) def _init_weights(self, m: nn.Module) -> None: if isinstance(m, nn.Conv2d): trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.conv(x) x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x 
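# --- Added sketch (not part of the original file) ---
# A minimal check, assuming the MobileOneBlock defined above, that the multi-branch
# training-time form and the single-conv reparameterized form agree in eval mode.
# The helper name `_demo_mobileone_reparam` is hypothetical and only illustrates
# how reparameterize() is meant to be used before inference/export.
def _demo_mobileone_reparam():
    blk = MobileOneBlock(in_chs=8, out_chs=8, kernel_size=3, stride=1, group_size=1)
    blk.eval()  # BN branches must be in eval mode for the equivalence to hold
    x = torch.randn(1, 8, 16, 16)
    with torch.no_grad():
        y_branched = blk(x)
        blk.reparameterize()  # folds identity/scale/kxk branches + BN into one conv
        y_fused = blk(x)
    # tolerance accounts for float accumulation order differences
    assert torch.allclose(y_branched, y_fused, atol=1e-5)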
class RepConditionalPosEnc(nn.Module): def __init__(self, dim: int, dim_out: Optional[int]=None, spatial_shape: Union[int, Tuple[int, int]]=(7, 7), inference_mode=False) -> None: super(RepConditionalPosEnc, self).__init__() if isinstance(spatial_shape, int): spatial_shape = tuple([spatial_shape] * 2) assert isinstance(spatial_shape, Tuple), f'"spatial_shape" must by a sequence or int, get {type(spatial_shape)} instead.' assert len(spatial_shape) == 2, f'Length of "spatial_shape" should be 2, got {len(spatial_shape)} instead.' self.spatial_shape = spatial_shape self.dim = dim self.dim_out = dim_out or dim self.groups = dim if inference_mode: self.reparam_conv = nn.Conv2d(self.dim, self.dim_out, kernel_size=self.spatial_shape, stride=1, padding=spatial_shape[0] // 2, groups=self.groups, bias=True) else: self.reparam_conv = None self.pos_enc = nn.Conv2d(self.dim, self.dim_out, spatial_shape, 1, int(spatial_shape[0] // 2), groups=self.groups, bias=True) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.reparam_conv is not None: x = self.reparam_conv(x) else: x = self.pos_enc(x) + x return x def reparameterize(self) -> None: input_dim = self.dim // self.groups kernel_value = torch.zeros((self.dim, input_dim, self.spatial_shape[0], self.spatial_shape[1]), dtype=self.pos_enc.weight.dtype, device=self.pos_enc.weight.device) for i in range(self.dim): kernel_value[i, i % input_dim, self.spatial_shape[0] // 2, self.spatial_shape[1] // 2] = 1 id_tensor = kernel_value w_final = id_tensor + self.pos_enc.weight b_final = self.pos_enc.bias self.reparam_conv = nn.Conv2d(self.dim, self.dim_out, kernel_size=self.spatial_shape, stride=1, padding=int(self.spatial_shape[0] // 2), groups=self.groups, bias=True) self.reparam_conv.weight.data = w_final self.reparam_conv.bias.data = b_final for (name, para) in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__('pos_enc') class RepMixerBlock(nn.Module): def __init__(self, dim: int, kernel_size: int=3, mlp_ratio: float=4.0, act_layer: nn.Module=nn.GELU, proj_drop: float=0.0, drop_path: float=0.0, layer_scale_init_value: float=1e-05, inference_mode: bool=False): super().__init__() self.token_mixer = RepMixer(dim, kernel_size=kernel_size, layer_scale_init_value=layer_scale_init_value, inference_mode=inference_mode) self.mlp = ConvMlp(in_chs=dim, hidden_channels=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) if layer_scale_init_value is not None: self.layer_scale = LayerScale2d(dim, layer_scale_init_value) else: self.layer_scale = nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): x = self.token_mixer(x) x = x + self.drop_path(self.layer_scale(self.mlp(x))) return x class AttentionBlock(nn.Module): def __init__(self, dim: int, mlp_ratio: float=4.0, act_layer: nn.Module=nn.GELU, norm_layer: nn.Module=nn.BatchNorm2d, proj_drop: float=0.0, drop_path: float=0.0, layer_scale_init_value: float=1e-05): super().__init__() self.norm = norm_layer(dim) self.token_mixer = Attention(dim=dim) if layer_scale_init_value is not None: self.layer_scale_1 = LayerScale2d(dim, layer_scale_init_value) else: self.layer_scale_1 = nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.mlp = ConvMlp(in_chs=dim, hidden_channels=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) if layer_scale_init_value is not None: self.layer_scale_2 = LayerScale2d(dim, layer_scale_init_value) else: self.layer_scale_2 = nn.Identity() 
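# Added commentary (not original code): layer_scale_1/layer_scale_2 multiply each residual
# branch by a learnable per-channel gamma (initialized to layer_scale_init_value, 1e-05 by
# default) so the block starts close to an identity mapping; DropPath then applies
# stochastic depth on top of the scaled branch.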
self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): x = x + self.drop_path1(self.layer_scale_1(self.token_mixer(self.norm(x)))) x = x + self.drop_path2(self.layer_scale_2(self.mlp(x))) return x class FastVitStage(nn.Module): def __init__(self, dim: int, dim_out: int, depth: int, token_mixer_type: str, downsample: bool=True, se_downsample: bool=False, down_patch_size: int=7, down_stride: int=2, pos_emb_layer: Optional[nn.Module]=None, kernel_size: int=3, mlp_ratio: float=4.0, act_layer: nn.Module=nn.GELU, norm_layer: nn.Module=nn.BatchNorm2d, proj_drop_rate: float=0.0, drop_path_rate: float=0.0, layer_scale_init_value: Optional[float]=1e-05, lkc_use_act=False, inference_mode=False): super().__init__() self.grad_checkpointing = False if downsample: self.downsample = PatchEmbed(patch_size=down_patch_size, stride=down_stride, in_chs=dim, embed_dim=dim_out, use_se=se_downsample, act_layer=act_layer, lkc_use_act=lkc_use_act, inference_mode=inference_mode) else: assert dim == dim_out self.downsample = nn.Identity() if pos_emb_layer is not None: self.pos_emb = pos_emb_layer(dim_out, inference_mode=inference_mode) else: self.pos_emb = nn.Identity() blocks = [] for block_idx in range(depth): if token_mixer_type == 'repmixer': blocks.append(RepMixerBlock(dim_out, kernel_size=kernel_size, mlp_ratio=mlp_ratio, act_layer=act_layer, proj_drop=proj_drop_rate, drop_path=drop_path_rate[block_idx], layer_scale_init_value=layer_scale_init_value, inference_mode=inference_mode)) elif token_mixer_type == 'attention': blocks.append(AttentionBlock(dim_out, mlp_ratio=mlp_ratio, act_layer=act_layer, norm_layer=norm_layer, proj_drop=proj_drop_rate, drop_path=drop_path_rate[block_idx], layer_scale_init_value=layer_scale_init_value)) else: raise ValueError('Token mixer type: {} not supported'.format(token_mixer_type)) self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.pos_emb(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class FastVit(nn.Module): fork_feat: torch.jit.Final[bool] '' def __init__(self, in_chans: int=3, layers: Tuple[int, ...]=(2, 2, 6, 2), token_mixers: Tuple[str, ...]=('repmixer', 'repmixer', 'repmixer', 'repmixer'), embed_dims: Tuple[int, ...]=(64, 128, 256, 512), mlp_ratios: Tuple[float, ...]=(4,) * 4, downsamples: Tuple[bool, ...]=(False, True, True, True), se_downsamples: Tuple[bool, ...]=(False, False, False, False), repmixer_kernel_size: int=3, num_classes: int=1000, pos_embs: Tuple[Optional[nn.Module], ...]=(None,) * 4, down_patch_size: int=7, down_stride: int=2, drop_rate: float=0.0, proj_drop_rate: float=0.0, drop_path_rate: float=0.0, layer_scale_init_value: float=1e-05, lkc_use_act: bool=False, fork_feat: bool=False, cls_ratio: float=2.0, global_pool: str='avg', norm_layer: nn.Module=nn.BatchNorm2d, act_layer: nn.Module=nn.GELU, inference_mode: bool=False) -> None: super().__init__() self.num_classes = 0 if fork_feat else num_classes self.fork_feat = fork_feat self.global_pool = global_pool self.feature_info = [] self.stem = convolutional_stem(in_chans, embed_dims[0], act_layer, inference_mode) prev_dim = embed_dims[0] scale = 1 dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] stages = [] for i in range(len(layers)): downsample = downsamples[i] or prev_dim != embed_dims[i] stage = FastVitStage(dim=prev_dim, dim_out=embed_dims[i], depth=layers[i], downsample=downsample, 
se_downsample=se_downsamples[i], down_patch_size=down_patch_size, down_stride=down_stride, pos_emb_layer=pos_embs[i], token_mixer_type=token_mixers[i], kernel_size=repmixer_kernel_size, mlp_ratio=mlp_ratios[i], act_layer=act_layer, norm_layer=norm_layer, proj_drop_rate=proj_drop_rate, drop_path_rate=dpr[i], layer_scale_init_value=layer_scale_init_value, lkc_use_act=lkc_use_act, inference_mode=inference_mode) stages.append(stage) prev_dim = embed_dims[i] if downsample: scale *= 2 self.feature_info += [dict(num_chs=prev_dim, reduction=4 * scale, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_stages = len(self.stages) self.num_features = self.head_hidden_size = prev_dim if self.fork_feat: self.out_indices = [0, 1, 2, 3] for (i_emb, i_layer) in enumerate(self.out_indices): if i_emb == 0 and os.environ.get('FORK_LAST3', None): '' layer = nn.Identity() else: layer = norm_layer(embed_dims[i_emb]) layer_name = f'norm{i_layer}' self.add_module(layer_name, layer) else: self.num_features = self.head_hidden_size = final_features = int(embed_dims[-1] * cls_ratio) self.final_conv = MobileOneBlock(in_chs=embed_dims[-1], out_chs=final_features, kernel_size=3, stride=1, group_size=1, inference_mode=inference_mode, use_se=True, act_layer=act_layer, num_conv_branches=1) self.head = ClassifierHead(final_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) self.apply(self._init_weights) def _init_weights(self, m: nn.Module) -> None: if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return set() @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks='^stages\\.(\\d+)' if coarse else [('^stages\\.(\\d+).downsample', (0,)), ('^stages\\.(\\d+).pos_emb', (0,)), ('^stages\\.(\\d+)\\.\\w+\\.(\\d+)', None)]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
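# Added commentary (not original code): `indices` selects which stage outputs to collect
# (resolved via feature_take_indices); with stop_early=True the stage list is truncated at
# the deepest requested index, and final_conv is only applied when the last stage actually
# runs and the full (non intermediates_only) output is requested.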
intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.stages), indices) x = self.stem(x) last_idx = self.num_stages - 1 if torch.jit.is_scripting() or not stop_early: stages = self.stages else: stages = self.stages[:max_index + 1] feat_idx = 0 for (feat_idx, stage) in enumerate(stages): x = stage(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates if feat_idx == last_idx: x = self.final_conv(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.stem(x) outs = [] for (idx, block) in enumerate(self.stages): x = block(x) if self.fork_feat: if idx in self.out_indices: norm_layer = getattr(self, f'norm{idx}') x_out = norm_layer(x) outs.append(x_out) if self.fork_feat: return outs x = self.final_conv(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool=False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) if self.fork_feat: return x x = self.forward_head(x) return x def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': ('stem.0.conv_kxk.0.conv', 'stem.0.conv_scale.conv'), 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'fastvit_t8.apple_in1k': _cfg(hf_hub_id='timm/'), 'fastvit_t12.apple_in1k': _cfg(hf_hub_id='timm/'), 'fastvit_s12.apple_in1k': _cfg(hf_hub_id='timm/'), 'fastvit_sa12.apple_in1k': _cfg(hf_hub_id='timm/'), 'fastvit_sa24.apple_in1k': _cfg(hf_hub_id='timm/'), 'fastvit_sa36.apple_in1k': _cfg(hf_hub_id='timm/'), 'fastvit_ma36.apple_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95), 'fastvit_t8.apple_dist_in1k': _cfg(hf_hub_id='timm/'), 'fastvit_t12.apple_dist_in1k': _cfg(hf_hub_id='timm/'), 'fastvit_s12.apple_dist_in1k': _cfg(hf_hub_id='timm/'), 'fastvit_sa12.apple_dist_in1k': _cfg(hf_hub_id='timm/'), 'fastvit_sa24.apple_dist_in1k': _cfg(hf_hub_id='timm/'), 'fastvit_sa36.apple_dist_in1k': _cfg(hf_hub_id='timm/'), 'fastvit_ma36.apple_dist_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95), 'fastvit_mci0.apple_mclip': _cfg(hf_hub_id='apple/mobileclip_s0_timm', url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s0.pt', crop_pct=0.95, num_classes=512, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)), 'fastvit_mci1.apple_mclip': _cfg(hf_hub_id='apple/mobileclip_s1_timm', url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s1.pt', crop_pct=0.95, num_classes=512, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)), 'fastvit_mci2.apple_mclip': _cfg(hf_hub_id='apple/mobileclip_s2_timm', url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s2.pt', crop_pct=0.95, num_classes=512, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0))}) def checkpoint_filter_fn(state_dict, model): if 'stem.0.conv_kxk.0.conv.weight' in state_dict: return state_dict state_dict = state_dict.get('state_dict', state_dict) if 'image_encoder.model.patch_embed.0.rbr_conv.0.conv.weight' in state_dict: prefix = 
'image_encoder.model.' else: prefix = '' import re import bisect stage_ends = [] for (k, v) in state_dict.items(): match = re.match('^(.*?)network\\.(\\d+)\\.proj.*', k) if match: stage_ends.append(int(match.group(2))) stage_ends = list(sorted(set(stage_ends))) out_dict = {} for (k, v) in state_dict.items(): if prefix: if prefix not in k: continue k = k.replace(prefix, '') k = k.replace('patch_embed', 'stem') k = k.replace('rbr_conv', 'conv_kxk') k = k.replace('rbr_scale', 'conv_scale') k = k.replace('rbr_skip', 'identity') k = k.replace('conv_exp', 'final_conv') k = k.replace('lkb_origin', 'large_conv') k = k.replace('convffn', 'mlp') k = k.replace('se.reduce', 'se.fc1') k = k.replace('se.expand', 'se.fc2') k = re.sub('layer_scale_([0-9])', 'layer_scale_\\1.gamma', k) if k.endswith('layer_scale'): k = k.replace('layer_scale', 'layer_scale.gamma') k = k.replace('dist_head', 'head_dist') if k.startswith('head.'): if k == 'head.proj' and hasattr(model.head, 'fc') and isinstance(model.head.fc, nn.Linear): k = k.replace('head.proj', 'head.fc.weight') v = v.T out_dict['head.fc.bias'] = torch.zeros(v.shape[0]) else: k = k.replace('head.', 'head.fc.') match = re.match('^network\\.(\\d+)', k) (stage_idx, net_idx) = (None, None) if match: net_idx = int(match.group(1)) stage_idx = bisect.bisect_right(stage_ends, net_idx) if stage_idx is not None: net_prefix = f'network.{net_idx}' stage_prefix = f'stages.{stage_idx}' if net_prefix + '.proj' in k: k = k.replace(net_prefix + '.proj', stage_prefix + '.downsample.proj') elif net_prefix + '.pe' in k: k = k.replace(net_prefix + '.pe', stage_prefix + '.pos_emb.pos_enc') else: k = k.replace(net_prefix, stage_prefix + '.blocks') out_dict[k] = v return out_dict def _create_fastvit(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg(FastVit, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model @register_model def fastvit_t8(pretrained=False, **kwargs): model_args = dict(layers=(2, 2, 4, 2), embed_dims=(48, 96, 192, 384), mlp_ratios=(3, 3, 3, 3), token_mixers=('repmixer', 'repmixer', 'repmixer', 'repmixer')) return _create_fastvit('fastvit_t8', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_t12(pretrained=False, **kwargs): model_args = dict(layers=(2, 2, 6, 2), embed_dims=(64, 128, 256, 512), mlp_ratios=(3, 3, 3, 3), token_mixers=('repmixer', 'repmixer', 'repmixer', 'repmixer')) return _create_fastvit('fastvit_t12', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_s12(pretrained=False, **kwargs): model_args = dict(layers=(2, 2, 6, 2), embed_dims=(64, 128, 256, 512), mlp_ratios=(4, 4, 4, 4), token_mixers=('repmixer', 'repmixer', 'repmixer', 'repmixer')) return _create_fastvit('fastvit_s12', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_sa12(pretrained=False, **kwargs): model_args = dict(layers=(2, 2, 6, 2), embed_dims=(64, 128, 256, 512), mlp_ratios=(4, 4, 4, 4), pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), token_mixers=('repmixer', 'repmixer', 'repmixer', 'attention')) return _create_fastvit('fastvit_sa12', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_sa24(pretrained=False, **kwargs): model_args = dict(layers=(4, 4, 12, 4), embed_dims=(64, 128, 256, 512), mlp_ratios=(4, 4, 4, 4), pos_embs=(None, None, None, 
partial(RepConditionalPosEnc, spatial_shape=(7, 7))), token_mixers=('repmixer', 'repmixer', 'repmixer', 'attention')) return _create_fastvit('fastvit_sa24', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_sa36(pretrained=False, **kwargs): model_args = dict(layers=(6, 6, 18, 6), embed_dims=(64, 128, 256, 512), mlp_ratios=(4, 4, 4, 4), pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), token_mixers=('repmixer', 'repmixer', 'repmixer', 'attention')) return _create_fastvit('fastvit_sa36', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_ma36(pretrained=False, **kwargs): model_args = dict(layers=(6, 6, 18, 6), embed_dims=(76, 152, 304, 608), mlp_ratios=(4, 4, 4, 4), pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), token_mixers=('repmixer', 'repmixer', 'repmixer', 'attention')) return _create_fastvit('fastvit_ma36', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_mci0(pretrained=False, **kwargs): model_args = dict(layers=(2, 6, 10, 2), embed_dims=(64, 128, 256, 512), mlp_ratios=(3, 3, 3, 3), se_downsamples=(False, False, True, True), pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), token_mixers=('repmixer', 'repmixer', 'repmixer', 'attention'), lkc_use_act=True) return _create_fastvit('fastvit_mci0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_mci1(pretrained=False, **kwargs): model_args = dict(layers=(4, 12, 20, 4), embed_dims=(64, 128, 256, 512), mlp_ratios=(3, 3, 3, 3), se_downsamples=(False, False, True, True), pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), token_mixers=('repmixer', 'repmixer', 'repmixer', 'attention'), lkc_use_act=True) return _create_fastvit('fastvit_mci1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_mci2(pretrained=False, **kwargs): model_args = dict(layers=(4, 12, 24, 4), embed_dims=(80, 160, 320, 640), mlp_ratios=(3, 3, 3, 3), se_downsamples=(False, False, True, True), pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), token_mixers=('repmixer', 'repmixer', 'repmixer', 'attention'), lkc_use_act=True) return _create_fastvit('fastvit_mci2', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/focalnet.py """""" from functools import partial from typing import Callable, Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, DropPath, LayerNorm2d, trunc_normal_, ClassifierHead, NormMlpClassifierHead from ._builder import build_model_with_cfg from ._manipulate import named_apply from ._registry import generate_default_cfgs, register_model __all__ = ['FocalNet'] class FocalModulation(nn.Module): def __init__(self, dim: int, focal_window, focal_level: int, focal_factor: int=2, bias: bool=True, use_post_norm: bool=False, normalize_modulator: bool=False, proj_drop: float=0.0, norm_layer: Callable=LayerNorm2d): super().__init__() self.dim = dim self.focal_window = focal_window self.focal_level = focal_level self.focal_factor = focal_factor self.use_post_norm = use_post_norm self.normalize_modulator = normalize_modulator self.input_split = [dim, dim, self.focal_level + 1] self.f = nn.Conv2d(dim, 2 * dim + (self.focal_level + 1), kernel_size=1, bias=bias) self.h = 
nn.Conv2d(dim, dim, kernel_size=1, bias=bias) self.act = nn.GELU() self.proj = nn.Conv2d(dim, dim, kernel_size=1) self.proj_drop = nn.Dropout(proj_drop) self.focal_layers = nn.ModuleList() self.kernel_sizes = [] for k in range(self.focal_level): kernel_size = self.focal_factor * k + self.focal_window self.focal_layers.append(nn.Sequential(nn.Conv2d(dim, dim, kernel_size=kernel_size, groups=dim, padding=kernel_size // 2, bias=False), nn.GELU())) self.kernel_sizes.append(kernel_size) self.norm = norm_layer(dim) if self.use_post_norm else nn.Identity() def forward(self, x): x = self.f(x) (q, ctx, gates) = torch.split(x, self.input_split, 1) ctx_all = 0 for (l, focal_layer) in enumerate(self.focal_layers): ctx = focal_layer(ctx) ctx_all = ctx_all + ctx * gates[:, l:l + 1] ctx_global = self.act(ctx.mean((2, 3), keepdim=True)) ctx_all = ctx_all + ctx_global * gates[:, self.focal_level:] if self.normalize_modulator: ctx_all = ctx_all / (self.focal_level + 1) x_out = q * self.h(ctx_all) x_out = self.norm(x_out) x_out = self.proj(x_out) x_out = self.proj_drop(x_out) return x_out class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-05, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class FocalNetBlock(nn.Module): def __init__(self, dim: int, mlp_ratio: float=4.0, focal_level: int=1, focal_window: int=3, use_post_norm: bool=False, use_post_norm_in_modulation: bool=False, normalize_modulator: bool=False, layerscale_value: float=0.0001, proj_drop: float=0.0, drop_path: float=0.0, act_layer: Callable=nn.GELU, norm_layer: Callable=LayerNorm2d): super().__init__() self.dim = dim self.mlp_ratio = mlp_ratio self.focal_window = focal_window self.focal_level = focal_level self.use_post_norm = use_post_norm self.norm1 = norm_layer(dim) if not use_post_norm else nn.Identity() self.modulation = FocalModulation(dim, focal_window=focal_window, focal_level=self.focal_level, use_post_norm=use_post_norm_in_modulation, normalize_modulator=normalize_modulator, proj_drop=proj_drop, norm_layer=norm_layer) self.norm1_post = norm_layer(dim) if use_post_norm else nn.Identity() self.ls1 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) if not use_post_norm else nn.Identity() self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, use_conv=True) self.norm2_post = norm_layer(dim) if use_post_norm else nn.Identity() self.ls2 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): shortcut = x x = self.norm1(x) x = self.modulation(x) x = self.norm1_post(x) x = shortcut + self.drop_path1(self.ls1(x)) x = x + self.drop_path2(self.ls2(self.norm2_post(self.mlp(self.norm2(x))))) return x class FocalNetStage(nn.Module): def __init__(self, dim: int, out_dim: int, depth: int, mlp_ratio: float=4.0, downsample: bool=True, focal_level: int=1, focal_window: int=1, use_overlap_down: bool=False, use_post_norm: bool=False, use_post_norm_in_modulation: bool=False, normalize_modulator: bool=False, layerscale_value: float=0.0001, proj_drop: float=0.0, drop_path: float=0.0, norm_layer: Callable=LayerNorm2d): super().__init__() self.dim 
= dim self.depth = depth self.grad_checkpointing = False if downsample: self.downsample = Downsample(in_chs=dim, out_chs=out_dim, stride=2, overlap=use_overlap_down, norm_layer=norm_layer) else: self.downsample = nn.Identity() self.blocks = nn.ModuleList([FocalNetBlock(dim=out_dim, mlp_ratio=mlp_ratio, focal_level=focal_level, focal_window=focal_window, use_post_norm=use_post_norm, use_post_norm_in_modulation=use_post_norm_in_modulation, normalize_modulator=normalize_modulator, layerscale_value=layerscale_value, proj_drop=proj_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for i in range(depth)]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x): x = self.downsample(x) for blk in self.blocks: if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint.checkpoint(blk, x) else: x = blk(x) return x class Downsample(nn.Module): def __init__(self, in_chs: int, out_chs: int, stride: int=4, overlap: bool=False, norm_layer: Optional[Callable]=None): super().__init__() self.stride = stride padding = 0 kernel_size = stride if overlap: assert stride in (2, 4) if stride == 4: (kernel_size, padding) = (7, 2) elif stride == 2: (kernel_size, padding) = (3, 1) self.proj = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding) self.norm = norm_layer(out_chs) if norm_layer is not None else nn.Identity() def forward(self, x): x = self.proj(x) x = self.norm(x) return x class FocalNet(nn.Module): def __init__(self, in_chans: int=3, num_classes: int=1000, global_pool: str='avg', embed_dim: int=96, depths: Tuple[int, ...]=(2, 2, 6, 2), mlp_ratio: float=4.0, focal_levels: Tuple[int, ...]=(2, 2, 2, 2), focal_windows: Tuple[int, ...]=(3, 3, 3, 3), use_overlap_down: bool=False, use_post_norm: bool=False, use_post_norm_in_modulation: bool=False, normalize_modulator: bool=False, head_hidden_size: Optional[int]=None, head_init_scale: float=1.0, layerscale_value: Optional[float]=None, drop_rate: bool=0.0, proj_drop_rate: bool=0.0, drop_path_rate: bool=0.1, norm_layer: Callable=partial(LayerNorm2d, eps=1e-05)): super().__init__() self.num_layers = len(depths) embed_dim = [embed_dim * 2 ** i for i in range(self.num_layers)] self.num_classes = num_classes self.embed_dim = embed_dim self.num_features = self.head_hidden_size = embed_dim[-1] self.feature_info = [] self.stem = Downsample(in_chs=in_chans, out_chs=embed_dim[0], overlap=use_overlap_down, norm_layer=norm_layer) in_dim = embed_dim[0] dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] layers = [] for i_layer in range(self.num_layers): out_dim = embed_dim[i_layer] layer = FocalNetStage(dim=in_dim, out_dim=out_dim, depth=depths[i_layer], mlp_ratio=mlp_ratio, downsample=i_layer > 0, focal_level=focal_levels[i_layer], focal_window=focal_windows[i_layer], use_overlap_down=use_overlap_down, use_post_norm=use_post_norm, use_post_norm_in_modulation=use_post_norm_in_modulation, normalize_modulator=normalize_modulator, layerscale_value=layerscale_value, proj_drop=proj_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer) in_dim = out_dim layers += [layer] self.feature_info += [dict(num_chs=out_dim, reduction=4 * 2 ** i_layer, module=f'layers.{i_layer}')] self.layers = nn.Sequential(*layers) if head_hidden_size: self.norm = nn.Identity() self.head_hidden_size = head_hidden_size self.head = NormMlpClassifierHead(self.num_features, num_classes, 
hidden_size=head_hidden_size, pool_type=global_pool, drop_rate=drop_rate, norm_layer=norm_layer) else: self.norm = norm_layer(self.num_features) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def no_weight_decay(self): return {''} @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks=[('^layers\\.(\\d+)', None), ('^norm', (99999,))] if coarse else [('^layers\\.(\\d+).downsample', (0,)), ('^layers\\.(\\d+)\\.\\w+\\.(\\d+)', None), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable for l in self.layers: l.set_grad_checkpointing(enable=enable) @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.layers(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=0.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): trunc_normal_(module.weight, std=0.02) if module.bias is not None: nn.init.zeros_(module.bias) if name and 'head.fc' in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.proj', 'classifier': 'head.fc', 'license': 'mit', **kwargs} default_cfgs = generate_default_cfgs({'focalnet_tiny_srf.ms_in1k': _cfg(hf_hub_id='timm/'), 'focalnet_small_srf.ms_in1k': _cfg(hf_hub_id='timm/'), 'focalnet_base_srf.ms_in1k': _cfg(hf_hub_id='timm/'), 'focalnet_tiny_lrf.ms_in1k': _cfg(hf_hub_id='timm/'), 'focalnet_small_lrf.ms_in1k': _cfg(hf_hub_id='timm/'), 'focalnet_base_lrf.ms_in1k': _cfg(hf_hub_id='timm/'), 'focalnet_large_fl3.ms_in22k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), 'focalnet_large_fl4.ms_in22k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), 'focalnet_xlarge_fl3.ms_in22k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), 'focalnet_xlarge_fl4.ms_in22k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), 'focalnet_huge_fl3.ms_in22k': _cfg(hf_hub_id='timm/', num_classes=21842), 'focalnet_huge_fl4.ms_in22k': _cfg(hf_hub_id='timm/', num_classes=0)}) def checkpoint_filter_fn(state_dict, model: FocalNet): state_dict = state_dict.get('model', state_dict) if 'stem.proj.weight' in state_dict: return state_dict import re out_dict = {} dest_dict = model.state_dict() for (k, v) in state_dict.items(): k = re.sub('gamma_([0-9])', 'ls\\1.gamma', k) k = k.replace('patch_embed', 'stem') k = re.sub('layers.(\\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k) if 'norm' in k and k not in 
dest_dict: k = re.sub('norm([0-9])', 'norm\\1_post', k) k = k.replace('ln.', 'norm.') k = k.replace('head', 'head.fc') if k in dest_dict and dest_dict[k].numel() == v.numel() and (dest_dict[k].shape != v.shape): v = v.reshape(dest_dict[k].shape) out_dict[k] = v return out_dict def _create_focalnet(variant, pretrained=False, **kwargs): default_out_indices = tuple((i for (i, _) in enumerate(kwargs.get('depths', (1, 1, 3, 1))))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg(FocalNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model @register_model def focalnet_tiny_srf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, **kwargs) return _create_focalnet('focalnet_tiny_srf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_small_srf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, **kwargs) return _create_focalnet('focalnet_small_srf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_base_srf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, **kwargs) return _create_focalnet('focalnet_base_srf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_tiny_lrf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs) return _create_focalnet('focalnet_tiny_lrf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_small_lrf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs) return _create_focalnet('focalnet_small_lrf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_base_lrf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, focal_levels=[3, 3, 3, 3], **kwargs) return _create_focalnet('focalnet_base_lrf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_large_fl3(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4, use_post_norm=True, use_overlap_down=True, layerscale_value=0.0001, **kwargs) return _create_focalnet('focalnet_large_fl3', pretrained=pretrained, **model_kwargs) @register_model def focalnet_large_fl4(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[4, 4, 4, 4], use_post_norm=True, use_overlap_down=True, layerscale_value=0.0001, **kwargs) return _create_focalnet('focalnet_large_fl4', pretrained=pretrained, **model_kwargs) @register_model def focalnet_xlarge_fl3(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4, use_post_norm=True, use_overlap_down=True, layerscale_value=0.0001, **kwargs) return _create_focalnet('focalnet_xlarge_fl3', pretrained=pretrained, **model_kwargs) @register_model def focalnet_xlarge_fl4(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[4, 4, 4, 4], use_post_norm=True, use_overlap_down=True, layerscale_value=0.0001, **kwargs) return _create_focalnet('focalnet_xlarge_fl4', pretrained=pretrained, **model_kwargs) @register_model def 
focalnet_huge_fl3(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[3, 3, 3, 3], focal_windows=[3] * 4, use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=0.0001, **kwargs) return _create_focalnet('focalnet_huge_fl3', pretrained=pretrained, **model_kwargs) @register_model def focalnet_huge_fl4(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[4, 4, 4, 4], use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=0.0001, **kwargs) return _create_focalnet('focalnet_huge_fl4', pretrained=pretrained, **model_kwargs) # File: pytorch-image-models-main/timm/models/gcvit.py """""" import math from functools import partial from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, to_2tuple, to_ntuple, Mlp, ClassifierHead, LayerNorm2d, get_attn, get_act_layer, get_norm_layer, RelPosBias, _assert from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import named_apply from ._registry import register_model, generate_default_cfgs __all__ = ['GlobalContextVit'] class MbConvBlock(nn.Module): def __init__(self, in_chs, out_chs=None, expand_ratio=1.0, attn_layer='se', bias=False, act_layer=nn.GELU): super().__init__() attn_kwargs = dict(act_layer=act_layer) if isinstance(attn_layer, str) and attn_layer == 'se' or attn_layer == 'eca': attn_kwargs['rd_ratio'] = 0.25 attn_kwargs['bias'] = False attn_layer = get_attn(attn_layer) out_chs = out_chs or in_chs mid_chs = int(expand_ratio * in_chs) self.conv_dw = nn.Conv2d(in_chs, mid_chs, 3, 1, 1, groups=in_chs, bias=bias) self.act = act_layer() self.se = attn_layer(mid_chs, **attn_kwargs) self.conv_pw = nn.Conv2d(mid_chs, out_chs, 1, 1, 0, bias=bias) def forward(self, x): shortcut = x x = self.conv_dw(x) x = self.act(x) x = self.se(x) x = self.conv_pw(x) x = x + shortcut return x class Downsample2d(nn.Module): def __init__(self, dim, dim_out=None, reduction='conv', act_layer=nn.GELU, norm_layer=LayerNorm2d): super().__init__() dim_out = dim_out or dim self.norm1 = norm_layer(dim) if norm_layer is not None else nn.Identity() self.conv_block = MbConvBlock(dim, act_layer=act_layer) assert reduction in ('conv', 'max', 'avg') if reduction == 'conv': self.reduction = nn.Conv2d(dim, dim_out, 3, 2, 1, bias=False) elif reduction == 'max': assert dim == dim_out self.reduction = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) else: assert dim == dim_out self.reduction = nn.AvgPool2d(kernel_size=2) self.norm2 = norm_layer(dim_out) if norm_layer is not None else nn.Identity() def forward(self, x): x = self.norm1(x) x = self.conv_block(x) x = self.reduction(x) x = self.norm2(x) return x class FeatureBlock(nn.Module): def __init__(self, dim, levels=0, reduction='max', act_layer=nn.GELU): super().__init__() reductions = levels levels = max(1, levels) if reduction == 'avg': pool_fn = partial(nn.AvgPool2d, kernel_size=2) else: pool_fn = partial(nn.MaxPool2d, kernel_size=3, stride=2, padding=1) self.blocks = nn.Sequential() for i in range(levels): self.blocks.add_module(f'conv{i + 1}', MbConvBlock(dim, act_layer=act_layer)) if reductions: self.blocks.add_module(f'pool{i + 1}', pool_fn()) reductions -= 1 def forward(self, x): return self.blocks(x) 
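# Shape sketch (illustrative comment only, not part of the model): FeatureBlock(dim, levels=k)
# stacks MbConvBlocks, inserting a stride-2 pool after each until `levels` reductions are done,
# so a (B, C, H, W) input comes out with spatial dims shrunk by 2**k and channels unchanged.
# The sizes below are assumed example values, nothing defined in this file:
#   fb = FeatureBlock(dim=64, levels=2)    # builds conv1, pool1, conv2, pool2
#   y = fb(torch.randn(2, 64, 56, 56))     # 56 -> 28 -> 14 with the default max pool
#   assert y.shape == (2, 64, 14, 14)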
class Stem(nn.Module): def __init__(self, in_chs: int=3, out_chs: int=96, act_layer: Callable=nn.GELU, norm_layer: Callable=LayerNorm2d): super().__init__() self.conv1 = nn.Conv2d(in_chs, out_chs, kernel_size=3, stride=2, padding=1) self.down = Downsample2d(out_chs, act_layer=act_layer, norm_layer=norm_layer) def forward(self, x): x = self.conv1(x) x = self.down(x) return x class WindowAttentionGlobal(nn.Module): def __init__(self, dim: int, num_heads: int, window_size: Tuple[int, int], use_global: bool=True, qkv_bias: bool=True, attn_drop: float=0.0, proj_drop: float=0.0): super().__init__() window_size = to_2tuple(window_size) self.window_size = window_size self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** (-0.5) self.use_global = use_global self.rel_pos = RelPosBias(window_size=window_size, num_heads=num_heads) if self.use_global: self.qkv = nn.Linear(dim, dim * 2, bias=qkv_bias) else: self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, q_global: Optional[torch.Tensor]=None): (B, N, C) = x.shape if self.use_global and q_global is not None: _assert(x.shape[-1] == q_global.shape[-1], 'x and q_global seq lengths should be equal') kv = self.qkv(x) kv = kv.reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) (k, v) = kv.unbind(0) q = q_global.repeat(B // q_global.shape[0], 1, 1, 1) q = q.reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3) else: qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) q = q * self.scale attn = q @ k.transpose(-2, -1).contiguous() attn = self.rel_pos(attn) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x def window_partition(x, window_size: Tuple[int, int]): (B, H, W, C) = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]): (H, W) = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-05, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class GlobalContextVitBlock(nn.Module): def __init__(self, dim: int, feat_size: Tuple[int, int], num_heads: int, window_size: int=7, mlp_ratio: float=4.0, use_global: bool=True, qkv_bias: bool=True, layer_scale: Optional[float]=None, proj_drop: float=0.0, attn_drop: float=0.0, drop_path: float=0.0, attn_layer: Callable=WindowAttentionGlobal, act_layer: Callable=nn.GELU, norm_layer: Callable=nn.LayerNorm): super().__init__() feat_size = to_2tuple(feat_size) window_size = to_2tuple(window_size) self.window_size = window_size self.num_windows = int(feat_size[0] // window_size[0] * (feat_size[1] // window_size[1])) self.norm1 = norm_layer(dim) self.attn = attn_layer(dim, num_heads=num_heads, window_size=window_size, use_global=use_global, 
qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.ls1 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.ls2 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def _window_attn(self, x, q_global: Optional[torch.Tensor]=None): (B, H, W, C) = x.shape x_win = window_partition(x, self.window_size) x_win = x_win.view(-1, self.window_size[0] * self.window_size[1], C) attn_win = self.attn(x_win, q_global) x = window_reverse(attn_win, self.window_size, (H, W)) return x def forward(self, x, q_global: Optional[torch.Tensor]=None): x = x + self.drop_path1(self.ls1(self._window_attn(self.norm1(x), q_global))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class GlobalContextVitStage(nn.Module): def __init__(self, dim, depth: int, num_heads: int, feat_size: Tuple[int, int], window_size: Tuple[int, int], downsample: bool=True, global_norm: bool=False, stage_norm: bool=False, mlp_ratio: float=4.0, qkv_bias: bool=True, layer_scale: Optional[float]=None, proj_drop: float=0.0, attn_drop: float=0.0, drop_path: Union[List[float], float]=0.0, act_layer: Callable=nn.GELU, norm_layer: Callable=nn.LayerNorm, norm_layer_cl: Callable=LayerNorm2d): super().__init__() if downsample: self.downsample = Downsample2d(dim=dim, dim_out=dim * 2, norm_layer=norm_layer) dim = dim * 2 feat_size = (feat_size[0] // 2, feat_size[1] // 2) else: self.downsample = nn.Identity() self.feat_size = feat_size window_size = to_2tuple(window_size) feat_levels = int(math.log2(min(feat_size) / min(window_size))) self.global_block = FeatureBlock(dim, feat_levels) self.global_norm = norm_layer_cl(dim) if global_norm else nn.Identity() self.blocks = nn.ModuleList([GlobalContextVitBlock(dim=dim, num_heads=num_heads, feat_size=feat_size, window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, use_global=i % 2 != 0, layer_scale=layer_scale, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, act_layer=act_layer, norm_layer=norm_layer_cl) for i in range(depth)]) self.norm = norm_layer_cl(dim) if stage_norm else nn.Identity() self.dim = dim self.feat_size = feat_size self.grad_checkpointing = False def forward(self, x): x = self.downsample(x) global_query = self.global_block(x) x = x.permute(0, 2, 3, 1) global_query = self.global_norm(global_query.permute(0, 2, 3, 1)) for blk in self.blocks: if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint.checkpoint(blk, x, global_query) else: x = blk(x, global_query) x = self.norm(x) x = x.permute(0, 3, 1, 2).contiguous() return x class GlobalContextVit(nn.Module): def __init__(self, in_chans: int=3, num_classes: int=1000, global_pool: str='avg', img_size: Tuple[int, int]=224, window_ratio: Tuple[int, ...]=(32, 32, 16, 32), window_size: Optional[Tuple[int, ...]]=None, embed_dim: int=64, depths: Tuple[int, ...]=(3, 4, 19, 5), num_heads: Tuple[int, ...]=(2, 4, 8, 16), mlp_ratio: float=3.0, qkv_bias: bool=True, layer_scale: Optional[float]=None, drop_rate: float=0.0, proj_drop_rate: float=0.0, attn_drop_rate: float=0.0, drop_path_rate: float=0.0, weight_init='', act_layer: str='gelu', norm_layer: str='layernorm2d', norm_layer_cl: str='layernorm', norm_eps: float=1e-05):
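# When `window_size` is None, per-stage window sizes are derived from the input resolution
# as img_size // window_ratio (computed in the body below); e.g. with the default ratios
# (32, 32, 16, 32) and an assumed img_size of 224 this gives windows of (7, 7), (7, 7),
# (14, 14), (7, 7). An explicit `window_size` overrides the ratios.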
super().__init__() act_layer = get_act_layer(act_layer) norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps) norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps) img_size = to_2tuple(img_size) feat_size = tuple((d // 4 for d in img_size)) self.global_pool = global_pool self.num_classes = num_classes self.drop_rate = drop_rate num_stages = len(depths) self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (num_stages - 1)) if window_size is not None: window_size = to_ntuple(num_stages)(window_size) else: assert window_ratio is not None window_size = tuple([(img_size[0] // r, img_size[1] // r) for r in to_ntuple(num_stages)(window_ratio)]) self.stem = Stem(in_chs=in_chans, out_chs=embed_dim, act_layer=act_layer, norm_layer=norm_layer) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] stages = [] for i in range(num_stages): last_stage = i == num_stages - 1 stage_scale = 2 ** max(i - 1, 0) stages.append(GlobalContextVitStage(dim=embed_dim * stage_scale, depth=depths[i], num_heads=num_heads[i], feat_size=(feat_size[0] // stage_scale, feat_size[1] // stage_scale), window_size=window_size[i], downsample=i != 0, stage_norm=last_stage, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, layer_scale=layer_scale, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], act_layer=act_layer, norm_layer=norm_layer, norm_layer_cl=norm_layer_cl)) self.stages = nn.Sequential(*stages) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) if weight_init: named_apply(partial(self._init_weights, scheme=weight_init), self) def _init_weights(self, module, name, scheme='vit'): if scheme == 'vit': if isinstance(module, nn.Linear): nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-06) else: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): nn.init.normal_(module.weight, std=0.02) if module.bias is not None: nn.init.zeros_(module.bias) @torch.jit.ignore def no_weight_decay(self): return {k for (k, _) in self.named_parameters() if any((n in k for n in ['relative_position_bias_table', 'rel_pos.mlp']))} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^stem', blocks='^stages\\.(\\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is None: global_pool = self.head.global_pool.pool_type self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.stem(x) x = self.stages(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x def _create_gcvit(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = build_model_with_cfg(GlobalContextVit, variant, pretrained, **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': 
(7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1', 'classifier': 'head.fc', 'fixed_input_size': True, **kwargs} default_cfgs = generate_default_cfgs({'gcvit_xxtiny.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xxtiny_224_nvidia-d1d86009.pth'), 'gcvit_xtiny.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xtiny_224_nvidia-274b92b7.pth'), 'gcvit_tiny.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_tiny_224_nvidia-ac783954.pth'), 'gcvit_small.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_small_224_nvidia-4e98afa2.pth'), 'gcvit_base.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_base_224_nvidia-f009139b.pth')}) @register_model def gcvit_xxtiny(pretrained=False, **kwargs) -> GlobalContextVit: model_kwargs = dict(depths=(2, 2, 6, 2), num_heads=(2, 4, 8, 16), **kwargs) return _create_gcvit('gcvit_xxtiny', pretrained=pretrained, **model_kwargs) @register_model def gcvit_xtiny(pretrained=False, **kwargs) -> GlobalContextVit: model_kwargs = dict(depths=(3, 4, 6, 5), num_heads=(2, 4, 8, 16), **kwargs) return _create_gcvit('gcvit_xtiny', pretrained=pretrained, **model_kwargs) @register_model def gcvit_tiny(pretrained=False, **kwargs) -> GlobalContextVit: model_kwargs = dict(depths=(3, 4, 19, 5), num_heads=(2, 4, 8, 16), **kwargs) return _create_gcvit('gcvit_tiny', pretrained=pretrained, **model_kwargs) @register_model def gcvit_small(pretrained=False, **kwargs) -> GlobalContextVit: model_kwargs = dict(depths=(3, 4, 19, 5), num_heads=(3, 6, 12, 24), embed_dim=96, mlp_ratio=2, layer_scale=1e-05, **kwargs) return _create_gcvit('gcvit_small', pretrained=pretrained, **model_kwargs) @register_model def gcvit_base(pretrained=False, **kwargs) -> GlobalContextVit: model_kwargs = dict(depths=(3, 4, 19, 5), num_heads=(4, 8, 16, 32), embed_dim=128, mlp_ratio=2, layer_scale=1e-05, **kwargs) return _create_gcvit('gcvit_base', pretrained=pretrained, **model_kwargs) # File: pytorch-image-models-main/timm/models/ghostnet.py """""" import math from functools import partial from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d, Linear, make_divisible from ._builder import build_model_with_cfg from ._efficientnet_blocks import SqueezeExcite, ConvBnAct from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['GhostNet'] _SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4)) class GhostModule(nn.Module): def __init__(self, in_chs, out_chs, kernel_size=1, ratio=2, dw_size=3, stride=1, use_act=True, act_layer=nn.ReLU): super(GhostModule, self).__init__() self.out_chs = out_chs init_chs = math.ceil(out_chs / ratio) new_chs = init_chs * (ratio - 1) self.primary_conv = nn.Sequential(nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False), nn.BatchNorm2d(init_chs), act_layer(inplace=True) if use_act else nn.Identity()) self.cheap_operation = nn.Sequential(nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size // 2, groups=init_chs, bias=False), 
nn.BatchNorm2d(new_chs), act_layer(inplace=True) if use_act else nn.Identity()) def forward(self, x): x1 = self.primary_conv(x) x2 = self.cheap_operation(x1) out = torch.cat([x1, x2], dim=1) return out[:, :self.out_chs, :, :] class GhostModuleV2(nn.Module): def __init__(self, in_chs, out_chs, kernel_size=1, ratio=2, dw_size=3, stride=1, use_act=True, act_layer=nn.ReLU): super().__init__() self.gate_fn = nn.Sigmoid() self.out_chs = out_chs init_chs = math.ceil(out_chs / ratio) new_chs = init_chs * (ratio - 1) self.primary_conv = nn.Sequential(nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False), nn.BatchNorm2d(init_chs), act_layer(inplace=True) if use_act else nn.Identity()) self.cheap_operation = nn.Sequential(nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size // 2, groups=init_chs, bias=False), nn.BatchNorm2d(new_chs), act_layer(inplace=True) if use_act else nn.Identity()) self.short_conv = nn.Sequential(nn.Conv2d(in_chs, out_chs, kernel_size, stride, kernel_size // 2, bias=False), nn.BatchNorm2d(out_chs), nn.Conv2d(out_chs, out_chs, kernel_size=(1, 5), stride=1, padding=(0, 2), groups=out_chs, bias=False), nn.BatchNorm2d(out_chs), nn.Conv2d(out_chs, out_chs, kernel_size=(5, 1), stride=1, padding=(2, 0), groups=out_chs, bias=False), nn.BatchNorm2d(out_chs)) def forward(self, x): res = self.short_conv(F.avg_pool2d(x, kernel_size=2, stride=2)) x1 = self.primary_conv(x) x2 = self.cheap_operation(x1) out = torch.cat([x1, x2], dim=1) return out[:, :self.out_chs, :, :] * F.interpolate(self.gate_fn(res), size=(out.shape[-2], out.shape[-1]), mode='nearest') class GhostBottleneck(nn.Module): def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3, stride=1, act_layer=nn.ReLU, se_ratio=0.0, mode='original'): super(GhostBottleneck, self).__init__() has_se = se_ratio is not None and se_ratio > 0.0 self.stride = stride if mode == 'original': self.ghost1 = GhostModule(in_chs, mid_chs, use_act=True, act_layer=act_layer) else: self.ghost1 = GhostModuleV2(in_chs, mid_chs, use_act=True, act_layer=act_layer) if self.stride > 1: self.conv_dw = nn.Conv2d(mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size - 1) // 2, groups=mid_chs, bias=False) self.bn_dw = nn.BatchNorm2d(mid_chs) else: self.conv_dw = None self.bn_dw = None self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None self.ghost2 = GhostModule(mid_chs, out_chs, use_act=False) if in_chs == out_chs and self.stride == 1: self.shortcut = nn.Sequential() else: self.shortcut = nn.Sequential(nn.Conv2d(in_chs, in_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size - 1) // 2, groups=in_chs, bias=False), nn.BatchNorm2d(in_chs), nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), nn.BatchNorm2d(out_chs)) def forward(self, x): shortcut = x x = self.ghost1(x) if self.conv_dw is not None: x = self.conv_dw(x) x = self.bn_dw(x) if self.se is not None: x = self.se(x) x = self.ghost2(x) x += self.shortcut(shortcut) return x class GhostNet(nn.Module): def __init__(self, cfgs, num_classes=1000, width=1.0, in_chans=3, output_stride=32, global_pool='avg', drop_rate=0.2, version='v1'): super(GhostNet, self).__init__() assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' self.cfgs = cfgs self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] stem_chs = make_divisible(16 * width, 4) self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) self.feature_info.append(dict(num_chs=stem_chs, 
reduction=2, module=f'conv_stem')) self.bn1 = nn.BatchNorm2d(stem_chs) self.act1 = nn.ReLU(inplace=True) prev_chs = stem_chs stages = nn.ModuleList([]) stage_idx = 0 layer_idx = 0 net_stride = 2 for cfg in self.cfgs: layers = [] s = 1 for (k, exp_size, c, se_ratio, s) in cfg: out_chs = make_divisible(c * width, 4) mid_chs = make_divisible(exp_size * width, 4) layer_kwargs = {} if version == 'v2' and layer_idx > 1: layer_kwargs['mode'] = 'attn' layers.append(GhostBottleneck(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio, **layer_kwargs)) prev_chs = out_chs layer_idx += 1 if s > 1: net_stride *= 2 self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) stages.append(nn.Sequential(*layers)) stage_idx += 1 out_chs = make_divisible(exp_size * width, 4) stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) self.pool_dim = prev_chs = out_chs self.blocks = nn.Sequential(*stages) self.num_features = prev_chs self.head_hidden_size = out_chs = 1280 self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) self.act2 = nn.ReLU(inplace=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^conv_stem|bn1', blocks=[('^blocks\\.(\\d+)' if coarse else '^blocks\\.(\\d+)\\.(\\d+)', None), ('conv_head', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classifier def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.conv_stem(x) x = self.bn1(x) x = self.act1(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.conv_head(x) x = self.act2(x) x = self.flatten(x) if self.drop_rate > 0.0: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.classifier(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model: nn.Module): out_dict = {} for (k, v) in state_dict.items(): if 'total' in k: continue out_dict[k] = v return out_dict def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs): cfgs = [[[3, 16, 16, 0, 1]], [[3, 48, 24, 0, 2]], [[3, 72, 24, 0, 1]], [[5, 72, 40, 0.25, 2]], [[5, 120, 40, 0.25, 1]], [[3, 240, 80, 0, 2]], [[3, 200, 80, 0, 1], [3, 184, 80, 0, 1], [3, 184, 80, 0, 1], [3, 480, 112, 0.25, 1], [3, 672, 112, 0.25, 1]], [[5, 672, 160, 0.25, 2]], [[5, 960, 160, 0, 1], [5, 960, 160, 0.25, 1], [5, 960, 160, 0, 1], [5, 960, 160, 0.25, 1]]] model_kwargs = dict(cfgs=cfgs, width=width, **kwargs) return build_model_with_cfg(GhostNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True), **model_kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 
7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs} default_cfgs = generate_default_cfgs({'ghostnet_050.untrained': _cfg(), 'ghostnet_100.in1k': _cfg(hf_hub_id='timm/'), 'ghostnet_130.untrained': _cfg(), 'ghostnetv2_100.in1k': _cfg(hf_hub_id='timm/'), 'ghostnetv2_130.in1k': _cfg(hf_hub_id='timm/'), 'ghostnetv2_160.in1k': _cfg(hf_hub_id='timm/')}) @register_model def ghostnet_050(pretrained=False, **kwargs) -> GhostNet: model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs) return model @register_model def ghostnet_100(pretrained=False, **kwargs) -> GhostNet: model = _create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs) return model @register_model def ghostnet_130(pretrained=False, **kwargs) -> GhostNet: model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs) return model @register_model def ghostnetv2_100(pretrained=False, **kwargs) -> GhostNet: model = _create_ghostnet('ghostnetv2_100', width=1.0, pretrained=pretrained, version='v2', **kwargs) return model @register_model def ghostnetv2_130(pretrained=False, **kwargs) -> GhostNet: model = _create_ghostnet('ghostnetv2_130', width=1.3, pretrained=pretrained, version='v2', **kwargs) return model @register_model def ghostnetv2_160(pretrained=False, **kwargs) -> GhostNet: model = _create_ghostnet('ghostnetv2_160', width=1.6, pretrained=pretrained, version='v2', **kwargs) return model # File: pytorch-image-models-main/timm/models/hardcorenas.py from functools import partial import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from ._builder import build_model_with_cfg from ._builder import pretrained_cfg_for_features from ._efficientnet_blocks import SqueezeExcite from ._efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels from ._registry import register_model, generate_default_cfgs from .mobilenetv3 import MobileNetV3, MobileNetV3Features __all__ = [] def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs): num_features = 1280 se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) model_kwargs = dict(block_args=decode_arch_def(arch_def), num_features=num_features, stem_size=32, norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_layer=se_layer, **kwargs) features_only = False model_cls = MobileNetV3 kwargs_filter = None if model_kwargs.pop('features_only', False): features_only = True kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias', 'global_pool') model_cls = MobileNetV3Features model = build_model_with_cfg(model_cls, variant, pretrained, pretrained_strict=not features_only, kwargs_filter=kwargs_filter, **model_kwargs) if features_only: model.default_cfg = pretrained_cfg_for_features(model.default_cfg) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs} default_cfgs = generate_default_cfgs({'hardcorenas_a.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_b.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_c.miil_green_in1k': 
_cfg(hf_hub_id='timm/'), 'hardcorenas_d.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_e.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_f.miil_green_in1k': _cfg(hf_hub_id='timm/')}) @register_model def hardcorenas_a(pretrained=False, **kwargs) -> MobileNetV3: arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'], ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_b(pretrained=False, **kwargs) -> MobileNetV3: arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], ['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], ['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_c(pretrained=False, **kwargs) -> MobileNetV3: arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_d(pretrained=False, **kwargs) -> MobileNetV3: arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25'], ['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25', 'ir_r1_k5_s1_e3_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_e(pretrained=False, **kwargs) -> MobileNetV3: arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'], ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e3_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 
'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_f(pretrained=False, **kwargs) -> MobileNetV3: arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25'], ['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs) return model # File: pytorch-image-models-main/timm/models/hgnet.py """""" from typing import Dict, Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d, DropPath, create_conv2d from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from ._manipulate import checkpoint_seq __all__ = ['HighPerfGpuNet'] class LearnableAffineBlock(nn.Module): def __init__(self, scale_value=1.0, bias_value=0.0): super().__init__() self.scale = nn.Parameter(torch.tensor([scale_value]), requires_grad=True) self.bias = nn.Parameter(torch.tensor([bias_value]), requires_grad=True) def forward(self, x): return self.scale * x + self.bias class ConvBNAct(nn.Module): def __init__(self, in_chs, out_chs, kernel_size, stride=1, groups=1, padding='', use_act=True, use_lab=False): super().__init__() self.use_act = use_act self.use_lab = use_lab self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, padding=padding, groups=groups) self.bn = nn.BatchNorm2d(out_chs) if self.use_act: self.act = nn.ReLU() else: self.act = nn.Identity() if self.use_act and self.use_lab: self.lab = LearnableAffineBlock() else: self.lab = nn.Identity() def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.act(x) x = self.lab(x) return x class LightConvBNAct(nn.Module): def __init__(self, in_chs, out_chs, kernel_size, groups=1, use_lab=False): super().__init__() self.conv1 = ConvBNAct(in_chs, out_chs, kernel_size=1, use_act=False, use_lab=use_lab) self.conv2 = ConvBNAct(out_chs, out_chs, kernel_size=kernel_size, groups=out_chs, use_act=True, use_lab=use_lab) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class EseModule(nn.Module): def __init__(self, chs): super().__init__() self.conv = nn.Conv2d(chs, chs, kernel_size=1, stride=1, padding=0) self.sigmoid = nn.Sigmoid() def forward(self, x): identity = x x = x.mean((2, 3), keepdim=True) x = self.conv(x) x = self.sigmoid(x) return torch.mul(identity, x) class StemV1(nn.Module): def __init__(self, stem_chs): super().__init__() self.stem = nn.Sequential(*[ConvBNAct(stem_chs[i], stem_chs[i + 1], kernel_size=3, stride=2 if i == 0 else 1) for i in range(len(stem_chs) - 1)]) self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.stem(x) x = self.pool(x) return x class StemV2(nn.Module): def __init__(self, in_chs, mid_chs, out_chs, use_lab=False): super().__init__() self.stem1 = ConvBNAct(in_chs, mid_chs, kernel_size=3, stride=2, use_lab=use_lab) self.stem2a 
= ConvBNAct(mid_chs, mid_chs // 2, kernel_size=2, stride=1, use_lab=use_lab) self.stem2b = ConvBNAct(mid_chs // 2, mid_chs, kernel_size=2, stride=1, use_lab=use_lab) self.stem3 = ConvBNAct(mid_chs * 2, mid_chs, kernel_size=3, stride=2, use_lab=use_lab) self.stem4 = ConvBNAct(mid_chs, out_chs, kernel_size=1, stride=1, use_lab=use_lab) self.pool = nn.MaxPool2d(kernel_size=2, stride=1, ceil_mode=True) def forward(self, x): x = self.stem1(x) x = F.pad(x, (0, 1, 0, 1)) x2 = self.stem2a(x) x2 = F.pad(x2, (0, 1, 0, 1)) x2 = self.stem2b(x2) x1 = self.pool(x) x = torch.cat([x1, x2], dim=1) x = self.stem3(x) x = self.stem4(x) return x class HighPerfGpuBlock(nn.Module): def __init__(self, in_chs, mid_chs, out_chs, layer_num, kernel_size=3, residual=False, light_block=False, use_lab=False, agg='ese', drop_path=0.0): super().__init__() self.residual = residual self.layers = nn.ModuleList() for i in range(layer_num): if light_block: self.layers.append(LightConvBNAct(in_chs if i == 0 else mid_chs, mid_chs, kernel_size=kernel_size, use_lab=use_lab)) else: self.layers.append(ConvBNAct(in_chs if i == 0 else mid_chs, mid_chs, kernel_size=kernel_size, stride=1, use_lab=use_lab)) total_chs = in_chs + layer_num * mid_chs if agg == 'se': aggregation_squeeze_conv = ConvBNAct(total_chs, out_chs // 2, kernel_size=1, stride=1, use_lab=use_lab) aggregation_excitation_conv = ConvBNAct(out_chs // 2, out_chs, kernel_size=1, stride=1, use_lab=use_lab) self.aggregation = nn.Sequential(aggregation_squeeze_conv, aggregation_excitation_conv) else: aggregation_conv = ConvBNAct(total_chs, out_chs, kernel_size=1, stride=1, use_lab=use_lab) att = EseModule(out_chs) self.aggregation = nn.Sequential(aggregation_conv, att) self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() def forward(self, x): identity = x output = [x] for layer in self.layers: x = layer(x) output.append(x) x = torch.cat(output, dim=1) x = self.aggregation(x) if self.residual: x = self.drop_path(x) + identity return x class HighPerfGpuStage(nn.Module): def __init__(self, in_chs, mid_chs, out_chs, block_num, layer_num, downsample=True, stride=2, light_block=False, kernel_size=3, use_lab=False, agg='ese', drop_path=0.0): super().__init__() self.downsample = downsample if downsample: self.downsample = ConvBNAct(in_chs, in_chs, kernel_size=3, stride=stride, groups=in_chs, use_act=False, use_lab=use_lab) else: self.downsample = nn.Identity() blocks_list = [] for i in range(block_num): blocks_list.append(HighPerfGpuBlock(in_chs if i == 0 else out_chs, mid_chs, out_chs, layer_num, residual=False if i == 0 else True, kernel_size=kernel_size, light_block=light_block, use_lab=use_lab, agg=agg, drop_path=drop_path[i] if isinstance(drop_path, (list, tuple)) else drop_path)) self.blocks = nn.Sequential(*blocks_list) self.grad_checkpointing = False def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x, flatten=False) else: x = self.blocks(x) return x class ClassifierHead(nn.Module): def __init__(self, in_features: int, num_classes: int, pool_type: str='avg', drop_rate: float=0.0, hidden_size: Optional[int]=2048, use_lab: bool=False): super(ClassifierHead, self).__init__() self.num_features = in_features if pool_type is not None: if not pool_type: assert num_classes == 0, 'Classifier head must be removed if pooling is disabled' self.global_pool = SelectAdaptivePool2d(pool_type=pool_type) if hidden_size is not None: self.num_features = hidden_size last_conv = 
nn.Conv2d(in_features, hidden_size, kernel_size=1, stride=1, padding=0, bias=False) act = nn.ReLU() if use_lab: lab = LearnableAffineBlock() self.last_conv = nn.Sequential(last_conv, act, lab) else: self.last_conv = nn.Sequential(last_conv, act) else: self.last_conv = nn.Identity() self.dropout = nn.Dropout(drop_rate) self.flatten = nn.Flatten(1) if pool_type else nn.Identity() self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def reset(self, num_classes: int, pool_type: Optional[str]=None): if pool_type is not None: if not pool_type: assert num_classes == 0, 'Classifier head must be removed if pooling is disabled' self.global_pool = SelectAdaptivePool2d(pool_type=pool_type) self.flatten = nn.Flatten(1) if pool_type else nn.Identity() self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.last_conv(x) x = self.dropout(x) x = self.flatten(x) if pre_logits: return x x = self.fc(x) return x class HighPerfGpuNet(nn.Module): def __init__(self, cfg: Dict, in_chans: int=3, num_classes: int=1000, global_pool: str='avg', head_hidden_size: Optional[int]=2048, drop_rate: float=0.0, drop_path_rate: float=0.0, use_lab: bool=False, **kwargs): super(HighPerfGpuNet, self).__init__() stem_type = cfg['stem_type'] stem_chs = cfg['stem_chs'] stages_cfg = [cfg['stage1'], cfg['stage2'], cfg['stage3'], cfg['stage4']] self.num_classes = num_classes self.drop_rate = drop_rate self.use_lab = use_lab assert stem_type in ['v1', 'v2'] if stem_type == 'v2': self.stem = StemV2(in_chs=in_chans, mid_chs=stem_chs[0], out_chs=stem_chs[1], use_lab=use_lab) else: self.stem = StemV1([in_chans] + stem_chs) current_stride = 4 stages = [] self.feature_info = [] block_depths = [c[3] for c in stages_cfg] dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(block_depths)).split(block_depths)] for (i, stage_config) in enumerate(stages_cfg): (in_chs, mid_chs, out_chs, block_num, downsample, light_block, kernel_size, layer_num) = stage_config stages += [HighPerfGpuStage(in_chs=in_chs, mid_chs=mid_chs, out_chs=out_chs, block_num=block_num, layer_num=layer_num, downsample=downsample, light_block=light_block, kernel_size=kernel_size, use_lab=use_lab, agg='ese' if stem_type == 'v1' else 'se', drop_path=dpr[i])] self.num_features = out_chs if downsample: current_stride *= 2 self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.head = ClassifierHead(self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate, hidden_size=head_hidden_size, use_lab=use_lab) self.head_hidden_size = self.head.num_features for (n, m) in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): nn.init.zeros_(m.bias) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks='^stages\\.(\\d+)' if coarse else '^stages\\.(\\d+).blocks\\.(\\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, 
global_pool) def forward_features(self, x): x = self.stem(x) return self.stages(x) def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x model_cfgs = dict(hgnet_tiny={'stem_type': 'v1', 'stem_chs': [48, 48, 96], 'stage1': [96, 96, 224, 1, False, False, 3, 5], 'stage2': [224, 128, 448, 1, True, False, 3, 5], 'stage3': [448, 160, 512, 2, True, False, 3, 5], 'stage4': [512, 192, 768, 1, True, False, 3, 5]}, hgnet_small={'stem_type': 'v1', 'stem_chs': [64, 64, 128], 'stage1': [128, 128, 256, 1, False, False, 3, 6], 'stage2': [256, 160, 512, 1, True, False, 3, 6], 'stage3': [512, 192, 768, 2, True, False, 3, 6], 'stage4': [768, 224, 1024, 1, True, False, 3, 6]}, hgnet_base={'stem_type': 'v1', 'stem_chs': [96, 96, 160], 'stage1': [160, 192, 320, 1, False, False, 3, 7], 'stage2': [320, 224, 640, 2, True, False, 3, 7], 'stage3': [640, 256, 960, 3, True, False, 3, 7], 'stage4': [960, 288, 1280, 2, True, False, 3, 7]}, hgnetv2_b0={'stem_type': 'v2', 'stem_chs': [16, 16], 'stage1': [16, 16, 64, 1, False, False, 3, 3], 'stage2': [64, 32, 256, 1, True, False, 3, 3], 'stage3': [256, 64, 512, 2, True, True, 5, 3], 'stage4': [512, 128, 1024, 1, True, True, 5, 3]}, hgnetv2_b1={'stem_type': 'v2', 'stem_chs': [24, 32], 'stage1': [32, 32, 64, 1, False, False, 3, 3], 'stage2': [64, 48, 256, 1, True, False, 3, 3], 'stage3': [256, 96, 512, 2, True, True, 5, 3], 'stage4': [512, 192, 1024, 1, True, True, 5, 3]}, hgnetv2_b2={'stem_type': 'v2', 'stem_chs': [24, 32], 'stage1': [32, 32, 96, 1, False, False, 3, 4], 'stage2': [96, 64, 384, 1, True, False, 3, 4], 'stage3': [384, 128, 768, 3, True, True, 5, 4], 'stage4': [768, 256, 1536, 1, True, True, 5, 4]}, hgnetv2_b3={'stem_type': 'v2', 'stem_chs': [24, 32], 'stage1': [32, 32, 128, 1, False, False, 3, 5], 'stage2': [128, 64, 512, 1, True, False, 3, 5], 'stage3': [512, 128, 1024, 3, True, True, 5, 5], 'stage4': [1024, 256, 2048, 1, True, True, 5, 5]}, hgnetv2_b4={'stem_type': 'v2', 'stem_chs': [32, 48], 'stage1': [48, 48, 128, 1, False, False, 3, 6], 'stage2': [128, 96, 512, 1, True, False, 3, 6], 'stage3': [512, 192, 1024, 3, True, True, 5, 6], 'stage4': [1024, 384, 2048, 1, True, True, 5, 6]}, hgnetv2_b5={'stem_type': 'v2', 'stem_chs': [32, 64], 'stage1': [64, 64, 128, 1, False, False, 3, 6], 'stage2': [128, 128, 512, 2, True, False, 3, 6], 'stage3': [512, 256, 1024, 5, True, True, 5, 6], 'stage4': [1024, 512, 2048, 2, True, True, 5, 6]}, hgnetv2_b6={'stem_type': 'v2', 'stem_chs': [48, 96], 'stage1': [96, 96, 192, 2, False, False, 3, 6], 'stage2': [192, 192, 512, 3, True, False, 3, 6], 'stage3': [512, 384, 1024, 6, True, True, 5, 6], 'stage4': [1024, 768, 2048, 3, True, True, 5, 6]}) def _create_hgnet(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) return build_model_with_cfg(HighPerfGpuNet, variant, pretrained, model_cfg=model_cfgs[variant], feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.965, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head.fc', 'first_conv': 'stem.stem1.conv', 'test_crop_pct': 1.0, 'test_input_size': (3, 288, 288), **kwargs} default_cfgs = generate_default_cfgs({'hgnet_tiny.paddle_in1k': _cfg(first_conv='stem.stem.0.conv', 
hf_hub_id='timm/'), 'hgnet_tiny.ssld_in1k': _cfg(first_conv='stem.stem.0.conv', hf_hub_id='timm/'), 'hgnet_small.paddle_in1k': _cfg(first_conv='stem.stem.0.conv', hf_hub_id='timm/'), 'hgnet_small.ssld_in1k': _cfg(first_conv='stem.stem.0.conv', hf_hub_id='timm/'), 'hgnet_base.ssld_in1k': _cfg(first_conv='stem.stem.0.conv', hf_hub_id='timm/'), 'hgnetv2_b0.ssld_stage2_ft_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b0.ssld_stage1_in22k_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b1.ssld_stage2_ft_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b1.ssld_stage1_in22k_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b2.ssld_stage2_ft_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b2.ssld_stage1_in22k_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b3.ssld_stage2_ft_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b3.ssld_stage1_in22k_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b4.ssld_stage2_ft_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b4.ssld_stage1_in22k_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b5.ssld_stage2_ft_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b5.ssld_stage1_in22k_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b6.ssld_stage2_ft_in1k': _cfg(hf_hub_id='timm/'), 'hgnetv2_b6.ssld_stage1_in22k_in1k': _cfg(hf_hub_id='timm/')}) @register_model def hgnet_tiny(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnet_tiny', pretrained=pretrained, **kwargs) @register_model def hgnet_small(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnet_small', pretrained=pretrained, **kwargs) @register_model def hgnet_base(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnet_base', pretrained=pretrained, **kwargs) @register_model def hgnetv2_b0(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b0', pretrained=pretrained, use_lab=True, **kwargs) @register_model def hgnetv2_b1(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b1', pretrained=pretrained, use_lab=True, **kwargs) @register_model def hgnetv2_b2(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b2', pretrained=pretrained, use_lab=True, **kwargs) @register_model def hgnetv2_b3(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b3', pretrained=pretrained, use_lab=True, **kwargs) @register_model def hgnetv2_b4(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b4', pretrained=pretrained, **kwargs) @register_model def hgnetv2_b5(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b5', pretrained=pretrained, **kwargs) @register_model def hgnetv2_b6(pretrained=False, **kwargs) -> HighPerfGpuNet: return _create_hgnet('hgnetv2_b6', pretrained=pretrained, **kwargs) # File: pytorch-image-models-main/timm/models/hiera.py """""" import math from functools import partial from typing import Callable, Dict, List, Optional, Tuple, Type, Union import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, Mlp, LayerScale, ClNormMlpClassifierHead, use_fused_attn, _assert, get_norm_layer, to_2tuple, init_weight_vit, init_weight_jax from ._registry import generate_default_cfgs, register_model from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_function from ._manipulate import named_apply __all__ = ['Hiera'] def conv_nd(n: int) -> Type[nn.Module]: return [nn.Identity, nn.Conv1d, 
nn.Conv2d, nn.Conv3d][n] @register_notrace_function def get_resized_mask(target_size: List[int], mask: torch.Tensor) -> torch.Tensor: if mask is None: return mask _assert(len(mask.shape[2:]) == len(target_size), 'mask spatial shape and target_size must match.') if mask.shape[2:] != target_size: return F.interpolate(mask.float(), size=target_size) return mask def undo_windowing(x: torch.Tensor, shape: List[int], mu_shape: List[int]) -> torch.Tensor: D = len(shape) (B, C) = (x.shape[0], x.shape[-1]) num_MUs = [s // mu for (s, mu) in zip(shape, mu_shape)] x = x.view(B, *num_MUs, *mu_shape, C) permute = [0] + sum([list(p) for p in zip(range(1, 1 + D), range(1 + D, 1 + 2 * D))], []) + [len(x.shape) - 1] x = x.permute(permute).reshape(B, *shape, C) return x class Unroll(nn.Module): def __init__(self, input_size: Tuple[int, ...], patch_stride: Tuple[int, ...], unroll_schedule: List[Tuple[int, ...]]): super().__init__() self.size = [i // s for (i, s) in zip(input_size, patch_stride)] self.schedule = unroll_schedule def forward(self, x: torch.Tensor) -> torch.Tensor: (B, _, C) = x.shape cur_size = self.size x = x.view(*[B] + cur_size + [C]) for strides in self.schedule: cur_size = [i // s for (i, s) in zip(cur_size, strides)] new_shape = [B] + sum([[i, s] for (i, s) in zip(cur_size, strides)], []) + [C] x = x.view(new_shape) L = len(new_shape) permute = [0] + list(range(2, L - 1, 2)) + list(range(1, L - 1, 2)) + [L - 1] x = x.permute(permute) x = x.flatten(0, len(strides)) B *= math.prod(strides) x = x.reshape(-1, math.prod(self.size), C) return x class Reroll(nn.Module): def __init__(self, input_size: Tuple[int, ...], patch_stride: Tuple[int, ...], unroll_schedule: List[Tuple[int, ...]], stage_ends: List[int], q_pool: int): super().__init__() self.size = [i // s for (i, s) in zip(input_size, patch_stride)] self.schedule = {} size = self.size for i in range(stage_ends[-1] + 1): self.schedule[i] = (unroll_schedule, size) if i in stage_ends[:q_pool]: if len(unroll_schedule) > 0: size = [n // s for (n, s) in zip(size, unroll_schedule[0])] unroll_schedule = unroll_schedule[1:] def forward(self, x: torch.Tensor, block_idx: int, mask: torch.Tensor=None) -> torch.Tensor: (schedule, size) = self.schedule[block_idx] (B, N, C) = x.shape D = len(size) cur_mu_shape = [1] * D for strides in schedule: x = x.view(B, *strides, N // math.prod(strides), *cur_mu_shape, C) L = len(x.shape) permute = [0, 1 + D] + sum([list(p) for p in zip(range(1, 1 + D), range(1 + D + 1, L - 1))], []) + [L - 1] x = x.permute(permute) for i in range(D): cur_mu_shape[i] *= strides[i] x = x.reshape(B, -1, *cur_mu_shape, C) N = x.shape[1] x = x.view(B, N, *cur_mu_shape, C) if mask is not None: return x x = undo_windowing(x, size, cur_mu_shape) return x class MaskUnitAttention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim: int, dim_out: int, heads: int, q_stride: int=1, window_size: int=0, use_mask_unit_attn: bool=False): super().__init__() self.dim = dim self.dim_out = dim_out self.heads = heads self.q_stride = q_stride self.head_dim = dim_out // heads self.scale = self.head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, 3 * dim_out) self.proj = nn.Linear(dim_out, dim_out) self.window_size = window_size self.use_mask_unit_attn = use_mask_unit_attn def forward(self, x: torch.Tensor) -> torch.Tensor: (B, N, _) = x.shape num_windows = N // (self.q_stride * self.window_size) if self.use_mask_unit_attn else 1 qkv = self.qkv(x).reshape(B, -1, num_windows, 3, self.heads, 
            self.head_dim).permute(3, 0, 4, 2, 1, 5)
        q, k, v = qkv.unbind(0)

        if self.q_stride > 1:
            # max-pool queries within each q_stride group (mask-unit downsample)
            q = q.view(B, self.heads, num_windows, self.q_stride, -1, self.head_dim).amax(dim=3)

        if self.fused_attn:
            x = F.scaled_dot_product_attention(q, k, v)
        else:
            attn = (q * self.scale) @ k.transpose(-1, -2)
            attn = attn.softmax(dim=-1)
            x = attn @ v

        x = x.transpose(1, 3).reshape(B, -1, self.dim_out)
        x = self.proj(x)
        return x


class HieraBlock(nn.Module):
    def __init__(
            self,
            dim: int,
            dim_out: int,
            heads: int,
            mlp_ratio: float = 4.0,
            drop_path: float = 0.0,
            init_values: Optional[float] = None,
            norm_layer: nn.Module = nn.LayerNorm,
            act_layer: nn.Module = nn.GELU,
            q_stride: int = 1,
            window_size: int = 0,
            use_expand_proj: bool = True,
            use_mask_unit_attn: bool = False,
    ):
        super().__init__()
        self.dim = dim
        self.dim_out = dim_out
        self.norm1 = norm_layer(dim)
        if dim != dim_out:
            self.do_expand = True
            if use_expand_proj:
                self.proj = nn.Linear(dim, dim_out)
            else:
                assert dim_out == dim * 2
                self.proj = None
        else:
            self.do_expand = False
            self.proj = None
        self.attn = MaskUnitAttention(dim, dim_out, heads, q_stride, window_size, use_mask_unit_attn)
        self.ls1 = LayerScale(dim_out, init_values=init_values) if init_values is not None else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0 else nn.Identity()

        self.norm2 = norm_layer(dim_out)
        self.mlp = Mlp(dim_out, int(dim_out * mlp_ratio), act_layer=act_layer)
        self.ls2 = LayerScale(dim_out, init_values=init_values) if init_values is not None else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0 else nn.Identity()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x_norm = self.norm1(x)
        if self.do_expand:
            if self.proj is not None:
                x = self.proj(x_norm)
                # max-pool over the q_stride tokens that get merged by attention pooling
                x = x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).amax(dim=1)
            else:
                # no projection: double channels by concatenating max- and mean-pooled tokens
                x = torch.cat([
                    x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).amax(dim=1),
                    x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).mean(dim=1),
                ], dim=-1)
        x = x + self.drop_path1(self.ls1(self.attn(x_norm)))
        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
        return x


class PatchEmbed(nn.Module):
    def __init__(
            self,
            dim_in: int,
            dim_out: int,
            kernel: Tuple[int, ...],
            stride: Tuple[int, ...],
            padding: Tuple[int, ...],
            reshape: bool = True,
    ):
        super().__init__()
        self.spatial_dims = len(kernel)
        self.reshape = reshape
        self.proj = conv_nd(self.spatial_dims)(dim_in, dim_out, kernel_size=kernel, stride=stride, padding=padding)

    def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        if mask is not None:
            mask = get_resized_mask(target_size=x.shape[2:], mask=mask)
            x = self.proj(x * mask.to(torch.bool))
        else:
            x = self.proj(x)
        if self.reshape:
            x = x.reshape(x.shape[0], x.shape[1], -1).transpose(2, 1)
        return x


class Hiera(nn.Module):
    def __init__(
            self,
            img_size: Tuple[int, ...] = (224, 224),
            in_chans: int = 3,
            embed_dim: int = 96,
            num_heads: int = 1,
            num_classes: int = 1000,
            global_pool: str = 'avg',
            stages: Tuple[int, ...] = (2, 3, 16, 3),
            q_pool: int = 3,
            q_stride: Tuple[int, ...] = (2, 2),
            mask_unit_size: Tuple[int, ...] = (8, 8),
            mask_unit_attn: Tuple[bool, ...] = (True, True, False, False),
            use_expand_proj: bool = True,
            dim_mul: float = 2.0,
            head_mul: float = 2.0,
            patch_kernel: Tuple[int, ...] = (7, 7),
            patch_stride: Tuple[int, ...] = (4, 4),
            patch_padding: Tuple[int, ...] = (3, 3),
            mlp_ratio: float = 4.0,
            drop_path_rate: float = 0.0,
            init_values: Optional[float] = None,
            fix_init: bool = True,
            weight_init: str = '',
            norm_layer: Union[str, nn.Module] = 'LayerNorm',
            drop_rate: float = 0.0,
            patch_drop_rate: float = 0.0,
            head_init_scale: float = 0.001,
            sep_pos_embed: bool = False,
            abs_win_pos_embed: bool = False,
            global_pos_size: Tuple[int, int] = (14, 14),
    ):
        super().__init__()
        self.num_classes = num_classes
        self.grad_checkpointing = False
        norm_layer = get_norm_layer(norm_layer)
        if isinstance(img_size, int):
            img_size = to_2tuple(img_size)
        self.patch_stride = patch_stride
        self.tokens_spatial_shape = [i // s for i, s in zip(img_size, patch_stride)]
        num_tokens = math.prod(self.tokens_spatial_shape)
        flat_mu_size = math.prod(mask_unit_size)
        flat_q_stride = math.prod(q_stride)
        assert q_pool < len(stages)
        self.q_pool, self.q_stride = q_pool, q_stride
        self.mu_size, self.mask_unit_size = flat_mu_size, mask_unit_size
        self.mask_spatial_shape = [i // s for i, s in zip(self.tokens_spatial_shape, self.mask_unit_size)]
        self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
        self.patch_drop_rate = patch_drop_rate

        self.patch_embed = PatchEmbed(in_chans, embed_dim, patch_kernel, patch_stride, patch_padding)

        self.pos_embed: Optional[nn.Parameter] = None
        self.pos_embed_win: Optional[nn.Parameter] = None
        self.pos_embed_spatial: Optional[nn.Parameter] = None
        self.pos_embed_temporal: Optional[nn.Parameter] = None
        if sep_pos_embed:
            self.pos_embed_spatial = nn.Parameter(torch.zeros(
                1, self.tokens_spatial_shape[1] * self.tokens_spatial_shape[2], embed_dim))
            self.pos_embed_temporal = nn.Parameter(torch.zeros(
                1, self.tokens_spatial_shape[0], embed_dim))
        elif abs_win_pos_embed:
            # absolute win embedding: global table + per-mask-unit window embedding
            self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *global_pos_size))
            self.pos_embed_win = nn.Parameter(torch.zeros(1, embed_dim, *mask_unit_size))
        else:
            self.pos_embed = nn.Parameter(torch.zeros(1, num_tokens, embed_dim))

        self.unroll = Unroll(img_size, patch_stride, [q_stride] * len(self.stage_ends[:-1]))
        self.reroll = Reroll(img_size, patch_stride, [q_stride] * len(self.stage_ends[:-1]), self.stage_ends, q_pool)

        q_pool_blocks = [x + 1 for x in self.stage_ends[:q_pool]]
        cur_stage = 0
        depth = sum(stages)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList()
        self.feature_info = []
        for i in range(depth):
            dim_out = embed_dim
            use_mask_unit_attn = mask_unit_attn[cur_stage]
            if i - 1 in self.stage_ends:
                dim_out = int(embed_dim * dim_mul)
                num_heads = int(num_heads * head_mul)
                cur_stage += 1
                if i in q_pool_blocks:
                    flat_mu_size //= flat_q_stride
            block = HieraBlock(
                dim=embed_dim,
                dim_out=dim_out,
                heads=num_heads,
                mlp_ratio=mlp_ratio,
                drop_path=dpr[i],
                init_values=init_values,
                norm_layer=norm_layer,
                q_stride=flat_q_stride if i in q_pool_blocks else 1,
                window_size=flat_mu_size,
                use_expand_proj=use_expand_proj,
                use_mask_unit_attn=use_mask_unit_attn,
            )
            embed_dim = dim_out
            if i in self.stage_ends:
                self.feature_info += [dict(
                    num_chs=dim_out, reduction=2 ** (cur_stage + 2), module=f'blocks.{self.stage_ends[cur_stage]}')]
            self.blocks.append(block)

        self.num_features = self.head_hidden_size = embed_dim
        self.head = ClNormMlpClassifierHead(
            embed_dim,
            num_classes,
            pool_type=global_pool,
            drop_rate=drop_rate,
            norm_layer=norm_layer,
            input_fmt='NLC',
        )

        if sep_pos_embed:
            nn.init.trunc_normal_(self.pos_embed_spatial, std=0.02)
            nn.init.trunc_normal_(self.pos_embed_temporal, std=0.02)
        else:
            if self.pos_embed is not None:
                nn.init.trunc_normal_(self.pos_embed, std=0.02)
            if self.pos_embed_win is not None:
                nn.init.trunc_normal_(self.pos_embed_win, std=0.02)

        if weight_init != 'skip':
            init_fn = init_weight_jax if weight_init == 'jax' else init_weight_vit
            init_fn = partial(init_fn, classifier_name='head.fc')
            named_apply(init_fn, self)
        if fix_init:
            self.fix_init_weight()
        if isinstance(self.head.fc, nn.Linear):
            self.head.fc.weight.data.mul_(head_init_scale)
            self.head.fc.bias.data.mul_(head_init_scale)

    def fix_init_weight(self):
        def rescale(param, _layer_id):
            param.div_(math.sqrt(2.0 * _layer_id))

        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    @torch.jit.ignore
    def no_weight_decay(self):
        # pos_embed_win is checked first: in abs_win mode both the resized global
        # table and the window embedding are learned and excluded from weight decay
        # (there is no pos_embed_abs attribute on this module).
        if self.pos_embed_win is not None:
            return ['pos_embed', 'pos_embed_win']
        elif self.pos_embed is not None:
            return ['pos_embed']
        else:
            return ['pos_embed_spatial', 'pos_embed_temporal']

    @torch.jit.ignore
    def group_matcher(self, coarse: bool = False) -> Dict:
        return dict(
            stem=r'^pos_embed|pos_embed_spatial|pos_embed_temporal|pos_embed_abs|pos_embed_win|patch_embed',
            blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))],
        )

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable: bool = True) -> None:
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def get_classifier(self):
        return self.head.fc

    def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, reset_other: bool = False):
        self.num_classes = num_classes
        self.head.reset(num_classes, global_pool, reset_other=reset_other)

    def get_random_mask(self, x: torch.Tensor, mask_ratio: float) -> torch.Tensor:
        B = x.shape[0]
        num_windows = math.prod(self.mask_spatial_shape)
        len_keep = int(num_windows * (1 - mask_ratio))
        noise = torch.rand(B, num_windows, device=x.device)
        # sort noise per sample; the first len_keep positions (after unshuffle) are kept
        ids_shuffle = torch.argsort(noise, dim=1)
        ids_restore = torch.argsort(ids_shuffle, dim=1)
        mask = torch.zeros([B, num_windows], device=x.device)
        mask[:, :len_keep] = 1
        mask = torch.gather(mask, dim=1, index=ids_restore)
        return mask.bool()

    def _pos_embed(self, x) -> torch.Tensor:
        if self.pos_embed_win is not None:
            # absolute win embedding: tile the window embed over the mask-unit grid
            # and add it to a resized global position embedding
            pos_embed_win = self.pos_embed_win.tile(self.mask_spatial_shape)
            pos_embed = F.interpolate(
                self.pos_embed,
                size=pos_embed_win.shape[-2:],
                mode='bicubic',
                antialias=True,
            )
            pos_embed = pos_embed + pos_embed_win
            pos_embed = pos_embed.flatten(2).transpose(1, 2)
        elif self.pos_embed is not None:
            pos_embed = self.pos_embed
        else:
            pos_embed = (
                self.pos_embed_spatial.repeat(1, self.tokens_spatial_shape[0], 1)
                + torch.repeat_interleave(
                    self.pos_embed_temporal,
                    self.tokens_spatial_shape[1] * self.tokens_spatial_shape[2],
                    dim=1,
                )
            )
        x = x + pos_embed
        return x

    def forward_intermediates(
            self,
            x: torch.Tensor,
            mask: Optional[torch.Tensor] = None,
            indices: Optional[Union[int, List[int]]] = None,
            norm: bool = False,
            stop_early: bool = True,
            output_fmt: str = 'NCHW',
            intermediates_only: bool = False,
            coarse: bool = True,
    ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
        assert not norm, 'normalization of features not supported'
        assert output_fmt in ('NCHW', 'NHWC'), 'Output format must be one of NCHW, NHWC.'
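        # Illustrative worked example: with the default stages=(2, 3, 16, 3),
        # stage_ends == [1, 4, 20, 23] (sum(stages[:i]) - 1), so coarse indices
        # (0, 1, 2, 3) map to the blocks that close each stage and max_index == 23.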
        if coarse:
            take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
            take_indices = [self.stage_ends[i] for i in take_indices]
            max_index = self.stage_ends[max_index]
        else:
            take_indices, max_index = feature_take_indices(len(self.blocks), indices)

        if mask is not None:
            patch_mask = mask.view(x.shape[0], 1, *self.mask_spatial_shape)
        else:
            patch_mask = None
        x = self.patch_embed(x, mask=patch_mask)
        x = self._pos_embed(x)
        x = self.unroll(x)
        if mask is not None:
            # discard masked tokens, keeping only visible mask units
            x = x[mask[..., None].tile(1, self.mu_size, x.shape[2])].view(x.shape[0], -1, x.shape[-1])

        intermediates = []
        if torch.jit.is_scripting() or not stop_early:
            blocks = self.blocks
        else:
            blocks = self.blocks[:max_index + 1]
        for i, blk in enumerate(blocks):
            x = blk(x)
            if i in take_indices:
                x_int = self.reroll(x, i, mask=mask)
                intermediates.append(x_int.permute(0, 3, 1, 2) if output_fmt == 'NCHW' else x_int)

        if intermediates_only:
            return intermediates

        return x, intermediates

    def prune_intermediate_layers(
            self,
            indices: Union[int, List[int]] = 1,
            prune_norm: bool = False,
            prune_head: bool = True,
            coarse: bool = True,
    ):
        if coarse:
            take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
            max_index = self.stage_ends[max_index]
        else:
            take_indices, max_index = feature_take_indices(len(self.blocks), indices)
        self.blocks = self.blocks[:max_index + 1]
        if prune_head:
            self.head.reset(0, reset_other=True)
        return take_indices

    def forward_features(
            self,
            x: torch.Tensor,
            mask: Optional[torch.Tensor] = None,
            return_intermediates: bool = False,
    ) -> torch.Tensor:
        if self.training and self.patch_drop_rate > 0:
            # patch drop is implemented as random masking of mask units during training
            assert mask is None
            mask = self.get_random_mask(x, mask_ratio=self.patch_drop_rate)

        if mask is not None:
            patch_mask = mask.view(x.shape[0], 1, *self.mask_spatial_shape)
        else:
            patch_mask = None
        x = self.patch_embed(x, mask=patch_mask)
        x = self._pos_embed(x)
        x = self.unroll(x)
        if mask is not None:
            x = x[mask[..., None].tile(1, self.mu_size, x.shape[2])].view(x.shape[0], -1, x.shape[-1])

        intermediates = []
        for i, blk in enumerate(self.blocks):
            if self.grad_checkpointing and not torch.jit.is_scripting():
                x = checkpoint(blk, x)
            else:
                x = blk(x)
            if return_intermediates and i in self.stage_ends:
                intermediates.append(self.reroll(x, i, mask=mask))

        if return_intermediates:
            return x, intermediates
        return x

    def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor:
        x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
        return x

    def forward(
            self,
            x: torch.Tensor,
            mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        x = self.forward_features(x, mask=mask)
        if mask is None:
            x = self.forward_head(x)
        return x


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': None,
        'crop_pct': 0.9,
        'interpolation': 'bicubic',
        'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embed.proj',
        'classifier': 'head.fc',
        **kwargs,
    }


default_cfgs = generate_default_cfgs({
    'hiera_tiny_224.mae_in1k_ft_in1k': _cfg(hf_hub_id='timm/', license='cc-by-nc-4.0'),
    'hiera_tiny_224.mae': _cfg(hf_hub_id='timm/', license='cc-by-nc-4.0', num_classes=0),
    'hiera_small_224.mae_in1k_ft_in1k': _cfg(hf_hub_id='timm/', license='cc-by-nc-4.0'),
    'hiera_small_224.mae': _cfg(hf_hub_id='timm/', license='cc-by-nc-4.0', num_classes=0),
    'hiera_base_224.mae_in1k_ft_in1k': _cfg(hf_hub_id='timm/', license='cc-by-nc-4.0'),
    'hiera_base_224.mae': _cfg(hf_hub_id='timm/', license='cc-by-nc-4.0', num_classes=0),
    'hiera_base_plus_224.mae_in1k_ft_in1k': _cfg(hf_hub_id='timm/', license='cc-by-nc-4.0'),
    'hiera_base_plus_224.mae': _cfg(hf_hub_id='timm/', license='cc-by-nc-4.0', num_classes=0),
    'hiera_large_224.mae_in1k_ft_in1k': _cfg(hf_hub_id='timm/', license='cc-by-nc-4.0'),
    'hiera_large_224.mae': _cfg(hf_hub_id='timm/', license='cc-by-nc-4.0', num_classes=0),
    'hiera_huge_224.mae_in1k_ft_in1k': _cfg(hf_hub_id='timm/', license='cc-by-nc-4.0'),
    'hiera_huge_224.mae': _cfg(hf_hub_id='timm/', license='cc-by-nc-4.0', num_classes=0),
    'hiera_small_abswin_256.sbb2_e200_in12k_ft_in1k': _cfg(
        hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95),
    'hiera_small_abswin_256.sbb2_pd_e200_in12k_ft_in1k': _cfg(
        hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95),
    'hiera_small_abswin_256.sbb2_e200_in12k': _cfg(
        hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95),
    'hiera_small_abswin_256.sbb2_pd_e200_in12k': _cfg(
        hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95),
    'hiera_base_abswin_256.untrained': _cfg(input_size=(3, 256, 256), crop_pct=0.95),
})


def checkpoint_filter_fn(state_dict, model=None):
    state_dict = state_dict.get('model_state', state_dict)
    output = {}
    for k, v in state_dict.items():
        # remap reference MAE checkpoint names to timm's head/norm naming
        if 'head.projection.' in k:
            k = k.replace('head.projection.', 'head.fc.')
        if k.startswith('encoder_norm.'):
            k = k.replace('encoder_norm.', 'head.norm.')
        elif k.startswith('norm.'):
            k = k.replace('norm.', 'head.norm.')
        if k == 'pos_embed_abs':
            k = 'pos_embed'
        output[k] = v
    return output


def _create_hiera(variant: str, pretrained: bool = False, **kwargs) -> Hiera:
    out_indices = kwargs.pop('out_indices', 4)
    return build_model_with_cfg(
        Hiera,
        variant,
        pretrained,
        pretrained_filter_fn=checkpoint_filter_fn,
        feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
        **kwargs,
    )


@register_model
def hiera_tiny_224(pretrained=False, **kwargs):
    model_args = dict(embed_dim=96, num_heads=1, stages=(1, 2, 7, 2))
    return _create_hiera('hiera_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def hiera_small_224(pretrained=False, **kwargs):
    model_args = dict(embed_dim=96, num_heads=1, stages=(1, 2, 11, 2))
    return _create_hiera('hiera_small_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def hiera_base_224(pretrained=False, **kwargs):
    model_args = dict(embed_dim=96, num_heads=1, stages=(2, 3, 16, 3))
    return _create_hiera('hiera_base_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def hiera_base_plus_224(pretrained=False, **kwargs):
    model_args = dict(embed_dim=112, num_heads=2, stages=(2, 3, 16, 3))
    return _create_hiera('hiera_base_plus_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def hiera_large_224(pretrained=False, **kwargs):
    model_args = dict(embed_dim=144, num_heads=2, stages=(2, 6, 36, 4))
    return _create_hiera('hiera_large_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def hiera_huge_224(pretrained=False, **kwargs):
    model_args = dict(embed_dim=256, num_heads=4, stages=(2, 6, 36, 4))
    return _create_hiera('hiera_huge_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def hiera_small_abswin_256(pretrained=False, **kwargs):
    model_args = dict(
        embed_dim=96,
        num_heads=1,
        stages=(1, 2, 11, 2),
        abs_win_pos_embed=True,
        global_pos_size=(16, 16),
        init_values=1e-05,
        weight_init='jax',
        use_expand_proj=False,
    )
    return _create_hiera('hiera_small_abswin_256', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def hiera_base_abswin_256(pretrained=False, **kwargs):
    model_args = dict(
        embed_dim=96, num_heads=1, stages=(2, 3, 16, 3), abs_win_pos_embed=True, init_values=1e-05, weight_init='jax')
    return _create_hiera('hiera_base_abswin_256', pretrained=pretrained, **dict(model_args, **kwargs))


# File: pytorch-image-models-main/timm/models/hieradet_sam2.py
import math
from copy import deepcopy
from functools import partial
from typing import Callable, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.jit import Final

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, DropPath, ClNormMlpClassifierHead, LayerScale, \
    get_norm_layer, get_act_layer, init_weight_jax, init_weight_vit, to_2tuple, use_fused_attn

from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import named_apply, checkpoint_seq, adapt_input_conv
from ._registry import generate_default_cfgs, register_model, register_model_deprecations


def window_partition(x, window_size: Tuple[int, int]):
    B, H, W, C = x.shape
    x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
    return windows


def window_unpartition(windows: torch.Tensor, window_size: Tuple[int, int], hw: Tuple[int, int]):
    H, W = hw
    B = windows.shape[0] // (H * W // window_size[0] // window_size[1])
    x = windows.view(B, H // window_size[0], W // window_size[1], window_size[0], window_size[1], -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x


def _calc_pad(H: int, W: int, window_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
    pad_h = (window_size[0] - H % window_size[0]) % window_size[0]
    pad_w = (window_size[1] - W % window_size[1]) % window_size[1]
    Hp, Wp = H + pad_h, W + pad_w
    return Hp, Wp, pad_h, pad_w


class MultiScaleAttention(nn.Module):
    fused_attn: torch.jit.Final[bool]

    def __init__(
            self,
            dim: int,
            dim_out: int,
            num_heads: int,
            q_pool: nn.Module = None,
    ):
        super().__init__()
        self.dim = dim
        self.dim_out = dim_out
        self.num_heads = num_heads
        head_dim = dim_out // num_heads
        self.scale = head_dim ** -0.5
        self.fused_attn = use_fused_attn()
        self.q_pool = q_pool
        self.qkv = nn.Linear(dim, dim_out * 3)
        self.proj = nn.Linear(dim_out, dim_out)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        B, H, W, _ = x.shape
        qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1)
        q, k, v = torch.unbind(qkv, 2)

        if self.q_pool is not None:
            # pool query tokens spatially, shrinking the output resolution
            q = q.reshape(B, H, W, -1).permute(0, 3, 1, 2)  # to BCHW for pooling
            q = self.q_pool(q).permute(0, 2, 3, 1)
            H, W = q.shape[1:3]  # downsampled shape
            q = q.reshape(B, H * W, self.num_heads, -1)

        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)
        if self.fused_attn:
            x = F.scaled_dot_product_attention(q, k, v)
        else:
            q = q * self.scale
            attn = q @ k.transpose(-1, -2)
            attn = attn.softmax(dim=-1)
            x = attn @ v

        x = x.transpose(1, 2).reshape(B, H, W, -1)
        x = self.proj(x)
        return x


class MultiScaleBlock(nn.Module):
    def __init__(
            self,
            dim: int,
            dim_out: int,
            num_heads: int,
            mlp_ratio: float = 4.0,
            q_stride: Optional[Tuple[int, int]] = None,
            norm_layer: Union[nn.Module, str] = 'LayerNorm',
            act_layer: Union[nn.Module, str] = 'GELU',
            window_size: int = 0,
            init_values: Optional[float] = None,
            drop_path: float = 0.0,
    ):
        super().__init__()
        norm_layer = get_norm_layer(norm_layer)
        act_layer = get_act_layer(act_layer)
        self.window_size = to_2tuple(window_size)
        self.is_windowed = any(self.window_size)
        self.dim = dim
        self.dim_out = dim_out
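        # When q_stride is set (once per stage transition), both the attention
        # queries and the residual shortcut are max-pooled, so e.g. a (2, 2)
        # q_stride halves H and W while self.proj expands the channel dim.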
        self.q_stride = q_stride
        if dim != dim_out:
            self.proj = nn.Linear(dim, dim_out)
        else:
            self.proj = nn.Identity()
        self.pool = None
        if self.q_stride:
            self.pool = nn.MaxPool2d(kernel_size=q_stride, stride=q_stride, ceil_mode=False)

        self.norm1 = norm_layer(dim)
        self.attn = MultiScaleAttention(
            dim,
            dim_out,
            num_heads=num_heads,
            q_pool=deepcopy(self.pool),  # separate pool instance for the attention path
        )
        self.ls1 = LayerScale(dim_out, init_values) if init_values is not None else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

        self.norm2 = norm_layer(dim_out)
        self.mlp = Mlp(dim_out, int(dim_out * mlp_ratio), act_layer=act_layer)
        self.ls2 = LayerScale(dim_out, init_values) if init_values is not None else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        shortcut = x  # B, H, W, C
        x = self.norm1(x)

        # skip connection, projected / pooled to match the attention output
        if self.dim != self.dim_out:
            shortcut = self.proj(x)
        if self.pool is not None:
            shortcut = shortcut.permute(0, 3, 1, 2)
            shortcut = self.pool(shortcut).permute(0, 2, 3, 1)

        # window partition (with padding to a multiple of the window size)
        window_size = self.window_size
        H, W = x.shape[1:3]
        Hp, Wp = H, W  # defaults for the non-windowed path
        if self.is_windowed:
            Hp, Wp, pad_h, pad_w = _calc_pad(H, W, window_size)
            x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
            x = window_partition(x, window_size)

        # window attention + optional query pooling (at stage change)
        x = self.attn(x)
        if self.q_stride is not None:
            # shapes have changed due to query pooling
            window_size = (self.window_size[0] // self.q_stride[0], self.window_size[1] // self.q_stride[1])
            H, W = shortcut.shape[1:3]
            Hp, Wp, pad_h, pad_w = _calc_pad(H, W, window_size)

        # reverse window partition and unpad
        if self.is_windowed:
            x = window_unpartition(x, window_size, (Hp, Wp))
            x = x[:, :H, :W, :].contiguous()

        x = shortcut + self.drop_path1(self.ls1(x))
        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
        return x


class HieraPatchEmbed(nn.Module):
    def __init__(
            self,
            kernel_size: Tuple[int, ...] = (7, 7),
            stride: Tuple[int, ...] = (4, 4),
            padding: Tuple[int, ...] = (3, 3),
            in_chans: int = 3,
            embed_dim: int = 768,
    ):
        super().__init__()
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj(x)
        x = x.permute(0, 2, 3, 1)  # B C H W -> B H W C
        return x


class HieraDet(nn.Module):
    def __init__(
            self,
            in_chans: int = 3,
            num_classes: int = 1000,
            global_pool: str = 'avg',
            embed_dim: int = 96,
            num_heads: int = 1,
            patch_kernel: Tuple[int, ...] = (7, 7),
            patch_stride: Tuple[int, ...] = (4, 4),
            patch_padding: Tuple[int, ...] = (3, 3),
            patch_size: Optional[Tuple[int, ...]] = None,
            q_pool: int = 3,
            q_stride: Tuple[int, int] = (2, 2),
            stages: Tuple[int, ...] = (2, 3, 16, 3),
            dim_mul: float = 2.0,
            head_mul: float = 2.0,
            global_pos_size: Tuple[int, int] = (7, 7),
            window_spec: Tuple[int, ...] = (8, 4, 14, 7),
            global_att_blocks: Tuple[int, ...] = (12, 16, 20),
            init_values: Optional[float] = None,
            weight_init: str = '',
            fix_init: bool = True,
            head_init_scale: float = 0.001,
            drop_rate: float = 0.0,
            drop_path_rate: float = 0.0,
            norm_layer: Union[nn.Module, str] = 'LayerNorm',
            act_layer: Union[nn.Module, str] = 'GELU',
    ):
        super().__init__()
        norm_layer = get_norm_layer(norm_layer)
        act_layer = get_act_layer(act_layer)
        assert len(stages) == len(window_spec)
        self.num_classes = num_classes
        self.window_spec = window_spec
        self.output_fmt = 'NHWC'

        depth = sum(stages)
        self.q_stride = q_stride
        self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
        assert 0 <= q_pool <= len(self.stage_ends[:-1])
        self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool]

        if patch_size is not None:
            self.patch_embed = PatchEmbed(
                img_size=None,
                patch_size=patch_size,
                in_chans=in_chans,
                embed_dim=embed_dim,
                output_fmt='NHWC',
                dynamic_img_pad=True,
            )
        else:
            self.patch_embed = HieraPatchEmbed(
                kernel_size=patch_kernel,
                stride=patch_stride,
                padding=patch_padding,
                in_chans=in_chans,
                embed_dim=embed_dim,
            )
        self.global_att_blocks = global_att_blocks
        self.global_pos_size = global_pos_size
        self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *self.global_pos_size))
        self.pos_embed_window = nn.Parameter(torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0]))

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        cur_stage = 0
        self.blocks = nn.Sequential()
        self.feature_info = []
        for i in range(depth):
            dim_out = embed_dim
            window_size = self.window_spec[cur_stage]
            if self.global_att_blocks is not None:
                # global attention blocks use window_size == 0 (no windowing)
                window_size = 0 if i in self.global_att_blocks else window_size
            if i - 1 in self.stage_ends:
                dim_out = int(embed_dim * dim_mul)
                num_heads = int(num_heads * head_mul)
                cur_stage += 1
            block = MultiScaleBlock(
                dim=embed_dim,
                dim_out=dim_out,
                num_heads=num_heads,
                drop_path=dpr[i],
                q_stride=self.q_stride if i in self.q_pool_blocks else None,
                window_size=window_size,
                norm_layer=norm_layer,
                act_layer=act_layer,
            )
            embed_dim = dim_out
            self.blocks.append(block)
            if i in self.stage_ends:
                self.feature_info += [dict(
                    num_chs=dim_out, reduction=2 ** (cur_stage + 2), module=f'blocks.{self.stage_ends[cur_stage]}')]

        self.num_features = self.head_hidden_size = embed_dim
        self.head = ClNormMlpClassifierHead(
            embed_dim,
            num_classes,
            pool_type=global_pool,
            drop_rate=drop_rate,
            norm_layer=norm_layer,
        )

        if self.pos_embed is not None:
            nn.init.trunc_normal_(self.pos_embed, std=0.02)
        if self.pos_embed_window is not None:
            nn.init.trunc_normal_(self.pos_embed_window, std=0.02)
        if weight_init != 'skip':
            init_fn = init_weight_jax if weight_init == 'jax' else init_weight_vit
            init_fn = partial(init_fn, classifier_name='head.fc')
            named_apply(init_fn, self)
        if fix_init:
            self.fix_init_weight()
        if isinstance(self.head, ClNormMlpClassifierHead) and isinstance(self.head.fc, nn.Linear):
            self.head.fc.weight.data.mul_(head_init_scale)
            self.head.fc.bias.data.mul_(head_init_scale)

    def _pos_embed(self, x: torch.Tensor) -> torch.Tensor:
        h, w = x.shape[1:3]
        window_embed = self.pos_embed_window
        pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode='bicubic')
        tile_h = pos_embed.shape[-2] // window_embed.shape[-2]
        tile_w = pos_embed.shape[-1] // window_embed.shape[-1]
        pos_embed = pos_embed + window_embed.tile((tile_h, tile_w))
        pos_embed = pos_embed.permute(0, 2, 3, 1)
        return x + pos_embed

    def fix_init_weight(self):
        def rescale(param, _layer_id):
            param.div_(math.sqrt(2.0 * _layer_id))

        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    @torch.jit.ignore
    def no_weight_decay(self):
        return ['pos_embed', 'pos_embed_window']

    @torch.jit.ignore
    def group_matcher(self, coarse: bool = False) -> Dict:
        return dict(
            stem=r'^pos_embed|pos_embed_window|patch_embed',
            blocks=[(r'^blocks\.(\d+)', None)],
        )

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable: bool = True) -> None:
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def get_classifier(self):
        return self.head.fc

    def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, reset_other: bool = False):
        self.num_classes = num_classes
        self.head.reset(num_classes, pool_type=global_pool, reset_other=reset_other)

    def forward_intermediates(
            self,
            x: torch.Tensor,
            indices: Optional[Union[int, List[int]]] = None,
            norm: bool = False,
            stop_early: bool = True,
            output_fmt: str = 'NCHW',
            intermediates_only: bool = False,
            coarse: bool = True,
    ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
        assert not norm, 'normalization of features not supported'
        assert output_fmt in ('NCHW', 'NHWC'), 'Output format must be one of NCHW, NHWC.'
        if coarse:
            take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
            take_indices = [self.stage_ends[i] for i in take_indices]
            max_index = self.stage_ends[max_index]
        else:
            take_indices, max_index = feature_take_indices(len(self.blocks), indices)

        x = self.patch_embed(x)
        x = self._pos_embed(x)

        intermediates = []
        if torch.jit.is_scripting() or not stop_early:
            blocks = self.blocks
        else:
            blocks = self.blocks[:max_index + 1]
        for i, blk in enumerate(blocks):
            x = blk(x)
            if i in take_indices:
                x_out = x.permute(0, 3, 1, 2) if output_fmt == 'NCHW' else x
                intermediates.append(x_out)

        if intermediates_only:
            return intermediates

        return x, intermediates

    def prune_intermediate_layers(
            self,
            indices: Union[int, List[int]] = 1,
            prune_norm: bool = False,
            prune_head: bool = True,
            coarse: bool = True,
    ):
        if coarse:
            take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
            max_index = self.stage_ends[max_index]
        else:
            take_indices, max_index = feature_take_indices(len(self.blocks), indices)
        self.blocks = self.blocks[:max_index + 1]
        if prune_head:
            self.head.reset(0, reset_other=prune_norm)
        return take_indices

    def forward_features(self, x: torch.Tensor) -> torch.Tensor:
        x = self.patch_embed(x)
        x = self._pos_embed(x)
        for i, blk in enumerate(self.blocks):
            x = blk(x)
        return x

    def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor:
        x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
        return x

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 0,
        'input_size': (3, 896, 896),
        'pool_size': (28, 28),
        'crop_pct': 1.0,
        'interpolation': 'bicubic',
        'min_input_size': (3, 224, 224),
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embed.proj',
        'classifier': 'head.fc',
        **kwargs,
    }


default_cfgs = generate_default_cfgs({
    'sam2_hiera_tiny.r224': _cfg(
        hf_hub_id='facebook/sam2-hiera-tiny',
        hf_hub_filename='sam2_hiera_tiny.pt',
        input_size=(3, 224, 224), pool_size=(7, 7),
    ),
    'sam2_hiera_tiny.r896': _cfg(
        hf_hub_id='facebook/sam2-hiera-tiny',
        hf_hub_filename='sam2_hiera_tiny.pt',
    ),
    'sam2_hiera_small': _cfg(
        hf_hub_id='facebook/sam2-hiera-small',
        hf_hub_filename='sam2_hiera_small.pt',
    ),
    'sam2_hiera_base_plus': _cfg(
        hf_hub_id='facebook/sam2-hiera-base-plus',
        hf_hub_filename='sam2_hiera_base_plus.pt',
    ),
    'sam2_hiera_large': _cfg(
        hf_hub_id='facebook/sam2-hiera-large',
        hf_hub_filename='sam2_hiera_large.pt',
        min_input_size=(3, 256, 256),
        input_size=(3, 1024, 1024), pool_size=(32, 32),
    ),
    'hieradet_small.untrained': _cfg(num_classes=1000, input_size=(3, 256, 256), pool_size=(8, 8)),
})


def checkpoint_filter_fn(state_dict, model=None, prefix=''):
    state_dict = state_dict.get('model', state_dict)
    output = {}
    for k, v in state_dict.items():
        if k.startswith(prefix):
            k = k.replace(prefix, '')
        else:
            continue
        k = k.replace('mlp.layers.0', 'mlp.fc1')
        k = k.replace('mlp.layers.1', 'mlp.fc2')
        output[k] = v
    return output


def _create_hiera_det(variant: str, pretrained: bool = False, **kwargs) -> HieraDet:
    out_indices = kwargs.pop('out_indices', 4)
    checkpoint_prefix = ''
    if 'sam2' in variant:
        kwargs.setdefault('pretrained_strict', False)
        checkpoint_prefix = 'image_encoder.trunk.'
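    # SAM2 checkpoints contain the full model (image encoder + prompt/mask heads);
    # only keys under 'image_encoder.trunk.' belong to this backbone, so the filter
    # strips that prefix and drops everything else, and strict loading is relaxed
    # because the classifier head has no pretrained weights.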
    return build_model_with_cfg(
        HieraDet,
        variant,
        pretrained,
        pretrained_filter_fn=partial(checkpoint_filter_fn, prefix=checkpoint_prefix),
        feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
        **kwargs,
    )


@register_model
def sam2_hiera_tiny(pretrained=False, **kwargs):
    model_args = dict(stages=(1, 2, 7, 2), global_att_blocks=(5, 7, 9))
    return _create_hiera_det('sam2_hiera_tiny', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def sam2_hiera_small(pretrained=False, **kwargs):
    model_args = dict(stages=(1, 2, 11, 2), global_att_blocks=(7, 10, 13))
    return _create_hiera_det('sam2_hiera_small', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def sam2_hiera_base_plus(pretrained=False, **kwargs):
    model_args = dict(embed_dim=112, num_heads=2, global_pos_size=(14, 14))
    return _create_hiera_det('sam2_hiera_base_plus', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def sam2_hiera_large(pretrained=False, **kwargs):
    model_args = dict(
        embed_dim=144,
        num_heads=2,
        stages=(2, 6, 36, 4),
        global_att_blocks=(23, 33, 43),
        window_spec=(8, 4, 16, 8),
    )
    return _create_hiera_det('sam2_hiera_large', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def hieradet_small(pretrained=False, **kwargs):
    model_args = dict(
        stages=(1, 2, 11, 2),
        global_att_blocks=(7, 10, 13),
        window_spec=(8, 4, 16, 8),
        init_values=1e-05,
    )
    return _create_hiera_det('hieradet_small', pretrained=pretrained, **dict(model_args, **kwargs))


# File: pytorch-image-models-main/timm/models/hrnet.py
""""""
import logging
from typing import List

import torch
import torch.nn as nn
import torch.nn.functional as F

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import create_classifier
from ._builder import build_model_with_cfg, pretrained_cfg_for_features
from ._features import FeatureInfo
from ._registry import register_model, generate_default_cfgs
from .resnet import BasicBlock, Bottleneck

__all__ = ['HighResolutionNet', 'HighResolutionNetFeatures']

_BN_MOMENTUM = 0.1
_logger = logging.getLogger(__name__)

cfg_cls = dict(
    hrnet_w18_small=dict(
        stem_width=64,
        stage1=dict(num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(1,), num_channels=(32,), fuse_method='SUM'),
        stage2=dict(num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(2, 2), num_channels=(16, 32), fuse_method='SUM'),
        stage3=dict(num_modules=1, num_branches=3, block_type='BASIC', num_blocks=(2, 2, 2), num_channels=(16, 32, 64), fuse_method='SUM'),
        stage4=dict(num_modules=1, num_branches=4, block_type='BASIC', num_blocks=(2, 2, 2, 2), num_channels=(16, 32, 64, 128), fuse_method='SUM'),
    ),
    hrnet_w18_small_v2=dict(
        stem_width=64,
        stage1=dict(num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(2,), num_channels=(64,), fuse_method='SUM'),
        stage2=dict(num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(2, 2), num_channels=(18, 36), fuse_method='SUM'),
        stage3=dict(num_modules=3, num_branches=3, block_type='BASIC', num_blocks=(2, 2, 2), num_channels=(18, 36, 72), fuse_method='SUM'),
        stage4=dict(num_modules=2, num_branches=4, block_type='BASIC', num_blocks=(2, 2, 2, 2), num_channels=(18, 36, 72, 144), fuse_method='SUM'),
    ),
    hrnet_w18=dict(
        stem_width=64,
        stage1=dict(num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM'),
        stage2=dict(num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(18, 36), fuse_method='SUM'),
        stage3=dict(num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(18, 36, 72), fuse_method='SUM'),
        stage4=dict(num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(18, 36, 72, 144), fuse_method='SUM'),
    ),
    hrnet_w30=dict(
        stem_width=64,
        stage1=dict(num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM'),
        stage2=dict(num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(30, 60), fuse_method='SUM'),
        stage3=dict(num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(30, 60, 120), fuse_method='SUM'),
        stage4=dict(num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(30, 60, 120, 240), fuse_method='SUM'),
    ),
    hrnet_w32=dict(
        stem_width=64,
        stage1=dict(num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM'),
        stage2=dict(num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(32, 64), fuse_method='SUM'),
        stage3=dict(num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(32, 64, 128), fuse_method='SUM'),
        stage4=dict(num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256), fuse_method='SUM'),
    ),
    hrnet_w40=dict(
        stem_width=64,
        stage1=dict(num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM'),
        stage2=dict(num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(40, 80), fuse_method='SUM'),
        stage3=dict(num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(40, 80, 160), fuse_method='SUM'),
        stage4=dict(num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(40, 80, 160, 320), fuse_method='SUM'),
    ),
    hrnet_w44=dict(
        stem_width=64,
        stage1=dict(num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM'),
        stage2=dict(num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(44, 88), fuse_method='SUM'),
        stage3=dict(num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(44, 88, 176), fuse_method='SUM'),
        stage4=dict(num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(44, 88, 176, 352), fuse_method='SUM'),
    ),
    hrnet_w48=dict(
        stem_width=64,
        stage1=dict(num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM'),
        stage2=dict(num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(48, 96), fuse_method='SUM'),
        stage3=dict(num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(48, 96, 192), fuse_method='SUM'),
        stage4=dict(num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(48, 96, 192, 384), fuse_method='SUM'),
    ),
    hrnet_w64=dict(
        stem_width=64,
        stage1=dict(num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM'),
        stage2=dict(num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(64, 128), fuse_method='SUM'),
        stage3=dict(num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(64, 128, 256), fuse_method='SUM'),
        stage4=dict(num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(64, 128, 256, 512), fuse_method='SUM'),
    ),
)


class HighResolutionModule(nn.Module):
    def __init__(
            self,
            num_branches,
            block_types,
            num_blocks,
            num_in_chs,
            num_channels,
            fuse_method,
            multi_scale_output=True,
    ):
        super(HighResolutionModule, self).__init__()
        self._check_branches(num_branches, block_types, num_blocks, num_in_chs, num_channels)
        self.num_in_chs = num_in_chs
        self.fuse_method = fuse_method
        self.num_branches = num_branches
        self.multi_scale_output = multi_scale_output
        self.branches = self._make_branches(num_branches, block_types, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.fuse_act = nn.ReLU(False)

    def _check_branches(self, num_branches, block_types, num_blocks, num_in_chs, num_channels):
        error_msg = ''
        if num_branches != len(num_blocks):
            error_msg = 'num_branches({}) <> num_blocks({})'.format(num_branches, len(num_blocks))
        elif num_branches != len(num_channels):
            error_msg = 'num_branches({}) <> num_channels({})'.format(num_branches, len(num_channels))
        elif num_branches != len(num_in_chs):
            error_msg = 'num_branches({}) <> num_in_chs({})'.format(num_branches, len(num_in_chs))
        if error_msg:
            _logger.error(error_msg)
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block_type, num_blocks, num_channels, stride=1):
        downsample = None
        if stride != 1 or self.num_in_chs[branch_index] != num_channels[branch_index] * block_type.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.num_in_chs[branch_index], num_channels[branch_index] * block_type.expansion,
                    kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(num_channels[branch_index] * block_type.expansion, momentum=_BN_MOMENTUM),
            )
        layers = [block_type(self.num_in_chs[branch_index], num_channels[branch_index], stride, downsample)]
        self.num_in_chs[branch_index] = num_channels[branch_index] * block_type.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block_type(self.num_in_chs[branch_index], num_channels[branch_index]))
        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block_type, num_blocks, num_channels):
        branches = []
        for i in range(num_branches):
            branches.append(self._make_one_branch(i, block_type, num_blocks, num_channels))
        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        if self.num_branches == 1:
            return nn.Identity()
        num_branches = self.num_branches
        num_in_chs = self.num_in_chs
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    # lower-res branch j -> 1x1 conv + upsample to branch i resolution
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_in_chs[j], num_in_chs[i], 1, 1, 0, bias=False),
                        nn.BatchNorm2d(num_in_chs[i], momentum=_BN_MOMENTUM),
                        nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'),
                    ))
                elif j == i:
                    fuse_layer.append(nn.Identity())
                else:
                    # higher-res branch j -> chain of stride-2 3x3 convs down to branch i
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            num_out_chs_conv3x3 = num_in_chs[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False),
                                nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM),
                            ))
                        else:
                            num_out_chs_conv3x3 = num_in_chs[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False),
                                nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM),
                                nn.ReLU(False),
                            ))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)

    def get_num_in_chs(self):
        return self.num_in_chs

    def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
        if self.num_branches == 1:
            return [self.branches[0](x[0])]

        for i, branch in enumerate(self.branches):
            x[i] = branch(x[i])

        x_fuse = []
        for i, fuse_outer in enumerate(self.fuse_layers):
            y = None
            for j, f in enumerate(fuse_outer):
                if y is None:
                    y = f(x[j])
                else:
                    y = y + f(x[j])
            x_fuse.append(self.fuse_act(y))
        return x_fuse


class SequentialList(nn.Sequential):
    def __init__(self, *args):
        super(SequentialList, self).__init__(*args)

    @torch.jit._overload_method  # noqa: F811
    def forward(self, x):
        # type: (List[torch.Tensor]) -> (List[torch.Tensor])
        pass

    @torch.jit._overload_method  # noqa: F811
    def forward(self, x):
        # type: (torch.Tensor) -> (List[torch.Tensor])
        pass

    def forward(self, x) -> List[torch.Tensor]:
        for module in self:
            x = module(x)
        return x


@torch.jit.interface
class ModuleInterface(torch.nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        pass


block_types_dict = {
    'BASIC': BasicBlock,
    'BOTTLENECK': Bottleneck,
}


class HighResolutionNet(nn.Module):
    def __init__(
            self,
            cfg,
            in_chans=3,
            num_classes=1000,
            output_stride=32,
            global_pool='avg',
            drop_rate=0.0,
            head='classification',
            **kwargs,
    ):
        super(HighResolutionNet, self).__init__()
        self.num_classes = num_classes
        assert output_stride == 32
        cfg.update(**kwargs)

        stem_width = cfg['stem_width']
        self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM)
        self.act1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM)
        self.act2 = nn.ReLU(inplace=True)

        self.stage1_cfg = cfg['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = block_types_dict[self.stage1_cfg['block_type']]
        num_blocks = self.stage1_cfg['num_blocks'][0]
        self.layer1 = self._make_layer(block_type, 64, num_channels, num_blocks)
        stage1_out_channel = block_type.expansion * num_channels

        self.stage2_cfg = cfg['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = block_types_dict[self.stage2_cfg['block_type']]
        num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels)

        self.stage3_cfg = cfg['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = block_types_dict[self.stage3_cfg['block_type']]
        num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels)

        self.stage4_cfg = cfg['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = block_types_dict[self.stage4_cfg['block_type']]
        num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True)

        self.head = head
        self.head_channels = None  # set if _make_head called
        head_conv_bias = cfg.pop('head_conv_bias', True)
        if head == 'classification':
            # classification head
            self.num_features = self.head_hidden_size = 2048
            self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head(
                pre_stage_channels, conv_bias=head_conv_bias)
            self.global_pool, self.head_drop, self.classifier = create_classifier(
                self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
        else:
            if head == 'incre':
                self.num_features = self.head_hidden_size = 2048
                self.incre_modules, _, _ = self._make_head(pre_stage_channels, incre_only=True)
            else:
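                # head='' leaves the multi-resolution branch outputs untouched (used by
                # HighResolutionNetFeatures below); 'incre' above attaches only the Bottleneck
                # incre modules, so per-stage channels grow 4x as recorded in feature_info.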
                self.num_features = self.head_hidden_size = 256
                self.incre_modules = None
            self.global_pool = nn.Identity()
            self.head_drop = nn.Identity()
            self.classifier = nn.Identity()

        curr_stride = 2
        self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')]
        for i, c in enumerate(self.head_channels if self.head_channels else num_channels):
            curr_stride *= 2
            c = c * 4 if self.head_channels else c  # head Bottleneck blocks expand channels 4x
            self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')]

        self.init_weights()

    def _make_head(self, pre_stage_channels, incre_only=False, conv_bias=True):
        head_block_type = Bottleneck
        # head_channels * expansion (4) -> 128, 256, 512, 1024 per resolution
        self.head_channels = [32, 64, 128, 256]

        incre_modules = []
        for i, channels in enumerate(pre_stage_channels):
            incre_modules.append(self._make_layer(head_block_type, channels, self.head_channels[i], 1, stride=1))
        incre_modules = nn.ModuleList(incre_modules)
        if incre_only:
            return incre_modules, None, None

        downsamp_modules = []
        for i in range(len(pre_stage_channels) - 1):
            in_channels = self.head_channels[i] * head_block_type.expansion
            out_channels = self.head_channels[i + 1] * head_block_type.expansion
            downsamp_module = nn.Sequential(
                nn.Conv2d(
                    in_channels=in_channels, out_channels=out_channels,
                    kernel_size=3, stride=2, padding=1, bias=conv_bias),
                nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM),
                nn.ReLU(inplace=True),
            )
            downsamp_modules.append(downsamp_module)
        downsamp_modules = nn.ModuleList(downsamp_modules)

        final_layer = nn.Sequential(
            nn.Conv2d(
                in_channels=self.head_channels[3] * head_block_type.expansion,
                out_channels=self.num_features, kernel_size=1, stride=1, padding=0, bias=conv_bias),
            nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True),
        )
        return incre_modules, downsamp_modules, final_layer

    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False),
                        nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM),
                        nn.ReLU(inplace=True),
                    ))
                else:
                    transition_layers.append(nn.Identity())
            else:
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    _in_chs = num_channels_pre_layer[-1]
                    _out_chs = num_channels_cur_layer[i] if j == i - num_branches_pre else _in_chs
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(_in_chs, _out_chs, 3, 2, 1, bias=False),
                        nn.BatchNorm2d(_out_chs, momentum=_BN_MOMENTUM),
                        nn.ReLU(inplace=True),
                    ))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)

    def _make_layer(self, block_type, inplanes, planes, num_blocks, stride=1):
        downsample = None
        if stride != 1 or inplanes != planes * block_type.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block_type.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block_type.expansion, momentum=_BN_MOMENTUM),
            )
        layers = [block_type(inplanes, planes, stride, downsample)]
        inplanes = planes * block_type.expansion
        for i in range(1, num_blocks):
            layers.append(block_type(inplanes, planes))
        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_in_chs, multi_scale_output=True):
        num_modules = layer_config['num_modules']
        num_branches = layer_config['num_branches']
        num_blocks = layer_config['num_blocks']
        num_channels = layer_config['num_channels']
        block_type = block_types_dict[layer_config['block_type']]
        fuse_method = layer_config['fuse_method']
        modules = []
        for i in range(num_modules):
            # multi_scale_output only matters for the last module of the stage
            reset_multi_scale_output = multi_scale_output or i < num_modules - 1
            modules.append(HighResolutionModule(
                num_branches, block_type, num_blocks, num_in_chs, num_channels, fuse_method, reset_multi_scale_output))
            num_in_chs = modules[-1].get_num_in_chs()
        return SequentialList(*modules), num_in_chs

    @torch.jit.ignore
    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        matcher = dict(
            stem=r'^conv[12]|bn[12]',
            blocks=r'^(?:layer|stage|transition)(\d+)' if coarse else [
                (r'^layer(\d+)\.(\d+)', None),
                (r'^stage(\d+)\.(\d+)', None),
                (r'^transition(\d+)', (99999,)),
            ],
        )
        return matcher

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        assert not enable, 'gradient checkpointing not supported'

    @torch.jit.ignore
    def get_classifier(self) -> nn.Module:
        return self.classifier

    def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
        self.num_classes = num_classes
        self.global_pool, self.classifier = create_classifier(
            self.num_features, self.num_classes, pool_type=global_pool)

    def stages(self, x) -> List[torch.Tensor]:
        x = self.layer1(x)

        xl = [t(x) for i, t in enumerate(self.transition1)]
        yl = self.stage2(xl)

        xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)]
        yl = self.stage3(xl)

        xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)]
        yl = self.stage4(xl)
        return yl

    def forward_features(self, x):
        # stem
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.act2(x)

        # stages
        yl = self.stages(x)
        if self.incre_modules is None or self.downsamp_modules is None:
            return yl

        # classification head: merge branches coarse-to-fine
        y = None
        for i, incre in enumerate(self.incre_modules):
            if y is None:
                y = incre(yl[i])
            else:
                down: ModuleInterface = self.downsamp_modules[i - 1]  # needed for torchscript module indexing
                y = incre(yl[i]) + down.forward(y)
        y = self.final_layer(y)
        return y

    def forward_head(self, x, pre_logits: bool = False):
        x = self.global_pool(x)
        x = self.head_drop(x)
        return x if pre_logits else self.classifier(x)

    def forward(self, x):
        y = self.forward_features(x)
        x = self.forward_head(y)
        return x


class HighResolutionNetFeatures(HighResolutionNet):
    def __init__(
            self,
            cfg,
            in_chans=3,
            num_classes=1000,
            output_stride=32,
            global_pool='avg',
            drop_rate=0.0,
            feature_location='incre',
            out_indices=(0, 1, 2, 3, 4),
            **kwargs,
    ):
        assert feature_location in ('incre', '')
        super(HighResolutionNetFeatures, self).__init__(
            cfg,
            in_chans=in_chans,
            num_classes=num_classes,
            output_stride=output_stride,
            global_pool=global_pool,
            drop_rate=drop_rate,
            head=feature_location,
            **kwargs,
        )
        self.feature_info = FeatureInfo(self.feature_info, out_indices)
        self._out_idx = {f['index'] for f in self.feature_info.get_dicts()}

    def forward_features(self, x):
        assert False, 'Not supported'

    def forward(self, x) -> List[torch.Tensor]:
        out = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.act1(x)
        if 0 in self._out_idx:
            out.append(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.act2(x)
        x = self.stages(x)
        if self.incre_modules is not None:
            x = [incre(f) for f, incre in zip(x, self.incre_modules)]
        for i, f in enumerate(x):
            if i + 1 in self._out_idx:
                out.append(f)
        return out


def _create_hrnet(variant, pretrained=False, cfg_variant=None, **model_kwargs):
    model_cls = HighResolutionNet
    features_only = False
    kwargs_filter = None
    if model_kwargs.pop('features_only', False):
        model_cls = HighResolutionNetFeatures
        kwargs_filter = ('num_classes', 'global_pool')
        features_only = True
    cfg_variant = cfg_variant or variant

    pretrained_strict = model_kwargs.pop(
        'pretrained_strict',
        not features_only and model_kwargs.get('head', 'classification') == 'classification')
    model = build_model_with_cfg(
        model_cls,
        variant,
        pretrained,
        model_cfg=cfg_cls[cfg_variant],
        pretrained_strict=pretrained_strict,
        kwargs_filter=kwargs_filter,
        **model_kwargs,
    )
    if features_only:
        model.pretrained_cfg = pretrained_cfg_for_features(model.default_cfg)
        model.default_cfg = model.pretrained_cfg
    return model


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': (7, 7),
        'crop_pct': 0.875,
        'interpolation': 'bilinear',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'conv1',
        'classifier': 'classifier',
        **kwargs,
    }


default_cfgs = generate_default_cfgs({
    'hrnet_w18_small.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'),
    'hrnet_w18_small.ms_in1k': _cfg(hf_hub_id='timm/'),
    'hrnet_w18_small_v2.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'),
    'hrnet_w18_small_v2.ms_in1k': _cfg(hf_hub_id='timm/'),
    'hrnet_w18.ms_aug_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95),
    'hrnet_w18.ms_in1k': _cfg(hf_hub_id='timm/'),
    'hrnet_w30.ms_in1k': _cfg(hf_hub_id='timm/'),
    'hrnet_w32.ms_in1k': _cfg(hf_hub_id='timm/'),
    'hrnet_w40.ms_in1k': _cfg(hf_hub_id='timm/'),
    'hrnet_w44.ms_in1k': _cfg(hf_hub_id='timm/'),
    'hrnet_w48.ms_in1k': _cfg(hf_hub_id='timm/'),
    'hrnet_w64.ms_in1k': _cfg(hf_hub_id='timm/'),
    'hrnet_w18_ssld.paddle_in1k': _cfg(
        hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288)),
    'hrnet_w48_ssld.paddle_in1k': _cfg(
        hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288)),
})


@register_model
def hrnet_w18_small(pretrained=False, **kwargs) -> HighResolutionNet:
    return _create_hrnet('hrnet_w18_small', pretrained, **kwargs)


@register_model
def hrnet_w18_small_v2(pretrained=False, **kwargs) -> HighResolutionNet:
    return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs)


@register_model
def hrnet_w18(pretrained=False, **kwargs) -> HighResolutionNet:
    return _create_hrnet('hrnet_w18', pretrained, **kwargs)


@register_model
def hrnet_w30(pretrained=False, **kwargs) -> HighResolutionNet:
    return _create_hrnet('hrnet_w30', pretrained, **kwargs)


@register_model
def hrnet_w32(pretrained=False, **kwargs) -> HighResolutionNet:
    return _create_hrnet('hrnet_w32', pretrained, **kwargs)


@register_model
def hrnet_w40(pretrained=False, **kwargs) -> HighResolutionNet:
    return _create_hrnet('hrnet_w40', pretrained, **kwargs)


@register_model
def hrnet_w44(pretrained=False, **kwargs) -> HighResolutionNet:
    return _create_hrnet('hrnet_w44', pretrained, **kwargs)


@register_model
def hrnet_w48(pretrained=False, **kwargs) -> HighResolutionNet:
    return _create_hrnet('hrnet_w48', pretrained, **kwargs)


@register_model
def hrnet_w64(pretrained=False, **kwargs) -> HighResolutionNet:
    return _create_hrnet('hrnet_w64', pretrained, **kwargs)


@register_model
def hrnet_w18_ssld(pretrained=False, **kwargs) -> HighResolutionNet:
    kwargs.setdefault('head_conv_bias', False)
    return _create_hrnet('hrnet_w18_ssld', cfg_variant='hrnet_w18', pretrained=pretrained, **kwargs)


@register_model
def hrnet_w48_ssld(pretrained=False, **kwargs) -> HighResolutionNet:
    kwargs.setdefault('head_conv_bias', False)
    return _create_hrnet('hrnet_w48_ssld', cfg_variant='hrnet_w48', pretrained=pretrained, **kwargs)


# File: pytorch-image-models-main/timm/models/inception_next.py
""""""
from functools import partial
from typing import Optional

import torch
import torch.nn as nn

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_, DropPath, to_2tuple, get_padding, SelectAdaptivePool2d
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs

__all__ = ['MetaNeXt']


class InceptionDWConv2d(nn.Module):
    def __init__(
            self,
            in_chs,
            square_kernel_size=3,
            band_kernel_size=11,
            branch_ratio=0.125,
            dilation=1,
    ):
        super().__init__()
        gc = int(in_chs * branch_ratio)  # channels per conv branch
        square_padding = get_padding(square_kernel_size, dilation=dilation)
        band_padding = get_padding(band_kernel_size, dilation=dilation)
        self.dwconv_hw = nn.Conv2d(
            gc, gc, square_kernel_size, padding=square_padding, dilation=dilation, groups=gc)
        self.dwconv_w = nn.Conv2d(
            gc, gc, (1, band_kernel_size), padding=(0, band_padding), dilation=(1, dilation), groups=gc)
        self.dwconv_h = nn.Conv2d(
            gc, gc, (band_kernel_size, 1), padding=(band_padding, 0), dilation=(dilation, 1), groups=gc)
        self.split_indexes = (in_chs - 3 * gc, gc, gc, gc)

    def forward(self, x):
        x_id, x_hw, x_w, x_h = torch.split(x, self.split_indexes, dim=1)
        return torch.cat((
            x_id,  # identity branch, untouched channels
            self.dwconv_hw(x_hw),
            self.dwconv_w(x_w),
            self.dwconv_h(x_h),
        ), dim=1)


class ConvMlp(nn.Module):
    def __init__(
            self,
            in_features,
            hidden_features=None,
            out_features=None,
            act_layer=nn.ReLU,
            norm_layer=None,
            bias=True,
            drop=0.0,
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        bias = to_2tuple(bias)
        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0])
        self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
        self.act = act_layer()
        self.drop = nn.Dropout(drop)
        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1])

    def forward(self, x):
        x = self.fc1(x)
        x = self.norm(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        return x


class MlpClassifierHead(nn.Module):
    def __init__(
            self,
            in_features,
            num_classes=1000,
            pool_type='avg',
            mlp_ratio=3,
            act_layer=nn.GELU,
            norm_layer=partial(nn.LayerNorm, eps=1e-06),
            drop=0.0,
            bias=True,
    ):
        super().__init__()
        self.use_conv = False
        self.in_features = in_features
        self.num_features = hidden_features = int(mlp_ratio * in_features)
        assert pool_type, 'Cannot disable pooling'
        self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True)
        self.fc1 = nn.Linear(in_features * self.global_pool.feat_mult(), hidden_features, bias=bias)
        self.act = act_layer()
        self.norm = norm_layer(hidden_features)
        self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias)
        self.drop = nn.Dropout(drop)

    def reset(self, num_classes: int, pool_type: Optional[str] = None):
        if pool_type is not None:
            assert pool_type, 'Cannot disable pooling'
            self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True)
        self.fc2 = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()

    def forward(self, x, pre_logits: bool = False):
        x = self.global_pool(x)
        x = self.fc1(x)
        x = self.act(x)
        x = self.norm(x)
        x = self.drop(x)
        return x if pre_logits else self.fc2(x)


class MetaNeXtBlock(nn.Module):
    def __init__(
            self,
            dim,
            dilation=1,
            token_mixer=InceptionDWConv2d,
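            # default mixer is InceptionDWConv2d above: with branch_ratio=0.125, the
            # three depthwise branches each take 12.5% of channels and the remaining
            # 62.5% pass through on the identity branch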
            norm_layer=nn.BatchNorm2d,
            mlp_layer=ConvMlp,
            mlp_ratio=4,
            act_layer=nn.GELU,
            ls_init_value=1e-06,
            drop_path=0.0,
    ):
        super().__init__()
        self.token_mixer = token_mixer(dim, dilation=dilation)
        self.norm = norm_layer(dim)
        self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=act_layer)
        self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value else None
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, x):
        shortcut = x
        x = self.token_mixer(x)
        x = self.norm(x)
        x = self.mlp(x)
        if self.gamma is not None:
            x = x.mul(self.gamma.reshape(1, -1, 1, 1))
        x = self.drop_path(x) + shortcut
        return x


class MetaNeXtStage(nn.Module):
    def __init__(
            self,
            in_chs,
            out_chs,
            stride=2,
            depth=2,
            dilation=(1, 1),
            drop_path_rates=None,
            ls_init_value=1.0,
            token_mixer=InceptionDWConv2d,
            act_layer=nn.GELU,
            norm_layer=None,
            mlp_ratio=4,
    ):
        super().__init__()
        self.grad_checkpointing = False
        if stride > 1 or dilation[0] != dilation[1]:
            self.downsample = nn.Sequential(
                norm_layer(in_chs),
                nn.Conv2d(in_chs, out_chs, kernel_size=2, stride=stride, dilation=dilation[0]),
            )
        else:
            self.downsample = nn.Identity()
        drop_path_rates = drop_path_rates or [0.0] * depth
        stage_blocks = []
        for i in range(depth):
            stage_blocks.append(MetaNeXtBlock(
                dim=out_chs,
                dilation=dilation[1],
                drop_path=drop_path_rates[i],
                ls_init_value=ls_init_value,
                token_mixer=token_mixer,
                act_layer=act_layer,
                norm_layer=norm_layer,
                mlp_ratio=mlp_ratio,
            ))
        self.blocks = nn.Sequential(*stage_blocks)

    def forward(self, x):
        x = self.downsample(x)
        if self.grad_checkpointing and not torch.jit.is_scripting():
            x = checkpoint_seq(self.blocks, x)
        else:
            x = self.blocks(x)
        return x


class MetaNeXt(nn.Module):
    def __init__(
            self,
            in_chans=3,
            num_classes=1000,
            global_pool='avg',
            output_stride=32,
            depths=(3, 3, 9, 3),
            dims=(96, 192, 384, 768),
            token_mixers=InceptionDWConv2d,
            norm_layer=nn.BatchNorm2d,
            act_layer=nn.GELU,
            mlp_ratios=(4, 4, 4, 3),
            drop_rate=0.0,
            drop_path_rate=0.0,
            ls_init_value=1e-06,
    ):
        super().__init__()
        num_stage = len(depths)
        if not isinstance(token_mixers, (list, tuple)):
            token_mixers = [token_mixers] * num_stage
        if not isinstance(mlp_ratios, (list, tuple)):
            mlp_ratios = [mlp_ratios] * num_stage
        self.num_classes = num_classes
        self.global_pool = global_pool
        self.drop_rate = drop_rate
        self.feature_info = []

        self.stem = nn.Sequential(
            nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
            norm_layer(dims[0]),
        )

        dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
        prev_chs = dims[0]
        curr_stride = 4
        dilation = 1
        self.stages = nn.Sequential()
        for i in range(num_stage):
            stride = 2 if curr_stride == 2 or i > 0 else 1
            if curr_stride >= output_stride and stride > 1:
                dilation *= stride
                stride = 1
            curr_stride *= stride
            first_dilation = 1 if dilation in (1, 2) else 2
            out_chs = dims[i]
            self.stages.append(MetaNeXtStage(
                prev_chs,
                out_chs,
                stride=stride if i > 0 else 1,
                dilation=(first_dilation, dilation),
                depth=depths[i],
                drop_path_rates=dp_rates[i],
                ls_init_value=ls_init_value,
                act_layer=act_layer,
                token_mixer=token_mixers[i],
                norm_layer=norm_layer,
                mlp_ratio=mlp_ratios[i],
            ))
            prev_chs = out_chs
            self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
        self.num_features = prev_chs
        self.head = MlpClassifierHead(self.num_features, num_classes, pool_type=self.global_pool, drop=drop_rate)
        self.head_hidden_size = self.head.num_features
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        return dict(
            stem=r'^stem',
            blocks=r'^stages\.(\d+)' if coarse else [
                (r'^stages\.(\d+)\.downsample', (0,)),
                (r'^stages\.(\d+)\.blocks\.(\d+)', None),
            ],
        )

    @torch.jit.ignore
    def get_classifier(self) -> nn.Module:
        return self.head.fc2

    def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
        self.num_classes = num_classes
        self.head.reset(num_classes, global_pool)

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        for s in self.stages:
            s.grad_checkpointing = enable

    @torch.jit.ignore
    def no_weight_decay(self):
        return set()

    def forward_features(self, x):
        x = self.stem(x)
        x = self.stages(x)
        return x

    def forward_head(self, x, pre_logits: bool = False):
        return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': (7, 7),
        'crop_pct': 0.875,
        'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'stem.0',
        'classifier': 'head.fc2',
        **kwargs,
    }


default_cfgs = generate_default_cfgs({
    'inception_next_tiny.sail_in1k': _cfg(hf_hub_id='timm/'),
    'inception_next_small.sail_in1k': _cfg(hf_hub_id='timm/'),
    'inception_next_base.sail_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95),
    'inception_next_base.sail_in1k_384': _cfg(
        hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
})


def _create_inception_next(variant, pretrained=False, **kwargs):
    model = build_model_with_cfg(
        MetaNeXt,
        variant,
        pretrained,
        feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
        **kwargs,
    )
    return model


@register_model
def inception_next_tiny(pretrained=False, **kwargs):
    model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), token_mixers=InceptionDWConv2d)
    return _create_inception_next('inception_next_tiny', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def inception_next_small(pretrained=False, **kwargs):
    model_args = dict(depths=(3, 3, 27, 3), dims=(96, 192, 384, 768), token_mixers=InceptionDWConv2d)
    return _create_inception_next('inception_next_small', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def inception_next_base(pretrained=False, **kwargs):
    model_args = dict(depths=(3, 3, 27, 3), dims=(128, 256, 512, 1024), token_mixers=InceptionDWConv2d)
    return _create_inception_next('inception_next_base', pretrained=pretrained, **dict(model_args, **kwargs))


# File: pytorch-image-models-main/timm/models/inception_resnet_v2.py
""""""
from functools import partial

import torch
import torch.nn as nn
import torch.nn.functional as F

from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import create_classifier, ConvNormAct
from ._builder import build_model_with_cfg
from ._manipulate import flatten_modules
from ._registry import register_model, generate_default_cfgs, register_model_deprecations

__all__ = ['InceptionResnetV2']


class Mixed_5b(nn.Module):
    def __init__(self, conv_block=None):
        super(Mixed_5b, self).__init__()
        conv_block = conv_block or ConvNormAct

        self.branch0 = conv_block(192, 96, kernel_size=1, stride=1)

        self.branch1 = nn.Sequential(
            conv_block(192, 48, kernel_size=1, stride=1),
            conv_block(48, 64, kernel_size=5, stride=1, padding=2),
        )

        self.branch2 = nn.Sequential(
            conv_block(192, 64, kernel_size=1, stride=1),
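            # two stacked 3x3 convs: 5x5-equivalent receptive field on this branch
            # at lower parameter cost than a single 5x5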
conv_block(64, 96, kernel_size=3, stride=1, padding=1), conv_block(96, 96, kernel_size=3, stride=1, padding=1)) self.branch3 = nn.Sequential(nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), conv_block(192, 64, kernel_size=1, stride=1)) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block35(nn.Module): def __init__(self, scale=1.0, conv_block=None): super(Block35, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(320, 32, kernel_size=1, stride=1) self.branch1 = nn.Sequential(conv_block(320, 32, kernel_size=1, stride=1), conv_block(32, 32, kernel_size=3, stride=1, padding=1)) self.branch2 = nn.Sequential(conv_block(320, 32, kernel_size=1, stride=1), conv_block(32, 48, kernel_size=3, stride=1, padding=1), conv_block(48, 64, kernel_size=3, stride=1, padding=1)) self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) self.act = nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) out = self.conv2d(out) out = out * self.scale + x out = self.act(out) return out class Mixed_6a(nn.Module): def __init__(self, conv_block=None): super(Mixed_6a, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = conv_block(320, 384, kernel_size=3, stride=2) self.branch1 = nn.Sequential(conv_block(320, 256, kernel_size=1, stride=1), conv_block(256, 256, kernel_size=3, stride=1, padding=1), conv_block(256, 384, kernel_size=3, stride=2)) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class Block17(nn.Module): def __init__(self, scale=1.0, conv_block=None): super(Block17, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(1088, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential(conv_block(1088, 128, kernel_size=1, stride=1), conv_block(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0))) self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) self.act = nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x out = self.act(out) return out class Mixed_7a(nn.Module): def __init__(self, conv_block=None): super(Mixed_7a, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = nn.Sequential(conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 384, kernel_size=3, stride=2)) self.branch1 = nn.Sequential(conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 288, kernel_size=3, stride=2)) self.branch2 = nn.Sequential(conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 288, kernel_size=3, stride=1, padding=1), conv_block(288, 320, kernel_size=3, stride=2)) self.branch3 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block8(nn.Module): def __init__(self, scale=1.0, no_relu=False, conv_block=None): super(Block8, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(2080, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential(conv_block(2080, 192, kernel_size=1, stride=1), 
conv_block(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)), conv_block(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))) self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) self.relu = None if no_relu else nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x if self.relu is not None: out = self.relu(out) return out class InceptionResnetV2(nn.Module): def __init__(self, num_classes=1000, in_chans=3, drop_rate=0.0, output_stride=32, global_pool='avg', norm_layer='batchnorm2d', norm_eps=0.001, act_layer='relu'): super(InceptionResnetV2, self).__init__() self.num_classes = num_classes self.num_features = self.head_hidden_size = 1536 assert output_stride == 32 conv_block = partial(ConvNormAct, padding=0, norm_layer=norm_layer, act_layer=act_layer, norm_kwargs=dict(eps=norm_eps), act_kwargs=dict(inplace=True)) self.conv2d_1a = conv_block(in_chans, 32, kernel_size=3, stride=2) self.conv2d_2a = conv_block(32, 32, kernel_size=3, stride=1) self.conv2d_2b = conv_block(32, 64, kernel_size=3, stride=1, padding=1) self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')] self.maxpool_3a = nn.MaxPool2d(3, stride=2) self.conv2d_3b = conv_block(64, 80, kernel_size=1, stride=1) self.conv2d_4a = conv_block(80, 192, kernel_size=3, stride=1) self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')] self.maxpool_5a = nn.MaxPool2d(3, stride=2) self.mixed_5b = Mixed_5b(conv_block=conv_block) self.repeat = nn.Sequential(*[Block35(scale=0.17, conv_block=conv_block) for _ in range(10)]) self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')] self.mixed_6a = Mixed_6a(conv_block=conv_block) self.repeat_1 = nn.Sequential(*[Block17(scale=0.1, conv_block=conv_block) for _ in range(20)]) self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')] self.mixed_7a = Mixed_7a(conv_block=conv_block) self.repeat_2 = nn.Sequential(*[Block8(scale=0.2, conv_block=conv_block) for _ in range(9)]) self.block8 = Block8(no_relu=True, conv_block=conv_block) self.conv2d_7b = conv_block(2080, self.num_features, kernel_size=1, stride=1) self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')] (self.global_pool, self.head_drop, self.classif) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, coarse=False): module_map = {k: i for (i, (k, _)) in enumerate(flatten_modules(self.named_children(), prefix=()))} module_map.pop(('classif',)) def _matcher(name): if any([name.startswith(n) for n in ('conv2d_1', 'conv2d_2')]): return 0 elif any([name.startswith(n) for n in ('conv2d_3', 'conv2d_4')]): return 1 elif any([name.startswith(n) for n in ('block8', 'conv2d_7')]): return len(module_map) + 1 else: for k in module_map.keys(): if k == tuple(name.split('.')[:len(k)]): return module_map[k] return float('inf') return _matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classif def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.classif) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.conv2d_1a(x) x = self.conv2d_2a(x) x = self.conv2d_2b(x) x = self.maxpool_3a(x) x = 
self.conv2d_3b(x) x = self.conv2d_4a(x) x = self.maxpool_5a(x) x = self.mixed_5b(x) x = self.repeat(x) x = self.mixed_6a(x) x = self.repeat_1(x) x = self.mixed_7a(x) x = self.repeat_2(x) x = self.block8(x) x = self.conv2d_7b(x) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.classif(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_inception_resnet_v2(variant, pretrained=False, **kwargs): return build_model_with_cfg(InceptionResnetV2, variant, pretrained, **kwargs) default_cfgs = generate_default_cfgs({'inception_resnet_v2.tf_in1k': {'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif'}, 'inception_resnet_v2.tf_ens_adv_in1k': {'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif'}}) @register_model def inception_resnet_v2(pretrained=False, **kwargs) -> InceptionResnetV2: return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs) register_model_deprecations(__name__, {'ens_adv_inception_resnet_v2': 'inception_resnet_v2.tf_ens_adv_in1k'}) # File: pytorch-image-models-main/timm/models/inception_v3.py """""" from functools import partial from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import trunc_normal_, create_classifier, Linear, ConvNormAct from ._builder import build_model_with_cfg from ._builder import resolve_pretrained_cfg from ._manipulate import flatten_modules from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['InceptionV3'] class InceptionA(nn.Module): def __init__(self, in_channels, pool_features, conv_block=None): super(InceptionA, self).__init__() conv_block = conv_block or ConvNormAct self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) def _forward(self, x): branch1x1 = self.branch1x1(x) branch5x5 = self.branch5x5_1(x) branch5x5 = self.branch5x5_2(branch5x5) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionB(nn.Module): def __init__(self, in_channels, conv_block=None): super(InceptionB, self).__init__() conv_block = conv_block or ConvNormAct self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) 
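# note: InceptionB is a grid-reduction block; its two stride-2 conv branches run alongside a stride-2 max pool in _forward(), so spatial size halves while output channels become 384 + 96 + in_channels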
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) def _forward(self, x): branch3x3 = self.branch3x3(x) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) outputs = [branch3x3, branch3x3dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionC(nn.Module): def __init__(self, in_channels, channels_7x7, conv_block=None): super(InceptionC, self).__init__() conv_block = conv_block or ConvNormAct self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) c7 = channels_7x7 self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1) self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) self.branch_pool = conv_block(in_channels, 192, kernel_size=1) def _forward(self, x): branch1x1 = self.branch1x1(x) branch7x7 = self.branch7x7_1(x) branch7x7 = self.branch7x7_2(branch7x7) branch7x7 = self.branch7x7_3(branch7x7) branch7x7dbl = self.branch7x7dbl_1(x) branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionD(nn.Module): def __init__(self, in_channels, conv_block=None): super(InceptionD, self).__init__() conv_block = conv_block or ConvNormAct self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2) def _forward(self, x): branch3x3 = self.branch3x3_1(x) branch3x3 = self.branch3x3_2(branch3x3) branch7x7x3 = self.branch7x7x3_1(x) branch7x7x3 = self.branch7x7x3_2(branch7x7x3) branch7x7x3 = self.branch7x7x3_3(branch7x7x3) branch7x7x3 = self.branch7x7x3_4(branch7x7x3) branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) outputs = [branch3x3, branch7x7x3, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionE(nn.Module): def __init__(self, in_channels, conv_block=None): super(InceptionE, self).__init__() conv_block = conv_block or ConvNormAct self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1) self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) self.branch3x3_2b = 
conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1) self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1) self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) self.branch_pool = conv_block(in_channels, 192, kernel_size=1) def _forward(self, x): branch1x1 = self.branch1x1(x) branch3x3 = self.branch3x3_1(x) branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)] branch3x3 = torch.cat(branch3x3, 1) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = [self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl)] branch3x3dbl = torch.cat(branch3x3dbl, 1) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionAux(nn.Module): def __init__(self, in_channels, num_classes, conv_block=None): super(InceptionAux, self).__init__() conv_block = conv_block or ConvNormAct self.conv0 = conv_block(in_channels, 128, kernel_size=1) self.conv1 = conv_block(128, 768, kernel_size=5) self.conv1.stddev = 0.01 self.fc = Linear(768, num_classes) self.fc.stddev = 0.001 def forward(self, x): x = F.avg_pool2d(x, kernel_size=5, stride=3) x = self.conv0(x) x = self.conv1(x) x = F.adaptive_avg_pool2d(x, (1, 1)) x = torch.flatten(x, 1) x = self.fc(x) return x class InceptionV3(nn.Module): aux_logits: torch.jit.Final[bool] def __init__(self, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg', aux_logits=False, norm_layer='batchnorm2d', norm_eps=0.001, act_layer='relu'): super(InceptionV3, self).__init__() self.num_classes = num_classes self.aux_logits = aux_logits conv_block = partial(ConvNormAct, padding=0, norm_layer=norm_layer, act_layer=act_layer, norm_kwargs=dict(eps=norm_eps), act_kwargs=dict(inplace=True)) self.Conv2d_1a_3x3 = conv_block(in_chans, 32, kernel_size=3, stride=2) self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3) self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1) self.Pool1 = nn.MaxPool2d(kernel_size=3, stride=2) self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1) self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3) self.Pool2 = nn.MaxPool2d(kernel_size=3, stride=2) self.Mixed_5b = InceptionA(192, pool_features=32, conv_block=conv_block) self.Mixed_5c = InceptionA(256, pool_features=64, conv_block=conv_block) self.Mixed_5d = InceptionA(288, pool_features=64, conv_block=conv_block) self.Mixed_6a = InceptionB(288, conv_block=conv_block) self.Mixed_6b = InceptionC(768, channels_7x7=128, conv_block=conv_block) self.Mixed_6c = InceptionC(768, channels_7x7=160, conv_block=conv_block) self.Mixed_6d = InceptionC(768, channels_7x7=160, conv_block=conv_block) self.Mixed_6e = InceptionC(768, channels_7x7=192, conv_block=conv_block) if aux_logits: self.AuxLogits = InceptionAux(768, num_classes, conv_block=conv_block) else: self.AuxLogits = None self.Mixed_7a = InceptionD(768, conv_block=conv_block) self.Mixed_7b = InceptionE(1280, conv_block=conv_block) self.Mixed_7c = InceptionE(2048, conv_block=conv_block) self.feature_info = [dict(num_chs=64, reduction=2, module='Conv2d_2b_3x3'), dict(num_chs=192, reduction=4, module='Conv2d_4a_3x3'), dict(num_chs=288, reduction=8, module='Mixed_5d'), 
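# the remaining feature taps below are Mixed_6e (stride 16, also the point where the optional InceptionAux head branches off) and Mixed_7c (stride 32)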
dict(num_chs=768, reduction=16, module='Mixed_6e'), dict(num_chs=2048, reduction=32, module='Mixed_7c')] self.num_features = self.head_hidden_size = 2048 (self.global_pool, self.head_drop, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) for m in self.modules(): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): stddev = m.stddev if hasattr(m, 'stddev') else 0.1 trunc_normal_(m.weight, std=stddev) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): module_map = {k: i for (i, (k, _)) in enumerate(flatten_modules(self.named_children(), prefix=()))} module_map.pop(('fc',)) def _matcher(name): if any([name.startswith(n) for n in ('Conv2d_1', 'Conv2d_2')]): return 0 elif any([name.startswith(n) for n in ('Conv2d_3', 'Conv2d_4')]): return 1 else: for k in module_map.keys(): if k == tuple(name.split('.')[:len(k)]): return module_map[k] return float('inf') return _matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.fc def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_preaux(self, x): x = self.Conv2d_1a_3x3(x) x = self.Conv2d_2a_3x3(x) x = self.Conv2d_2b_3x3(x) x = self.Pool1(x) x = self.Conv2d_3b_1x1(x) x = self.Conv2d_4a_3x3(x) x = self.Pool2(x) x = self.Mixed_5b(x) x = self.Mixed_5c(x) x = self.Mixed_5d(x) x = self.Mixed_6a(x) x = self.Mixed_6b(x) x = self.Mixed_6c(x) x = self.Mixed_6d(x) x = self.Mixed_6e(x) return x def forward_postaux(self, x): x = self.Mixed_7a(x) x = self.Mixed_7b(x) x = self.Mixed_7c(x) return x def forward_features(self, x): x = self.forward_preaux(x) if self.aux_logits: aux = self.AuxLogits(x) x = self.forward_postaux(x) return (x, aux) x = self.forward_postaux(x) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.head_drop(x) if pre_logits: return x x = self.fc(x) return x def forward(self, x): if self.aux_logits: (x, aux) = self.forward_features(x) x = self.forward_head(x) return (x, aux) x = self.forward_features(x) x = self.forward_head(x) return x def _create_inception_v3(variant, pretrained=False, **kwargs): pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=kwargs.pop('pretrained_cfg', None)) aux_logits = kwargs.get('aux_logits', False) has_aux_logits = False if pretrained_cfg: has_aux_logits = pretrained_cfg.tag == 'tv_in1k' if aux_logits: assert not kwargs.pop('features_only', False) load_strict = has_aux_logits else: load_strict = not has_aux_logits return build_model_with_cfg(InceptionV3, variant, pretrained, pretrained_cfg=pretrained_cfg, pretrained_strict=load_strict, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'Conv2d_1a_3x3.conv', 'classifier': 'fc', **kwargs} default_cfgs = generate_default_cfgs({'inception_v3.tv_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'), 'inception_v3.tf_in1k': _cfg(hf_hub_id='timm/'), 'inception_v3.tf_adv_in1k': 
_cfg(hf_hub_id='timm/'), 'inception_v3.gluon_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD)}) @register_model def inception_v3(pretrained=False, **kwargs) -> InceptionV3: model = _create_inception_v3('inception_v3', pretrained=pretrained, **kwargs) return model register_model_deprecations(__name__, {'tf_inception_v3': 'inception_v3.tf_in1k', 'adv_inception_v3': 'inception_v3.tf_adv_in1k', 'gluon_inception_v3': 'inception_v3.gluon_in1k'}) # File: pytorch-image-models-main/timm/models/inception_v4.py """""" from functools import partial import torch import torch.nn as nn from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import create_classifier, ConvNormAct from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['InceptionV4'] class Mixed3a(nn.Module): def __init__(self, conv_block=ConvNormAct): super(Mixed3a, self).__init__() self.maxpool = nn.MaxPool2d(3, stride=2) self.conv = conv_block(64, 96, kernel_size=3, stride=2) def forward(self, x): x0 = self.maxpool(x) x1 = self.conv(x) out = torch.cat((x0, x1), 1) return out class Mixed4a(nn.Module): def __init__(self, conv_block=ConvNormAct): super(Mixed4a, self).__init__() self.branch0 = nn.Sequential(conv_block(160, 64, kernel_size=1, stride=1), conv_block(64, 96, kernel_size=3, stride=1)) self.branch1 = nn.Sequential(conv_block(160, 64, kernel_size=1, stride=1), conv_block(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)), conv_block(64, 96, kernel_size=(3, 3), stride=1)) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) return out class Mixed5a(nn.Module): def __init__(self, conv_block=ConvNormAct): super(Mixed5a, self).__init__() self.conv = conv_block(192, 192, kernel_size=3, stride=2) self.maxpool = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.conv(x) x1 = self.maxpool(x) out = torch.cat((x0, x1), 1) return out class InceptionA(nn.Module): def __init__(self, conv_block=ConvNormAct): super(InceptionA, self).__init__() self.branch0 = conv_block(384, 96, kernel_size=1, stride=1) self.branch1 = nn.Sequential(conv_block(384, 64, kernel_size=1, stride=1), conv_block(64, 96, kernel_size=3, stride=1, padding=1)) self.branch2 = nn.Sequential(conv_block(384, 64, kernel_size=1, stride=1), conv_block(64, 96, kernel_size=3, stride=1, padding=1), conv_block(96, 96, kernel_size=3, stride=1, padding=1)) self.branch3 = nn.Sequential(nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), conv_block(384, 96, kernel_size=1, stride=1)) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class ReductionA(nn.Module): def __init__(self, conv_block=ConvNormAct): super(ReductionA, self).__init__() self.branch0 = conv_block(384, 384, kernel_size=3, stride=2) self.branch1 = nn.Sequential(conv_block(384, 192, kernel_size=1, stride=1), conv_block(192, 224, kernel_size=3, stride=1, padding=1), conv_block(224, 256, kernel_size=3, stride=2)) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class InceptionB(nn.Module): def __init__(self, conv_block=ConvNormAct): super(InceptionB, self).__init__() self.branch0 = conv_block(1024, 384, kernel_size=1, stride=1) self.branch1 = 
nn.Sequential(conv_block(1024, 192, kernel_size=1, stride=1), conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0))) self.branch2 = nn.Sequential(conv_block(1024, 192, kernel_size=1, stride=1), conv_block(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)), conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)), conv_block(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3))) self.branch3 = nn.Sequential(nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), conv_block(1024, 128, kernel_size=1, stride=1)) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class ReductionB(nn.Module): def __init__(self, conv_block=ConvNormAct): super(ReductionB, self).__init__() self.branch0 = nn.Sequential(conv_block(1024, 192, kernel_size=1, stride=1), conv_block(192, 192, kernel_size=3, stride=2)) self.branch1 = nn.Sequential(conv_block(1024, 256, kernel_size=1, stride=1), conv_block(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)), conv_block(320, 320, kernel_size=3, stride=2)) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class InceptionC(nn.Module): def __init__(self, conv_block=ConvNormAct): super(InceptionC, self).__init__() self.branch0 = conv_block(1536, 256, kernel_size=1, stride=1) self.branch1_0 = conv_block(1536, 384, kernel_size=1, stride=1) self.branch1_1a = conv_block(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) self.branch1_1b = conv_block(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) self.branch2_0 = conv_block(1536, 384, kernel_size=1, stride=1) self.branch2_1 = conv_block(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0)) self.branch2_2 = conv_block(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1)) self.branch2_3a = conv_block(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) self.branch2_3b = conv_block(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) self.branch3 = nn.Sequential(nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), conv_block(1536, 256, kernel_size=1, stride=1)) def forward(self, x): x0 = self.branch0(x) x1_0 = self.branch1_0(x) x1_1a = self.branch1_1a(x1_0) x1_1b = self.branch1_1b(x1_0) x1 = torch.cat((x1_1a, x1_1b), 1) x2_0 = self.branch2_0(x) x2_1 = self.branch2_1(x2_0) x2_2 = self.branch2_2(x2_1) x2_3a = self.branch2_3a(x2_2) x2_3b = self.branch2_3b(x2_2) x2 = torch.cat((x2_3a, x2_3b), 1) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class InceptionV4(nn.Module): def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0.0, global_pool='avg', norm_layer='batchnorm2d', norm_eps=0.001, act_layer='relu'): super(InceptionV4, self).__init__() assert output_stride == 32 self.num_classes = num_classes self.num_features = self.head_hidden_size = 1536 conv_block = partial(ConvNormAct, padding=0, norm_layer=norm_layer, act_layer=act_layer, norm_kwargs=dict(eps=norm_eps), act_kwargs=dict(inplace=True)) features = [conv_block(in_chans, 32, kernel_size=3, stride=2), conv_block(32, 32, kernel_size=3, stride=1), conv_block(32, 64, kernel_size=3, stride=1, padding=1), Mixed3a(conv_block), Mixed4a(conv_block), 
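# Mixed5a below completes the stride-8 stem; the body then stacks 4x InceptionA, ReductionA, 7x InceptionB, ReductionB, and 3x InceptionC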
Mixed5a(conv_block)] features += [InceptionA(conv_block) for _ in range(4)] features += [ReductionA(conv_block)] features += [InceptionB(conv_block) for _ in range(7)] features += [ReductionB(conv_block)] features += [InceptionC(conv_block) for _ in range(3)] self.features = nn.Sequential(*features) self.feature_info = [dict(num_chs=64, reduction=2, module='features.2'), dict(num_chs=160, reduction=4, module='features.3'), dict(num_chs=384, reduction=8, module='features.9'), dict(num_chs=1024, reduction=16, module='features.17'), dict(num_chs=1536, reduction=32, module='features.21')] (self.global_pool, self.head_drop, self.last_linear) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^features\\.[012]\\.', blocks='^features\\.(\\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.last_linear def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.last_linear) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): return self.features(x) def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.last_linear(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_inception_v4(variant, pretrained=False, **kwargs) -> InceptionV4: return build_model_with_cfg(InceptionV4, variant, pretrained, feature_cfg=dict(flatten_sequential=True), **kwargs) default_cfgs = generate_default_cfgs({'inception_v4.tf_in1k': {'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'features.0.conv', 'classifier': 'last_linear'}}) @register_model def inception_v4(pretrained=False, **kwargs): return _create_inception_v4('inception_v4', pretrained, **kwargs) # File: pytorch-image-models-main/timm/models/layers/__init__.py from timm.layers.activations import * from timm.layers.adaptive_avgmax_pool import adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d from timm.layers.attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding from timm.layers.blur_pool import BlurPool2d from timm.layers.classifier import ClassifierHead, create_classifier from timm.layers.cond_conv2d import CondConv2d, get_condconv_initializer from timm.layers.config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit, set_layer_config from timm.layers.conv2d_same import Conv2dSame, conv2d_same from timm.layers.conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct from timm.layers.create_act import create_act_layer, get_act_layer, get_act_fn from timm.layers.create_attn import get_attn, create_attn from timm.layers.create_conv2d import create_conv2d from timm.layers.create_norm import get_norm_layer, create_norm_layer from timm.layers.create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer from timm.layers.drop import DropBlock2d, DropPath, drop_block_2d, drop_path from timm.layers.eca import EcaModule, CecaModule, EfficientChannelAttn, 
CircularEfficientChannelAttn from timm.layers.evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2, EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a from timm.layers.fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm from timm.layers.filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d from timm.layers.gather_excite import GatherExcite from timm.layers.global_context import GlobalContext from timm.layers.helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple from timm.layers.inplace_abn import InplaceAbn from timm.layers.linear import Linear from timm.layers.mixed_conv2d import MixedConv2d from timm.layers.mlp import Mlp, GluMlp, GatedMlp, ConvMlp from timm.layers.non_local_attn import NonLocalAttn, BatNonLocalAttn from timm.layers.norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d from timm.layers.norm_act import BatchNormAct2d, GroupNormAct, convert_sync_batchnorm from timm.layers.padding import get_padding, get_same_padding, pad_same from timm.layers.patch_embed import PatchEmbed from timm.layers.pool2d_same import AvgPool2dSame, create_pool2d from timm.layers.squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite from timm.layers.selective_kernel import SelectiveKernel from timm.layers.separable_conv import SeparableConv2d, SeparableConvNormAct from timm.layers.split_attn import SplitAttn from timm.layers.split_batchnorm import SplitBatchNorm2d, convert_splitbn_model from timm.layers.std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame from timm.layers.test_time_pool import TestTimePoolHead, apply_test_time_pool from timm.layers.trace_utils import _assert, _float_to_int from timm.layers.weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_ import warnings warnings.warn(f'Importing from {__name__} is deprecated, please import via timm.layers', DeprecationWarning) # File: pytorch-image-models-main/timm/models/levit.py """""" from collections import OrderedDict from functools import partial from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN from timm.layers import to_ntuple, to_2tuple, get_act_layer, DropPath, trunc_normal_, ndgrid from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['Levit'] class ConvNorm(nn.Module): def __init__(self, in_chs, out_chs, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bn_weight_init=1): super().__init__() self.linear = nn.Conv2d(in_chs, out_chs, kernel_size, stride, padding, dilation, groups, bias=False) self.bn = nn.BatchNorm2d(out_chs) nn.init.constant_(self.bn.weight, bn_weight_init) @torch.no_grad() def fuse(self): (c, bn) = (self.linear, self.bn) w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = c.weight * w[:, None, None, None] b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 m = nn.Conv2d(w.size(1) * self.linear.groups, w.size(0), w.shape[2:], stride=self.linear.stride, padding=self.linear.padding, dilation=self.linear.dilation, groups=self.linear.groups) m.weight.data.copy_(w) m.bias.data.copy_(b) return m def forward(self, x): return self.bn(self.linear(x)) class LinearNorm(nn.Module): def __init__(self, in_features, out_features, bn_weight_init=1): super().__init__()
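# LinearNorm mirrors ConvNorm for token inputs (B, N, C): a bias-free Linear followed by BatchNorm1d applied over the flattened B*N tokens.
# fuse() folds the BN statistics into a plain nn.Linear for inference: W' = diag(bn.weight / sqrt(running_var + eps)) @ W and b' = bn.bias - running_mean * bn.weight / sqrt(running_var + eps)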
self.linear = nn.Linear(in_features, out_features, bias=False) self.bn = nn.BatchNorm1d(out_features) nn.init.constant_(self.bn.weight, bn_weight_init) @torch.no_grad() def fuse(self): (l, bn) = (self.linear, self.bn) w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = l.weight * w[:, None] b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 m = nn.Linear(w.size(1), w.size(0)) m.weight.data.copy_(w) m.bias.data.copy_(b) return m def forward(self, x): x = self.linear(x) return self.bn(x.flatten(0, 1)).reshape_as(x) class NormLinear(nn.Module): def __init__(self, in_features, out_features, bias=True, std=0.02, drop=0.0): super().__init__() self.bn = nn.BatchNorm1d(in_features) self.drop = nn.Dropout(drop) self.linear = nn.Linear(in_features, out_features, bias=bias) trunc_normal_(self.linear.weight, std=std) if self.linear.bias is not None: nn.init.constant_(self.linear.bias, 0) @torch.no_grad() def fuse(self): (bn, l) = (self.bn, self.linear) w = bn.weight / (bn.running_var + bn.eps) ** 0.5 b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 w = l.weight * w[None, :] if l.bias is None: b = b @ self.linear.weight.T else: b = (l.weight @ b[:, None]).view(-1) + self.linear.bias m = nn.Linear(w.size(1), w.size(0)) m.weight.data.copy_(w) m.bias.data.copy_(b) return m def forward(self, x): return self.linear(self.drop(self.bn(x))) class Stem8(nn.Sequential): def __init__(self, in_chs, out_chs, act_layer): super().__init__() self.stride = 8 self.add_module('conv1', ConvNorm(in_chs, out_chs // 4, 3, stride=2, padding=1)) self.add_module('act1', act_layer()) self.add_module('conv2', ConvNorm(out_chs // 4, out_chs // 2, 3, stride=2, padding=1)) self.add_module('act2', act_layer()) self.add_module('conv3', ConvNorm(out_chs // 2, out_chs, 3, stride=2, padding=1)) class Stem16(nn.Sequential): def __init__(self, in_chs, out_chs, act_layer): super().__init__() self.stride = 16 self.add_module('conv1', ConvNorm(in_chs, out_chs // 8, 3, stride=2, padding=1)) self.add_module('act1', act_layer()) self.add_module('conv2', ConvNorm(out_chs // 8, out_chs // 4, 3, stride=2, padding=1)) self.add_module('act2', act_layer()) self.add_module('conv3', ConvNorm(out_chs // 4, out_chs // 2, 3, stride=2, padding=1)) self.add_module('act3', act_layer()) self.add_module('conv4', ConvNorm(out_chs // 2, out_chs, 3, stride=2, padding=1)) class Downsample(nn.Module): def __init__(self, stride, resolution, use_pool=False): super().__init__() self.stride = stride self.resolution = to_2tuple(resolution) self.pool = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False) if use_pool else None def forward(self, x): (B, N, C) = x.shape x = x.view(B, self.resolution[0], self.resolution[1], C) if self.pool is not None: x = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) else: x = x[:, ::self.stride, ::self.stride] return x.reshape(B, -1, C) class Attention(nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__(self, dim, key_dim, num_heads=8, attn_ratio=4.0, resolution=14, use_conv=False, act_layer=nn.SiLU): super().__init__() ln_layer = ConvNorm if use_conv else LinearNorm resolution = to_2tuple(resolution) self.use_conv = use_conv self.num_heads = num_heads self.scale = key_dim ** (-0.5) self.key_dim = key_dim self.key_attn_dim = key_dim * num_heads self.val_dim = int(attn_ratio * key_dim) self.val_attn_dim = int(attn_ratio * key_dim) * num_heads self.qkv = ln_layer(dim, self.val_attn_dim + self.key_attn_dim * 2) self.proj = 
nn.Sequential(OrderedDict([('act', act_layer()), ('ln', ln_layer(self.val_attn_dim, dim, bn_weight_init=0))])) self.attention_biases = nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1])) pos = torch.stack(ndgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1) rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() rel_pos = rel_pos[0] * resolution[1] + rel_pos[1] self.register_buffer('attention_bias_idxs', rel_pos, persistent=False) self.attention_bias_cache = {} @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): if self.use_conv: (B, C, H, W) = x.shape (q, k, v) = self.qkv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.key_dim, self.val_dim], dim=2) attn = q.transpose(-2, -1) @ k * self.scale + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) else: (B, N, C) = x.shape (q, k, v) = self.qkv(x).view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3) q = q.permute(0, 2, 1, 3) k = k.permute(0, 2, 3, 1) v = v.permute(0, 2, 1, 3) attn = q @ k * self.scale + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim) x = self.proj(x) return x class AttentionDownsample(nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__(self, in_dim, out_dim, key_dim, num_heads=8, attn_ratio=2.0, stride=2, resolution=14, use_conv=False, use_pool=False, act_layer=nn.SiLU): super().__init__() resolution = to_2tuple(resolution) self.stride = stride self.resolution = resolution self.num_heads = num_heads self.key_dim = key_dim self.key_attn_dim = key_dim * num_heads self.val_dim = int(attn_ratio * key_dim) self.val_attn_dim = self.val_dim * self.num_heads self.scale = key_dim ** (-0.5) self.use_conv = use_conv if self.use_conv: ln_layer = ConvNorm sub_layer = partial(nn.AvgPool2d, kernel_size=3 if use_pool else 1, padding=1 if use_pool else 0, count_include_pad=False) else: ln_layer = LinearNorm sub_layer = partial(Downsample, resolution=resolution, use_pool=use_pool) self.kv = ln_layer(in_dim, self.val_attn_dim + self.key_attn_dim) self.q = nn.Sequential(OrderedDict([('down', sub_layer(stride=stride)), ('ln', ln_layer(in_dim, self.key_attn_dim))])) self.proj = nn.Sequential(OrderedDict([('act', act_layer()), ('ln', ln_layer(self.val_attn_dim, out_dim))])) self.attention_biases = nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1])) k_pos = torch.stack(ndgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1) q_pos = torch.stack(ndgrid(torch.arange(0, resolution[0], step=stride), torch.arange(0, resolution[1], step=stride))).flatten(1) rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs() rel_pos = rel_pos[0] * resolution[1] + rel_pos[1] self.register_buffer('attention_bias_idxs', rel_pos, persistent=False) self.attention_bias_cache = {} @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: 
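# re-entering train mode drops the per-device cache of gathered attention biases; get_attention_biases() only caches in eval so that training keeps gradients flowing through attention_biases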
self.attention_bias_cache = {} def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): if self.use_conv: (B, C, H, W) = x.shape (HH, WW) = ((H - 1) // self.stride + 1, (W - 1) // self.stride + 1) (k, v) = self.kv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.val_dim], dim=2) q = self.q(x).view(B, self.num_heads, self.key_dim, -1) attn = q.transpose(-2, -1) @ k * self.scale + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (v @ attn.transpose(-2, -1)).reshape(B, self.val_attn_dim, HH, WW) else: (B, N, C) = x.shape (k, v) = self.kv(x).view(B, N, self.num_heads, -1).split([self.key_dim, self.val_dim], dim=3) k = k.permute(0, 2, 3, 1) v = v.permute(0, 2, 1, 3) q = self.q(x).view(B, -1, self.num_heads, self.key_dim).permute(0, 2, 1, 3) attn = q @ k * self.scale + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, -1, self.val_attn_dim) x = self.proj(x) return x class LevitMlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, use_conv=False, act_layer=nn.SiLU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features ln_layer = ConvNorm if use_conv else LinearNorm self.ln1 = ln_layer(in_features, hidden_features) self.act = act_layer() self.drop = nn.Dropout(drop) self.ln2 = ln_layer(hidden_features, out_features, bn_weight_init=0) def forward(self, x): x = self.ln1(x) x = self.act(x) x = self.drop(x) x = self.ln2(x) return x class LevitDownsample(nn.Module): def __init__(self, in_dim, out_dim, key_dim, num_heads=8, attn_ratio=4.0, mlp_ratio=2.0, act_layer=nn.SiLU, attn_act_layer=None, resolution=14, use_conv=False, use_pool=False, drop_path=0.0): super().__init__() attn_act_layer = attn_act_layer or act_layer self.attn_downsample = AttentionDownsample(in_dim=in_dim, out_dim=out_dim, key_dim=key_dim, num_heads=num_heads, attn_ratio=attn_ratio, act_layer=attn_act_layer, resolution=resolution, use_conv=use_conv, use_pool=use_pool) self.mlp = LevitMlp(out_dim, int(out_dim * mlp_ratio), use_conv=use_conv, act_layer=act_layer) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): x = self.attn_downsample(x) x = x + self.drop_path(self.mlp(x)) return x class LevitBlock(nn.Module): def __init__(self, dim, key_dim, num_heads=8, attn_ratio=4.0, mlp_ratio=2.0, resolution=14, use_conv=False, act_layer=nn.SiLU, attn_act_layer=None, drop_path=0.0): super().__init__() attn_act_layer = attn_act_layer or act_layer self.attn = Attention(dim=dim, key_dim=key_dim, num_heads=num_heads, attn_ratio=attn_ratio, resolution=resolution, use_conv=use_conv, act_layer=attn_act_layer) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.mlp = LevitMlp(dim, int(dim * mlp_ratio), use_conv=use_conv, act_layer=act_layer) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): x = x + self.drop_path1(self.attn(x)) x = x + self.drop_path2(self.mlp(x)) return x class LevitStage(nn.Module): def __init__(self, in_dim, out_dim, key_dim, depth=4, num_heads=8, attn_ratio=4.0, mlp_ratio=4.0, 
act_layer=nn.SiLU, attn_act_layer=None, resolution=14, downsample='', use_conv=False, drop_path=0.0): super().__init__() resolution = to_2tuple(resolution) if downsample: self.downsample = LevitDownsample(in_dim, out_dim, key_dim=key_dim, num_heads=in_dim // key_dim, attn_ratio=4.0, mlp_ratio=2.0, act_layer=act_layer, attn_act_layer=attn_act_layer, resolution=resolution, use_conv=use_conv, drop_path=drop_path) resolution = [(r - 1) // 2 + 1 for r in resolution] else: assert in_dim == out_dim self.downsample = nn.Identity() blocks = [] for _ in range(depth): blocks += [LevitBlock(out_dim, key_dim, num_heads=num_heads, attn_ratio=attn_ratio, mlp_ratio=mlp_ratio, act_layer=act_layer, attn_act_layer=attn_act_layer, resolution=resolution, use_conv=use_conv, drop_path=drop_path)] self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class Levit(nn.Module): def __init__(self, img_size=224, in_chans=3, num_classes=1000, embed_dim=(192,), key_dim=64, depth=(12,), num_heads=(3,), attn_ratio=2.0, mlp_ratio=2.0, stem_backbone=None, stem_stride=None, stem_type='s16', down_op='subsample', act_layer='hard_swish', attn_act_layer=None, use_conv=False, global_pool='avg', drop_rate=0.0, drop_path_rate=0.0): super().__init__() act_layer = get_act_layer(act_layer) attn_act_layer = get_act_layer(attn_act_layer or act_layer) self.use_conv = use_conv self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = embed_dim[-1] self.embed_dim = embed_dim self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] num_stages = len(embed_dim) assert len(depth) == num_stages num_heads = to_ntuple(num_stages)(num_heads) attn_ratio = to_ntuple(num_stages)(attn_ratio) mlp_ratio = to_ntuple(num_stages)(mlp_ratio) if stem_backbone is not None: assert stem_stride >= 2 self.stem = stem_backbone stride = stem_stride else: assert stem_type in ('s16', 's8') if stem_type == 's16': self.stem = Stem16(in_chans, embed_dim[0], act_layer=act_layer) else: self.stem = Stem8(in_chans, embed_dim[0], act_layer=act_layer) stride = self.stem.stride resolution = tuple([i // p for (i, p) in zip(to_2tuple(img_size), to_2tuple(stride))]) in_dim = embed_dim[0] stages = [] for i in range(num_stages): stage_stride = 2 if i > 0 else 1 stages += [LevitStage(in_dim, embed_dim[i], key_dim, depth=depth[i], num_heads=num_heads[i], attn_ratio=attn_ratio[i], mlp_ratio=mlp_ratio[i], act_layer=act_layer, attn_act_layer=attn_act_layer, resolution=resolution, use_conv=use_conv, downsample=down_op if stage_stride == 2 else '', drop_path=drop_path_rate)] stride *= stage_stride resolution = tuple([(r - 1) // stage_stride + 1 for r in resolution]) self.feature_info += [dict(num_chs=embed_dim[i], reduction=stride, module=f'stages.{i}')] in_dim = embed_dim[i] self.stages = nn.Sequential(*stages) self.head = NormLinear(embed_dim[-1], num_classes, drop=drop_rate) if num_classes > 0 else nn.Identity() @torch.jit.ignore def no_weight_decay(self): return {x for x in self.state_dict().keys() if 'attention_biases' in x} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^cls_token|pos_embed|patch_embed', blocks=[('^blocks\\.(\\d+)', None), ('^norm', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): 
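# rebuilds the NormLinear head in place; passing num_classes=0 swaps in nn.Identity and strips the classifier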
self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = NormLinear(self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else nn.Identity() def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.stages), indices) x = self.stem(x) (B, C, H, W) = x.shape if not self.use_conv: x = x.flatten(2).transpose(1, 2) if torch.jit.is_scripting() or not stop_early: stages = self.stages else: stages = self.stages[:max_index + 1] for (feat_idx, stage) in enumerate(stages): x = stage(x) if feat_idx in take_indices: if self.use_conv: intermediates.append(x) else: intermediates.append(x.reshape(B, H, W, -1).permute(0, 3, 1, 2)) H = (H + 2 - 1) // 2 W = (W + 2 - 1) // 2 if intermediates_only: return intermediates return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) if not self.use_conv: x = x.flatten(2).transpose(1, 2) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool == 'avg': x = x.mean(dim=(-2, -1)) if self.use_conv else x.mean(dim=1) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x class LevitDistilled(Levit): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.head_dist = NormLinear(self.num_features, self.num_classes) if self.num_classes > 0 else nn.Identity() self.distilled_training = False @torch.jit.ignore def get_classifier(self) -> nn.Module: return (self.head, self.head_dist) def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = NormLinear(self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else nn.Identity() self.head_dist = NormLinear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable def forward_head(self, x, pre_logits: bool=False): if self.global_pool == 'avg': x = x.mean(dim=(-2, -1)) if self.use_conv else x.mean(dim=1) if pre_logits: return x (x, x_dist) = (self.head(x), self.head_dist(x)) if self.distilled_training and self.training and (not torch.jit.is_scripting()): return (x, x_dist) else: return (x + x_dist) / 2 def checkpoint_filter_fn(state_dict, model): if 'model' in state_dict: state_dict = state_dict['model'] state_dict = {k: v for (k, v) in state_dict.items() if 'attention_bias_idxs' not in k} D = model.state_dict() out_dict = {} for (ka, kb, va, vb) in zip(D.keys(), state_dict.keys(), D.values(), state_dict.values()): if va.ndim == 4 and vb.ndim == 2: vb = vb[:, :, None, None] if va.shape != vb.shape: assert 'head' in ka or 'stem.conv1.linear' in ka 
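# a shape mismatch is tolerated only for the classifier head or the first stem conv (e.g. when remapping num_classes or in_chans); all other tensors must match one-to-one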
out_dict[ka] = vb return out_dict model_cfgs = dict(levit_128s=dict(embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 6, 8), depth=(2, 3, 4)), levit_128=dict(embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 8, 12), depth=(4, 4, 4)), levit_192=dict(embed_dim=(192, 288, 384), key_dim=32, num_heads=(3, 5, 6), depth=(4, 4, 4)), levit_256=dict(embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 4, 4)), levit_384=dict(embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4)), levit_384_s8=dict(embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4), act_layer='silu', stem_type='s8'), levit_512_s8=dict(embed_dim=(512, 640, 896), key_dim=64, num_heads=(8, 10, 14), depth=(4, 4, 4), act_layer='silu', stem_type='s8'), levit_512=dict(embed_dim=(512, 768, 1024), key_dim=64, num_heads=(8, 12, 16), depth=(4, 4, 4), act_layer='silu'), levit_256d=dict(embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 8, 6), act_layer='silu'), levit_512d=dict(embed_dim=(512, 640, 768), key_dim=64, num_heads=(8, 10, 12), depth=(4, 8, 6), act_layer='silu')) def create_levit(variant, cfg_variant=None, pretrained=False, distilled=True, **kwargs): is_conv = '_conv' in variant out_indices = kwargs.pop('out_indices', (0, 1, 2)) if kwargs.get('features_only', False) and (not is_conv): kwargs.setdefault('feature_cls', 'getter') if cfg_variant is None: if variant in model_cfgs: cfg_variant = variant elif is_conv: cfg_variant = variant.replace('_conv', '') model_cfg = dict(model_cfgs[cfg_variant], **kwargs) model = build_model_with_cfg(LevitDistilled if distilled else Levit, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **model_cfg) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.linear', 'classifier': ('head.linear', 'head_dist.linear'), **kwargs} default_cfgs = generate_default_cfgs({'levit_128s.fb_dist_in1k': _cfg(hf_hub_id='timm/'), 'levit_128.fb_dist_in1k': _cfg(hf_hub_id='timm/'), 'levit_192.fb_dist_in1k': _cfg(hf_hub_id='timm/'), 'levit_256.fb_dist_in1k': _cfg(hf_hub_id='timm/'), 'levit_384.fb_dist_in1k': _cfg(hf_hub_id='timm/'), 'levit_conv_128s.fb_dist_in1k': _cfg(hf_hub_id='timm/', pool_size=(4, 4)), 'levit_conv_128.fb_dist_in1k': _cfg(hf_hub_id='timm/', pool_size=(4, 4)), 'levit_conv_192.fb_dist_in1k': _cfg(hf_hub_id='timm/', pool_size=(4, 4)), 'levit_conv_256.fb_dist_in1k': _cfg(hf_hub_id='timm/', pool_size=(4, 4)), 'levit_conv_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', pool_size=(4, 4)), 'levit_384_s8.untrained': _cfg(classifier='head.linear'), 'levit_512_s8.untrained': _cfg(classifier='head.linear'), 'levit_512.untrained': _cfg(classifier='head.linear'), 'levit_256d.untrained': _cfg(classifier='head.linear'), 'levit_512d.untrained': _cfg(classifier='head.linear'), 'levit_conv_384_s8.untrained': _cfg(classifier='head.linear'), 'levit_conv_512_s8.untrained': _cfg(classifier='head.linear'), 'levit_conv_512.untrained': _cfg(classifier='head.linear'), 'levit_conv_256d.untrained': _cfg(classifier='head.linear'), 'levit_conv_512d.untrained': _cfg(classifier='head.linear')}) @register_model def levit_128s(pretrained=False, **kwargs) -> Levit: return create_levit('levit_128s', pretrained=pretrained, **kwargs) @register_model 
def levit_128(pretrained=False, **kwargs) -> Levit: return create_levit('levit_128', pretrained=pretrained, **kwargs) @register_model def levit_192(pretrained=False, **kwargs) -> Levit: return create_levit('levit_192', pretrained=pretrained, **kwargs) @register_model def levit_256(pretrained=False, **kwargs) -> Levit: return create_levit('levit_256', pretrained=pretrained, **kwargs) @register_model def levit_384(pretrained=False, **kwargs) -> Levit: return create_levit('levit_384', pretrained=pretrained, **kwargs) @register_model def levit_384_s8(pretrained=False, **kwargs) -> Levit: return create_levit('levit_384_s8', pretrained=pretrained, **kwargs) @register_model def levit_512_s8(pretrained=False, **kwargs) -> Levit: return create_levit('levit_512_s8', pretrained=pretrained, distilled=False, **kwargs) @register_model def levit_512(pretrained=False, **kwargs) -> Levit: return create_levit('levit_512', pretrained=pretrained, distilled=False, **kwargs) @register_model def levit_256d(pretrained=False, **kwargs) -> Levit: return create_levit('levit_256d', pretrained=pretrained, distilled=False, **kwargs) @register_model def levit_512d(pretrained=False, **kwargs) -> Levit: return create_levit('levit_512d', pretrained=pretrained, distilled=False, **kwargs) @register_model def levit_conv_128s(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_128s', pretrained=pretrained, use_conv=True, **kwargs) @register_model def levit_conv_128(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_128', pretrained=pretrained, use_conv=True, **kwargs) @register_model def levit_conv_192(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_192', pretrained=pretrained, use_conv=True, **kwargs) @register_model def levit_conv_256(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_256', pretrained=pretrained, use_conv=True, **kwargs) @register_model def levit_conv_384(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_384', pretrained=pretrained, use_conv=True, **kwargs) @register_model def levit_conv_384_s8(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_384_s8', pretrained=pretrained, use_conv=True, **kwargs) @register_model def levit_conv_512_s8(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_512_s8', pretrained=pretrained, use_conv=True, distilled=False, **kwargs) @register_model def levit_conv_512(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_512', pretrained=pretrained, use_conv=True, distilled=False, **kwargs) @register_model def levit_conv_256d(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_256d', pretrained=pretrained, use_conv=True, distilled=False, **kwargs) @register_model def levit_conv_512d(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_512d', pretrained=pretrained, use_conv=True, distilled=False, **kwargs) # File: pytorch-image-models-main/timm/models/maxxvit.py """""" import math from collections import OrderedDict from dataclasses import dataclass, replace, field from functools import partial from typing import Callable, Optional, Union, Tuple, List import torch from torch import nn from torch.jit import Final from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, ConvMlp, DropPath, LayerNorm, ClassifierHead, NormMlpClassifierHead from timm.layers import create_attn, get_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d, create_pool2d from 
timm.layers import trunc_normal_tf_, to_2tuple, extend_tuple, make_divisible, _assert from timm.layers import RelPosMlp, RelPosBias, RelPosBiasTf, use_fused_attn, resize_rel_pos_bias_table from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_function from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['MaxxVitCfg', 'MaxxVitConvCfg', 'MaxxVitTransformerCfg', 'MaxxVit'] @dataclass class MaxxVitTransformerCfg: dim_head: int = 32 head_first: bool = True expand_ratio: float = 4.0 expand_first: bool = True shortcut_bias: bool = True attn_bias: bool = True attn_drop: float = 0.0 proj_drop: float = 0.0 pool_type: str = 'avg2' rel_pos_type: str = 'bias' rel_pos_dim: int = 512 partition_ratio: int = 32 window_size: Optional[Tuple[int, int]] = None grid_size: Optional[Tuple[int, int]] = None no_block_attn: bool = False use_nchw_attn: bool = False init_values: Optional[float] = None act_layer: str = 'gelu' norm_layer: str = 'layernorm2d' norm_layer_cl: str = 'layernorm' norm_eps: float = 1e-06 def __post_init__(self): if self.grid_size is not None: self.grid_size = to_2tuple(self.grid_size) if self.window_size is not None: self.window_size = to_2tuple(self.window_size) if self.grid_size is None: self.grid_size = self.window_size @dataclass class MaxxVitConvCfg: block_type: str = 'mbconv' expand_ratio: float = 4.0 expand_output: bool = True kernel_size: int = 3 group_size: int = 1 pre_norm_act: bool = False output_bias: bool = True stride_mode: str = 'dw' pool_type: str = 'avg2' downsample_pool_type: str = 'avg2' padding: str = '' attn_early: bool = False attn_layer: str = 'se' attn_act_layer: str = 'silu' attn_ratio: float = 0.25 init_values: Optional[float] = 1e-06 act_layer: str = 'gelu' norm_layer: str = '' norm_layer_cl: str = '' norm_eps: Optional[float] = None def __post_init__(self): assert self.block_type in ('mbconv', 'convnext') use_mbconv = self.block_type == 'mbconv' if not self.norm_layer: self.norm_layer = 'batchnorm2d' if use_mbconv else 'layernorm2d' if not self.norm_layer_cl and (not use_mbconv): self.norm_layer_cl = 'layernorm' if self.norm_eps is None: self.norm_eps = 1e-05 if use_mbconv else 1e-06 self.downsample_pool_type = self.downsample_pool_type or self.pool_type @dataclass class MaxxVitCfg: embed_dim: Tuple[int, ...] = (96, 192, 384, 768) depths: Tuple[int, ...] = (2, 3, 5, 2) block_type: Tuple[Union[str, Tuple[str, ...]], ...] 
= ('C', 'C', 'T', 'T') stem_width: Union[int, Tuple[int, int]] = 64 stem_bias: bool = False conv_cfg: MaxxVitConvCfg = field(default_factory=MaxxVitConvCfg) transformer_cfg: MaxxVitTransformerCfg = field(default_factory=MaxxVitTransformerCfg) head_hidden_size: Optional[int] = None weight_init: str = 'vit_eff' class Attention2d(nn.Module): fused_attn: Final[bool] def __init__(self, dim: int, dim_out: Optional[int]=None, dim_head: int=32, bias: bool=True, expand_first: bool=True, head_first: bool=True, rel_pos_cls: Callable=None, attn_drop: float=0.0, proj_drop: float=0.0): super().__init__() dim_out = dim_out or dim dim_attn = dim_out if expand_first else dim self.num_heads = dim_attn // dim_head self.dim_head = dim_head self.head_first = head_first self.scale = dim_head ** (-0.5) self.fused_attn = use_fused_attn() self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias) self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, shared_rel_pos: Optional[torch.Tensor]=None): (B, C, H, W) = x.shape if self.head_first: (q, k, v) = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2) else: (q, k, v) = self.qkv(x).reshape(B, 3, self.num_heads, self.dim_head, -1).unbind(1) if self.fused_attn: attn_bias = None if self.rel_pos is not None: attn_bias = self.rel_pos.get_bias() elif shared_rel_pos is not None: attn_bias = shared_rel_pos x = torch.nn.functional.scaled_dot_product_attention(q.transpose(-1, -2).contiguous(), k.transpose(-1, -2).contiguous(), v.transpose(-1, -2).contiguous(), attn_mask=attn_bias, dropout_p=self.attn_drop.p if self.training else 0.0).transpose(-1, -2).reshape(B, -1, H, W) else: q = q * self.scale attn = q.transpose(-2, -1) @ k if self.rel_pos is not None: attn = self.rel_pos(attn) elif shared_rel_pos is not None: attn = attn + shared_rel_pos attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) x = self.proj(x) x = self.proj_drop(x) return x class AttentionCl(nn.Module): fused_attn: Final[bool] def __init__(self, dim: int, dim_out: Optional[int]=None, dim_head: int=32, bias: bool=True, expand_first: bool=True, head_first: bool=True, rel_pos_cls: Callable=None, attn_drop: float=0.0, proj_drop: float=0.0): super().__init__() dim_out = dim_out or dim dim_attn = dim_out if expand_first and dim_out > dim else dim assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim' self.num_heads = dim_attn // dim_head self.dim_head = dim_head self.head_first = head_first self.scale = dim_head ** (-0.5) self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim_attn * 3, bias=bias) self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim_attn, dim_out, bias=bias) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, shared_rel_pos: Optional[torch.Tensor]=None): B = x.shape[0] restore_shape = x.shape[:-1] if self.head_first: (q, k, v) = self.qkv(x).view(B, -1, self.num_heads, self.dim_head * 3).transpose(1, 2).chunk(3, dim=3) else: (q, k, v) = self.qkv(x).reshape(B, -1, 3, self.num_heads, self.dim_head).transpose(1, 3).unbind(2) if self.fused_attn: attn_bias = None if self.rel_pos is not None: attn_bias = self.rel_pos.get_bias() elif shared_rel_pos is not None: attn_bias = shared_rel_pos x = torch.nn.functional.scaled_dot_product_attention(q,
k, v, attn_mask=attn_bias, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) if self.rel_pos is not None: attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos) elif shared_rel_pos is not None: attn = attn + shared_rel_pos attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(restore_shape + (-1,)) x = self.proj(x) x = self.proj_drop(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-05, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma return x.mul_(gamma) if self.inplace else x * gamma class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-05, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class Downsample2d(nn.Module): def __init__(self, dim: int, dim_out: int, pool_type: str='avg2', padding: str='', bias: bool=True): super().__init__() assert pool_type in ('max', 'max2', 'avg', 'avg2') if pool_type == 'max': self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=padding or 1) elif pool_type == 'max2': self.pool = create_pool2d('max', 2, padding=padding or 0) elif pool_type == 'avg': self.pool = create_pool2d('avg', kernel_size=3, stride=2, count_include_pad=False, padding=padding or 1) else: self.pool = create_pool2d('avg', 2, padding=padding or 0) if dim != dim_out: self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias) else: self.expand = nn.Identity() def forward(self, x): x = self.pool(x) x = self.expand(x) return x def _init_transformer(module, name, scheme=''): if isinstance(module, (nn.Conv2d, nn.Linear)): if scheme == 'normal': nn.init.normal_(module.weight, std=0.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'trunc_normal': trunc_normal_tf_(module.weight, std=0.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'xavier_normal': nn.init.xavier_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-06) else: nn.init.zeros_(module.bias) class TransformerBlock2d(nn.Module): def __init__(self, dim: int, dim_out: int, stride: int=1, rel_pos_cls: Callable=None, cfg: MaxxVitTransformerCfg=MaxxVitTransformerCfg(), drop_path: float=0.0): super().__init__() norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) act_layer = get_act_layer(cfg.act_layer) if stride == 2: self.shortcut = Downsample2d(dim, dim_out, pool_type=cfg.pool_type, bias=cfg.shortcut_bias) self.norm1 = nn.Sequential(OrderedDict([('norm', norm_layer(dim)), ('down', Downsample2d(dim, dim, pool_type=cfg.pool_type))])) else: assert dim == dim_out self.shortcut = nn.Identity() self.norm1 = norm_layer(dim) self.attn = Attention2d(dim, dim_out, dim_head=cfg.dim_head, expand_first=cfg.expand_first, bias=cfg.attn_bias, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop) self.ls1 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim_out) self.mlp = ConvMlp(in_features=dim_out, hidden_features=int(dim_out * 
cfg.expand_ratio), act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def init_weights(self, scheme=''): named_apply(partial(_init_transformer, scheme=scheme), self) def forward(self, x, shared_rel_pos: Optional[torch.Tensor]=None): x = self.shortcut(x) + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x def _init_conv(module, name, scheme=''): if isinstance(module, nn.Conv2d): if scheme == 'normal': nn.init.normal_(module.weight, std=0.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'trunc_normal': trunc_normal_tf_(module.weight, std=0.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'xavier_normal': nn.init.xavier_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out)) if module.bias is not None: nn.init.zeros_(module.bias) def num_groups(group_size, channels): if not group_size: return 1 else: assert channels % group_size == 0 return channels // group_size class MbConvBlock(nn.Module): def __init__(self, in_chs: int, out_chs: int, stride: int=1, dilation: Tuple[int, int]=(1, 1), cfg: MaxxVitConvCfg=MaxxVitConvCfg(), drop_path: float=0.0): super(MbConvBlock, self).__init__() norm_act_layer = partial(get_norm_act_layer(cfg.norm_layer, cfg.act_layer), eps=cfg.norm_eps) mid_chs = make_divisible((out_chs if cfg.expand_output else in_chs) * cfg.expand_ratio) groups = num_groups(cfg.group_size, mid_chs) if stride == 2: self.shortcut = Downsample2d(in_chs, out_chs, pool_type=cfg.pool_type, bias=cfg.output_bias, padding=cfg.padding) else: self.shortcut = nn.Identity() assert cfg.stride_mode in ('pool', '1x1', 'dw') (stride_pool, stride_1, stride_2) = (1, 1, 1) if cfg.stride_mode == 'pool': (stride_pool, dilation_2) = (stride, dilation[1]) elif cfg.stride_mode == '1x1': (stride_1, dilation_2) = (stride, dilation[1]) else: (stride_2, dilation_2) = (stride, dilation[0]) self.pre_norm = norm_act_layer(in_chs, apply_act=cfg.pre_norm_act) if stride_pool > 1: self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type, padding=cfg.padding) else: self.down = nn.Identity() self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=stride_1) self.norm1 = norm_act_layer(mid_chs) self.conv2_kxk = create_conv2d(mid_chs, mid_chs, cfg.kernel_size, stride=stride_2, dilation=dilation_2, groups=groups, padding=cfg.padding) attn_kwargs = {} if isinstance(cfg.attn_layer, str): if cfg.attn_layer == 'se' or cfg.attn_layer == 'eca': attn_kwargs['act_layer'] = cfg.attn_act_layer attn_kwargs['rd_channels'] = int(cfg.attn_ratio * (out_chs if cfg.expand_output else mid_chs)) if cfg.attn_early: self.se_early = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs) self.norm2 = norm_act_layer(mid_chs) self.se = None else: self.se_early = None self.norm2 = norm_act_layer(mid_chs) self.se = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs) self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=cfg.output_bias) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def init_weights(self, scheme=''): named_apply(partial(_init_conv, scheme=scheme), self) def forward(self, x): shortcut = 
self.shortcut(x) x = self.pre_norm(x) x = self.down(x) x = self.conv1_1x1(x) x = self.norm1(x) x = self.conv2_kxk(x) if self.se_early is not None: x = self.se_early(x) x = self.norm2(x) if self.se is not None: x = self.se(x) x = self.conv3_1x1(x) x = self.drop_path(x) + shortcut return x class ConvNeXtBlock(nn.Module): def __init__(self, in_chs: int, out_chs: Optional[int]=None, kernel_size: int=7, stride: int=1, dilation: Tuple[int, int]=(1, 1), cfg: MaxxVitConvCfg=MaxxVitConvCfg(), conv_mlp: bool=True, drop_path: float=0.0): super().__init__() out_chs = out_chs or in_chs act_layer = get_act_layer(cfg.act_layer) if conv_mlp: norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) mlp_layer = ConvMlp else: assert 'layernorm' in cfg.norm_layer norm_layer = LayerNorm mlp_layer = Mlp self.use_conv_mlp = conv_mlp if stride == 2: self.shortcut = Downsample2d(in_chs, out_chs) elif in_chs != out_chs: self.shortcut = nn.Conv2d(in_chs, out_chs, kernel_size=1, bias=cfg.output_bias) else: self.shortcut = nn.Identity() assert cfg.stride_mode in ('pool', 'dw') (stride_pool, stride_dw) = (1, 1) if cfg.stride_mode == 'pool': stride_pool = stride else: stride_dw = stride if stride_pool == 2: self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type) else: self.down = nn.Identity() self.conv_dw = create_conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride_dw, dilation=dilation[1], depthwise=True, bias=cfg.output_bias) self.norm = norm_layer(out_chs) self.mlp = mlp_layer(out_chs, int(cfg.expand_ratio * out_chs), bias=cfg.output_bias, act_layer=act_layer) if conv_mlp: self.ls = LayerScale2d(out_chs, cfg.init_values) if cfg.init_values else nn.Identity() else: self.ls = LayerScale(out_chs, cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): shortcut = self.shortcut(x) x = self.down(x) x = self.conv_dw(x) if self.use_conv_mlp: x = self.norm(x) x = self.mlp(x) x = self.ls(x) else: x = x.permute(0, 2, 3, 1) x = self.norm(x) x = self.mlp(x) x = self.ls(x) x = x.permute(0, 3, 1, 2) x = self.drop_path(x) + shortcut return x def window_partition(x, window_size: List[int]): (B, H, W, C) = x.shape _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})') _assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})') x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function def window_reverse(windows, window_size: List[int], img_size: List[int]): (H, W) = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x def grid_partition(x, grid_size: List[int]): (B, H, W, C) = x.shape _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}') _assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}') x = x.view(B, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1], C) windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, grid_size[0], grid_size[1], C) return windows @register_notrace_function def grid_reverse(windows, grid_size: List[int], img_size: List[int]): (H, W) = img_size C = windows.shape[-1] x = windows.view(-1, H // 
grid_size[0], W // grid_size[1], grid_size[0], grid_size[1], C) x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, H, W, C) return x def get_rel_pos_cls(cfg: MaxxVitTransformerCfg, window_size): rel_pos_cls = None if cfg.rel_pos_type == 'mlp': rel_pos_cls = partial(RelPosMlp, window_size=window_size, hidden_dim=cfg.rel_pos_dim) elif cfg.rel_pos_type == 'bias': rel_pos_cls = partial(RelPosBias, window_size=window_size) elif cfg.rel_pos_type == 'bias_tf': rel_pos_cls = partial(RelPosBiasTf, window_size=window_size) return rel_pos_cls class PartitionAttentionCl(nn.Module): def __init__(self, dim: int, partition_type: str='block', cfg: MaxxVitTransformerCfg=MaxxVitTransformerCfg(), drop_path: float=0.0): super().__init__() norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) act_layer = get_act_layer(cfg.act_layer) self.partition_block = partition_type == 'block' self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size) rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) self.norm1 = norm_layer(dim) self.attn = AttentionCl(dim, dim, dim_head=cfg.dim_head, bias=cfg.attn_bias, head_first=cfg.head_first, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop) self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * cfg.expand_ratio), act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def _partition_attn(self, x): img_size = x.shape[1:3] if self.partition_block: partitioned = window_partition(x, self.partition_size) else: partitioned = grid_partition(x, self.partition_size) partitioned = self.attn(partitioned) if self.partition_block: x = window_reverse(partitioned, self.partition_size, img_size) else: x = grid_reverse(partitioned, self.partition_size, img_size) return x def forward(self, x): x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class ParallelPartitionAttention(nn.Module): def __init__(self, dim: int, cfg: MaxxVitTransformerCfg=MaxxVitTransformerCfg(), drop_path: float=0.0): super().__init__() assert dim % 2 == 0 norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) act_layer = get_act_layer(cfg.act_layer) assert cfg.window_size == cfg.grid_size self.partition_size = to_2tuple(cfg.window_size) rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) self.norm1 = norm_layer(dim) self.attn_block = AttentionCl(dim, dim // 2, dim_head=cfg.dim_head, bias=cfg.attn_bias, head_first=cfg.head_first, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop) self.attn_grid = AttentionCl(dim, dim // 2, dim_head=cfg.dim_head, bias=cfg.attn_bias, head_first=cfg.head_first, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop) self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * cfg.expand_ratio), out_features=dim, act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale(dim, init_values=cfg.init_values) if 
cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def _partition_attn(self, x): img_size = x.shape[1:3] partitioned_block = window_partition(x, self.partition_size) partitioned_block = self.attn_block(partitioned_block) x_window = window_reverse(partitioned_block, self.partition_size, img_size) partitioned_grid = grid_partition(x, self.partition_size) partitioned_grid = self.attn_grid(partitioned_grid) x_grid = grid_reverse(partitioned_grid, self.partition_size, img_size) return torch.cat([x_window, x_grid], dim=-1) def forward(self, x): x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x def window_partition_nchw(x, window_size: List[int]): (B, C, H, W) = x.shape _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})') _assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})') x = x.view(B, C, H // window_size[0], window_size[0], W // window_size[1], window_size[1]) windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, window_size[0], window_size[1]) return windows @register_notrace_function def window_reverse_nchw(windows, window_size: List[int], img_size: List[int]): (H, W) = img_size C = windows.shape[1] x = windows.view(-1, H // window_size[0], W // window_size[1], C, window_size[0], window_size[1]) x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, C, H, W) return x def grid_partition_nchw(x, grid_size: List[int]): (B, C, H, W) = x.shape _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}') _assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}') x = x.view(B, C, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1]) windows = x.permute(0, 3, 5, 1, 2, 4).contiguous().view(-1, C, grid_size[0], grid_size[1]) return windows @register_notrace_function def grid_reverse_nchw(windows, grid_size: List[int], img_size: List[int]): (H, W) = img_size C = windows.shape[1] x = windows.view(-1, H // grid_size[0], W // grid_size[1], C, grid_size[0], grid_size[1]) x = x.permute(0, 3, 4, 1, 5, 2).contiguous().view(-1, C, H, W) return x class PartitionAttention2d(nn.Module): def __init__(self, dim: int, partition_type: str='block', cfg: MaxxVitTransformerCfg=MaxxVitTransformerCfg(), drop_path: float=0.0): super().__init__() norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) act_layer = get_act_layer(cfg.act_layer) self.partition_block = partition_type == 'block' self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size) rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) self.norm1 = norm_layer(dim) self.attn = Attention2d(dim, dim, dim_head=cfg.dim_head, bias=cfg.attn_bias, head_first=cfg.head_first, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop) self.ls1 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = ConvMlp(in_features=dim, hidden_features=int(dim * cfg.expand_ratio), act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def _partition_attn(self, x): img_size = x.shape[-2:] if self.partition_block: 
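# Block attention operates on non-overlapping local windows; grid attention instead
# gathers a strided grid across the whole map for sparse global mixing. Both use the
# same partition size, which cfg_window_size() derives as img_size // partition_ratio
# (7x7 at the default 224x224 input with partition_ratio=32).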
partitioned = window_partition_nchw(x, self.partition_size) else: partitioned = grid_partition_nchw(x, self.partition_size) partitioned = self.attn(partitioned) if self.partition_block: x = window_reverse_nchw(partitioned, self.partition_size, img_size) else: x = grid_reverse_nchw(partitioned, self.partition_size, img_size) return x def forward(self, x): x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class MaxxVitBlock(nn.Module): def __init__(self, dim: int, dim_out: int, stride: int=1, conv_cfg: MaxxVitConvCfg=MaxxVitConvCfg(), transformer_cfg: MaxxVitTransformerCfg=MaxxVitTransformerCfg(), drop_path: float=0.0): super().__init__() self.nchw_attn = transformer_cfg.use_nchw_attn conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) attn_kwargs = dict(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) partition_layer = PartitionAttention2d if self.nchw_attn else PartitionAttentionCl self.attn_block = None if transformer_cfg.no_block_attn else partition_layer(**attn_kwargs) self.attn_grid = partition_layer(partition_type='grid', **attn_kwargs) def init_weights(self, scheme=''): if self.attn_block is not None: named_apply(partial(_init_transformer, scheme=scheme), self.attn_block) named_apply(partial(_init_transformer, scheme=scheme), self.attn_grid) named_apply(partial(_init_conv, scheme=scheme), self.conv) def forward(self, x): x = self.conv(x) if not self.nchw_attn: x = x.permute(0, 2, 3, 1) if self.attn_block is not None: x = self.attn_block(x) x = self.attn_grid(x) if not self.nchw_attn: x = x.permute(0, 3, 1, 2) return x class ParallelMaxxVitBlock(nn.Module): def __init__(self, dim, dim_out, stride=1, num_conv=2, conv_cfg: MaxxVitConvCfg=MaxxVitConvCfg(), transformer_cfg: MaxxVitTransformerCfg=MaxxVitTransformerCfg(), drop_path=0.0): super().__init__() conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock if num_conv > 1: convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)] convs += [conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)] * (num_conv - 1) self.conv = nn.Sequential(*convs) else: self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) def init_weights(self, scheme=''): named_apply(partial(_init_transformer, scheme=scheme), self.attn) named_apply(partial(_init_conv, scheme=scheme), self.conv) def forward(self, x): x = self.conv(x) x = x.permute(0, 2, 3, 1) x = self.attn(x) x = x.permute(0, 3, 1, 2) return x class MaxxVitStage(nn.Module): def __init__(self, in_chs: int, out_chs: int, stride: int=2, depth: int=4, feat_size: Tuple[int, int]=(14, 14), block_types: Union[str, Tuple[str]]='C', transformer_cfg: MaxxVitTransformerCfg=MaxxVitTransformerCfg(), conv_cfg: MaxxVitConvCfg=MaxxVitConvCfg(), drop_path: Union[float, List[float]]=0.0): super().__init__() self.grad_checkpointing = False block_types = extend_tuple(block_types, depth) blocks = [] for (i, t) in enumerate(block_types): block_stride = stride if i == 0 else 1 assert t in ('C', 'T', 'M', 'PM') if t == 'C': conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock blocks += [conv_cls(in_chs, out_chs, stride=block_stride, cfg=conv_cfg, drop_path=drop_path[i])] elif t == 'T': rel_pos_cls = 
get_rel_pos_cls(transformer_cfg, feat_size) blocks += [TransformerBlock2d(in_chs, out_chs, stride=block_stride, rel_pos_cls=rel_pos_cls, cfg=transformer_cfg, drop_path=drop_path[i])] elif t == 'M': blocks += [MaxxVitBlock(in_chs, out_chs, stride=block_stride, conv_cfg=conv_cfg, transformer_cfg=transformer_cfg, drop_path=drop_path[i])] elif t == 'PM': blocks += [ParallelMaxxVitBlock(in_chs, out_chs, stride=block_stride, conv_cfg=conv_cfg, transformer_cfg=transformer_cfg, drop_path=drop_path[i])] in_chs = out_chs self.blocks = nn.Sequential(*blocks) def forward(self, x): if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class Stem(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int=3, padding: str='', bias: bool=False, act_layer: str='gelu', norm_layer: str='batchnorm2d', norm_eps: float=1e-05): super().__init__() if not isinstance(out_chs, (list, tuple)): out_chs = to_2tuple(out_chs) norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps) self.out_chs = out_chs[-1] self.stride = 2 self.conv1 = create_conv2d(in_chs, out_chs[0], kernel_size, stride=2, padding=padding, bias=bias) self.norm1 = norm_act_layer(out_chs[0]) self.conv2 = create_conv2d(out_chs[0], out_chs[1], kernel_size, stride=1, padding=padding, bias=bias) def init_weights(self, scheme=''): named_apply(partial(_init_conv, scheme=scheme), self) def forward(self, x): x = self.conv1(x) x = self.norm1(x) x = self.conv2(x) return x def cfg_window_size(cfg: MaxxVitTransformerCfg, img_size: Tuple[int, int]): if cfg.window_size is not None: assert cfg.grid_size return cfg partition_size = (img_size[0] // cfg.partition_ratio, img_size[1] // cfg.partition_ratio) cfg = replace(cfg, window_size=partition_size, grid_size=partition_size) return cfg def _overlay_kwargs(cfg: MaxxVitCfg, **kwargs): transformer_kwargs = {} conv_kwargs = {} base_kwargs = {} for (k, v) in kwargs.items(): if k.startswith('transformer_'): transformer_kwargs[k.replace('transformer_', '')] = v elif k.startswith('conv_'): conv_kwargs[k.replace('conv_', '')] = v else: base_kwargs[k] = v cfg = replace(cfg, transformer_cfg=replace(cfg.transformer_cfg, **transformer_kwargs), conv_cfg=replace(cfg.conv_cfg, **conv_kwargs), **base_kwargs) return cfg class MaxxVit(nn.Module): def __init__(self, cfg: MaxxVitCfg, img_size: Union[int, Tuple[int, int]]=224, in_chans: int=3, num_classes: int=1000, global_pool: str='avg', drop_rate: float=0.0, drop_path_rate: float=0.0, **kwargs): super().__init__() img_size = to_2tuple(img_size) if kwargs: cfg = _overlay_kwargs(cfg, **kwargs) transformer_cfg = cfg_window_size(cfg.transformer_cfg, img_size) self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.embed_dim = cfg.embed_dim[-1] self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] self.stem = Stem(in_chs=in_chans, out_chs=cfg.stem_width, padding=cfg.conv_cfg.padding, bias=cfg.stem_bias, act_layer=cfg.conv_cfg.act_layer, norm_layer=cfg.conv_cfg.norm_layer, norm_eps=cfg.conv_cfg.norm_eps) stride = self.stem.stride self.feature_info += [dict(num_chs=self.stem.out_chs, reduction=2, module='stem')] feat_size = tuple([i // s for (i, s) in zip(img_size, to_2tuple(stride))]) num_stages = len(cfg.embed_dim) assert len(cfg.depths) == num_stages dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] in_chs = self.stem.out_chs stages = [] for i in range(num_stages): 
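# Each stage opens with a stride-2 block, so resolution halves per stage: after the
# stride-2 stem, stage i (0-based) runs at img_size / 2**(i + 2), e.g. a 224 input
# gives 56, 28, 14, 7 across the four default stages (hence pool_size=(7, 7) in _cfg).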
stage_stride = 2 out_chs = cfg.embed_dim[i] feat_size = tuple([(r - 1) // stage_stride + 1 for r in feat_size]) stages += [MaxxVitStage(in_chs, out_chs, depth=cfg.depths[i], block_types=cfg.block_type[i], conv_cfg=cfg.conv_cfg, transformer_cfg=transformer_cfg, feat_size=feat_size, drop_path=dpr[i])] stride *= stage_stride in_chs = out_chs self.feature_info += [dict(num_chs=out_chs, reduction=stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) final_norm_layer = partial(get_norm_layer(cfg.transformer_cfg.norm_layer), eps=cfg.transformer_cfg.norm_eps) if cfg.head_hidden_size: self.norm = nn.Identity() self.head_hidden_size = cfg.head_hidden_size self.head = NormMlpClassifierHead(self.num_features, num_classes, hidden_size=self.head_hidden_size, pool_type=global_pool, drop_rate=drop_rate, norm_layer=final_norm_layer) else: self.head_hidden_size = self.num_features self.norm = final_norm_layer(self.num_features) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) assert cfg.weight_init in ('', 'normal', 'trunc_normal', 'xavier_normal', 'vit_eff') if cfg.weight_init: named_apply(partial(self._init_weights, scheme=cfg.weight_init), self) def _init_weights(self, module, name, scheme=''): if hasattr(module, 'init_weights'): try: module.init_weights(scheme=scheme) except TypeError: module.init_weights() @torch.jit.ignore def no_weight_decay(self): return {k for (k, _) in self.named_parameters() if any((n in k for n in ['relative_position_bias_table', 'rel_pos.mlp']))} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^stem', blocks=[('^stages\\.(\\d+)', None), ('^norm', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
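# Usage note (an illustrative sketch, not in the original source): feature indices
# count the stem as 0 and the stages as 1..len(stages), so e.g.
#   feats = model.forward_intermediates(x, indices=[1, 2, 3], intermediates_only=True)
# returns three NCHW maps at strides 4, 8 and 16 for the default four-stage config.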
intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.stages) + 1, indices) feat_idx = 0 x = self.stem(x) if feat_idx in take_indices: intermediates.append(x) last_idx = len(self.stages) if torch.jit.is_scripting() or not stop_early: stages = self.stages else: stages = self.stages[:max_index] for stage in stages: feat_idx += 1 x = stage(x) if feat_idx in take_indices: if norm and feat_idx == last_idx: x_inter = self.norm(x) else: x_inter = x intermediates.append(x_inter) if intermediates_only: return intermediates x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.stages) + 1, indices) self.stages = self.stages[:max_index] if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _rw_coat_cfg(stride_mode='pool', pool_type='avg2', conv_output_bias=False, conv_attn_early=False, conv_attn_act_layer='relu', conv_norm_layer='', transformer_shortcut_bias=True, transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', init_values=None, rel_pos_type='bias', rel_pos_dim=512): return dict(conv_cfg=MaxxVitConvCfg(stride_mode=stride_mode, pool_type=pool_type, pre_norm_act=True, expand_output=False, output_bias=conv_output_bias, attn_early=conv_attn_early, attn_act_layer=conv_attn_act_layer, act_layer='silu', norm_layer=conv_norm_layer), transformer_cfg=MaxxVitTransformerCfg(expand_first=False, shortcut_bias=transformer_shortcut_bias, pool_type=pool_type, init_values=init_values, norm_layer=transformer_norm_layer, norm_layer_cl=transformer_norm_layer_cl, rel_pos_type=rel_pos_type, rel_pos_dim=rel_pos_dim)) def _rw_max_cfg(stride_mode='dw', pool_type='avg2', conv_output_bias=False, conv_attn_ratio=1 / 16, conv_norm_layer='', transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', window_size=None, dim_head=32, init_values=None, rel_pos_type='bias', rel_pos_dim=512): return dict(conv_cfg=MaxxVitConvCfg(stride_mode=stride_mode, pool_type=pool_type, expand_output=False, output_bias=conv_output_bias, attn_ratio=conv_attn_ratio, act_layer='silu', norm_layer=conv_norm_layer), transformer_cfg=MaxxVitTransformerCfg(expand_first=False, pool_type=pool_type, dim_head=dim_head, window_size=window_size, init_values=init_values, norm_layer=transformer_norm_layer, norm_layer_cl=transformer_norm_layer_cl, rel_pos_type=rel_pos_type, rel_pos_dim=rel_pos_dim)) def _next_cfg(stride_mode='dw', pool_type='avg2', conv_norm_layer='layernorm2d', conv_norm_layer_cl='layernorm', transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', window_size=None, no_block_attn=False, init_values=1e-06, rel_pos_type='mlp', rel_pos_dim=512): init_values = to_2tuple(init_values) return dict(conv_cfg=MaxxVitConvCfg(block_type='convnext', stride_mode=stride_mode, pool_type=pool_type, expand_output=False, init_values=init_values[0], norm_layer=conv_norm_layer, norm_layer_cl=conv_norm_layer_cl), transformer_cfg=MaxxVitTransformerCfg(expand_first=False, pool_type=pool_type, window_size=window_size, no_block_attn=no_block_attn,
init_values=init_values[1], norm_layer=transformer_norm_layer, norm_layer_cl=transformer_norm_layer_cl, rel_pos_type=rel_pos_type, rel_pos_dim=rel_pos_dim)) def _tf_cfg(): return dict(conv_cfg=MaxxVitConvCfg(norm_eps=0.001, act_layer='gelu_tanh', padding='same'), transformer_cfg=MaxxVitTransformerCfg(norm_eps=1e-05, act_layer='gelu_tanh', head_first=False, rel_pos_type='bias_tf')) model_cfgs = dict(coatnet_pico_rw=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(2, 3, 5, 2), stem_width=(32, 64), **_rw_max_cfg(conv_output_bias=True, conv_attn_ratio=0.25)), coatnet_nano_rw=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), **_rw_max_cfg(stride_mode='pool', conv_output_bias=True, conv_attn_ratio=0.25)), coatnet_0_rw=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 3, 7, 2), stem_width=(32, 64), **_rw_coat_cfg(conv_attn_early=True, transformer_shortcut_bias=False)), coatnet_1_rw=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=(32, 64), **_rw_coat_cfg(stride_mode='dw', conv_attn_early=True, transformer_shortcut_bias=False)), coatnet_2_rw=MaxxVitCfg(embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), stem_width=(64, 128), **_rw_coat_cfg(stride_mode='dw', conv_attn_act_layer='silu')), coatnet_3_rw=MaxxVitCfg(embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), stem_width=(96, 192), **_rw_coat_cfg(stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-06)), coatnet_bn_0_rw=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 3, 7, 2), stem_width=(32, 64), **_rw_coat_cfg(stride_mode='dw', conv_attn_early=True, transformer_shortcut_bias=False, transformer_norm_layer='batchnorm2d')), coatnet_rmlp_nano_rw=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), **_rw_max_cfg(conv_output_bias=True, conv_attn_ratio=0.25, rel_pos_type='mlp', rel_pos_dim=384)), coatnet_rmlp_0_rw=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 3, 7, 2), stem_width=(32, 64), **_rw_coat_cfg(stride_mode='dw', rel_pos_type='mlp')), coatnet_rmlp_1_rw=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=(32, 64), **_rw_coat_cfg(pool_type='max', conv_attn_early=True, transformer_shortcut_bias=False, rel_pos_type='mlp', rel_pos_dim=384)), coatnet_rmlp_1_rw2=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=(32, 64), **_rw_coat_cfg(stride_mode='dw', rel_pos_type='mlp', rel_pos_dim=512)), coatnet_rmlp_2_rw=MaxxVitCfg(embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), stem_width=(64, 128), **_rw_coat_cfg(stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-06, rel_pos_type='mlp')), coatnet_rmlp_3_rw=MaxxVitCfg(embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), stem_width=(96, 192), **_rw_coat_cfg(stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-06, rel_pos_type='mlp')), coatnet_nano_cc=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), block_type=('C', 'C', ('C', 'T'), ('C', 'T')), **_rw_coat_cfg()), coatnext_nano_rw=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), weight_init='normal', **_next_cfg(rel_pos_type='bias', init_values=(1e-05, None))), coatnet_0=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 3, 5, 2), stem_width=64, head_hidden_size=768), coatnet_1=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=64, head_hidden_size=768), coatnet_2=MaxxVitCfg(embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), stem_width=128, head_hidden_size=1024), 
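# The unsuffixed coatnet_0 .. coatnet_5 entries in this dict use the default
# ('C', 'C', 'T', 'T') block layout and the default conv/transformer cfgs, i.e. the
# paper-style CoAtNet scaling; all are registered with '.untrained' tags (no
# pretrained weights). A minimal sketch, assuming the timm registry:
#   model = timm.create_model('coatnet_2_224', pretrained=False)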
coatnet_3=MaxxVitCfg(embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), stem_width=192, head_hidden_size=1536), coatnet_4=MaxxVitCfg(embed_dim=(192, 384, 768, 1536), depths=(2, 12, 28, 2), stem_width=192, head_hidden_size=1536), coatnet_5=MaxxVitCfg(embed_dim=(256, 512, 1280, 2048), depths=(2, 12, 28, 2), stem_width=192, head_hidden_size=2048), maxvit_pico_rw=MaxxVitCfg(embed_dim=(32, 64, 128, 256), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(24, 32), **_rw_max_cfg()), maxvit_nano_rw=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg()), maxvit_tiny_rw=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg()), maxvit_tiny_pm=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('PM',) * 4, stem_width=(32, 64), **_rw_max_cfg()), maxvit_rmlp_pico_rw=MaxxVitCfg(embed_dim=(32, 64, 128, 256), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(24, 32), **_rw_max_cfg(rel_pos_type='mlp')), maxvit_rmlp_nano_rw=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(rel_pos_type='mlp')), maxvit_rmlp_tiny_rw=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(rel_pos_type='mlp')), maxvit_rmlp_small_rw=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(rel_pos_type='mlp', init_values=1e-06)), maxvit_rmlp_base_rw=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=(32, 64), head_hidden_size=768, **_rw_max_cfg(rel_pos_type='mlp')), maxxvit_rmlp_nano_rw=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(32, 64), weight_init='normal', **_next_cfg()), maxxvit_rmlp_tiny_rw=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_next_cfg()), maxxvit_rmlp_small_rw=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(48, 96), **_next_cfg()), maxxvitv2_nano_rw=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(48, 96), weight_init='normal', **_next_cfg(no_block_attn=True, rel_pos_type='bias')), maxxvitv2_rmlp_base_rw=MaxxVitCfg(embed_dim=(128, 256, 512, 1024), depths=(2, 6, 12, 2), block_type=('M',) * 4, stem_width=(64, 128), **_next_cfg(no_block_attn=True)), maxxvitv2_rmlp_large_rw=MaxxVitCfg(embed_dim=(160, 320, 640, 1280), depths=(2, 6, 16, 2), block_type=('M',) * 4, stem_width=(80, 160), head_hidden_size=1280, **_next_cfg(no_block_attn=True)), maxvit_tiny_tf=MaxxVitCfg(embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=64, stem_bias=True, head_hidden_size=512, **_tf_cfg()), maxvit_small_tf=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=64, stem_bias=True, head_hidden_size=768, **_tf_cfg()), maxvit_base_tf=MaxxVitCfg(embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=64, stem_bias=True, head_hidden_size=768, **_tf_cfg()), maxvit_large_tf=MaxxVitCfg(embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=128, stem_bias=True, head_hidden_size=1024, **_tf_cfg()), maxvit_xlarge_tf=MaxxVitCfg(embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), 
block_type=('M',) * 4, stem_width=192, stem_bias=True, head_hidden_size=1536, **_tf_cfg())) def checkpoint_filter_fn(state_dict, model: nn.Module): model_state_dict = model.state_dict() out_dict = {} for (k, v) in state_dict.items(): if k.endswith('relative_position_bias_table'): m = model.get_submodule(k[:-29]) if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]: v = resize_rel_pos_bias_table(v, new_window_size=m.window_size, new_bias_shape=m.relative_position_bias_table.shape) if k in model_state_dict and v.ndim != model_state_dict[k].ndim and (v.numel() == model_state_dict[k].numel()): assert v.ndim in (2, 4) v = v.reshape(model_state_dict[k].shape) out_dict[k] = v return out_dict def _create_maxxvit(variant, cfg_variant=None, pretrained=False, **kwargs): if cfg_variant is None: if variant in model_cfgs: cfg_variant = variant else: cfg_variant = '_'.join(variant.split('_')[:-1]) return build_model_with_cfg(MaxxVit, variant, pretrained, model_cfg=model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), pretrained_filter_fn=checkpoint_filter_fn, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'stem.conv1', 'classifier': 'head.fc', 'fixed_input_size': True, **kwargs} default_cfgs = generate_default_cfgs({'coatnet_pico_rw_224.untrained': _cfg(url=''), 'coatnet_nano_rw_224.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_nano_rw_224_sw-f53093b4.pth', crop_pct=0.9), 'coatnet_0_rw_224.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_0_rw_224_sw-a6439706.pth'), 'coatnet_1_rw_224.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_1_rw_224_sw-5cae1ea8.pth'), 'coatnet_2_rw_224.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), 'coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), 'coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), 'coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'coatnet_bn_0_rw_224.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_bn_0_rw_224_sw-c228e218.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=0.95), 'coatnet_rmlp_nano_rw_224.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_nano_rw_224_sw-bd1d51b3.pth', crop_pct=0.9), 'coatnet_rmlp_0_rw_224.untrained': _cfg(url=''), 'coatnet_rmlp_1_rw_224.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_1_rw_224_sw-9051e6c3.pth'), 'coatnet_rmlp_2_rw_224.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_2_rw_224_sw-5ccfac55.pth'), 'coatnet_rmlp_3_rw_224.untrained': _cfg(url=''), 'coatnet_nano_cc_224.untrained': _cfg(url=''), 'coatnext_nano_rw_224.sw_in1k': _cfg(hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnext_nano_rw_224_ad-22cb71c2.pth', crop_pct=0.9), 'coatnet_2_rw_224.sw_in12k': _cfg(hf_hub_id='timm/', num_classes=11821), 'coatnet_3_rw_224.sw_in12k': _cfg(hf_hub_id='timm/', num_classes=11821), 'coatnet_rmlp_1_rw2_224.sw_in12k': _cfg(hf_hub_id='timm/', num_classes=11821), 'coatnet_rmlp_2_rw_224.sw_in12k': _cfg(hf_hub_id='timm/', num_classes=11821), 'coatnet_0_224.untrained': _cfg(url=''), 'coatnet_1_224.untrained': _cfg(url=''), 'coatnet_2_224.untrained': _cfg(url=''), 'coatnet_3_224.untrained': _cfg(url=''), 'coatnet_4_224.untrained': _cfg(url=''), 'coatnet_5_224.untrained': _cfg(url=''), 'maxvit_pico_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_nano_rw_256.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_nano_rw_256_sw-fb127241.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_tiny_rw_224.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_tiny_rw_224_sw-7d0dffeb.pth'), 'maxvit_tiny_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_tiny_pm_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_pico_rw_256.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_pico_rw_256_sw-8d82f2c6.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_nano_rw_256.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_nano_rw_256_sw-c17bb0d6.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_tiny_rw_256.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_tiny_rw_256_sw-bbef0ff5.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_small_rw_224.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth', crop_pct=0.9), 'maxvit_rmlp_small_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), 'maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_rmlp_base_rw_224.sw_in12k': _cfg(hf_hub_id='timm/', num_classes=11821), 'maxxvit_rmlp_nano_rw_256.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_nano_rw_256_sw-0325d459.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvit_rmlp_tiny_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvit_rmlp_small_rw_256.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvitv2_nano_rw_256.sw_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), 'maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), 
crop_pct=1.0, crop_mode='squash'), 'maxxvitv2_rmlp_large_rw_224.untrained': _cfg(url=''), 'maxxvitv2_rmlp_base_rw_224.sw_in12k': _cfg(hf_hub_id='timm/', num_classes=11821), 'maxvit_tiny_tf_224.in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'maxvit_tiny_tf_384.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_tiny_tf_512.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_small_tf_224.in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'maxvit_small_tf_384.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_small_tf_512.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_base_tf_224.in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'maxvit_base_tf_384.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_base_tf_512.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_large_tf_224.in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'maxvit_large_tf_384.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_large_tf_512.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_base_tf_224.in21k': _cfg(hf_hub_id='timm/', num_classes=21843), 'maxvit_base_tf_384.in21k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_base_tf_512.in21k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_large_tf_224.in21k': _cfg(hf_hub_id='timm/', num_classes=21843), 'maxvit_large_tf_384.in21k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_large_tf_512.in21k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_xlarge_tf_224.in21k': _cfg(hf_hub_id='timm/', num_classes=21843), 'maxvit_xlarge_tf_384.in21k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_xlarge_tf_512.in21k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash')}) @register_model def coatnet_pico_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_pico_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_nano_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_0_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_0_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_1_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_1_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_2_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_2_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_3_rw_224(pretrained=False, **kwargs)
-> MaxxVit: return _create_maxxvit('coatnet_3_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_bn_0_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_bn_0_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_nano_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_0_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_0_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_1_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_1_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_1_rw2_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_1_rw2_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_2_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_2_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_2_rw_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_2_rw_384', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_3_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_3_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_nano_cc_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_nano_cc_224', pretrained=pretrained, **kwargs) @register_model def coatnext_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnext_nano_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_0_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_0_224', pretrained=pretrained, **kwargs) @register_model def coatnet_1_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_1_224', pretrained=pretrained, **kwargs) @register_model def coatnet_2_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_2_224', pretrained=pretrained, **kwargs) @register_model def coatnet_3_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_3_224', pretrained=pretrained, **kwargs) @register_model def coatnet_4_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_4_224', pretrained=pretrained, **kwargs) @register_model def coatnet_5_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_5_224', pretrained=pretrained, **kwargs) @register_model def maxvit_pico_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_pico_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_nano_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_rw_224', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_pico_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_pico_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_nano_rw_256', pretrained=pretrained, 
**kwargs) @register_model def maxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_small_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_small_rw_224', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_small_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_base_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_base_rw_224', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_base_rw_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_base_rw_384', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_pm_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_pm_256', pretrained=pretrained, **kwargs) @register_model def maxxvit_rmlp_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs) @register_model def maxxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs) @register_model def maxxvit_rmlp_small_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs) @register_model def maxxvitv2_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvitv2_nano_rw_256', pretrained=pretrained, **kwargs) @register_model def maxxvitv2_rmlp_base_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvitv2_rmlp_base_rw_224', pretrained=pretrained, **kwargs) @register_model def maxxvitv2_rmlp_base_rw_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvitv2_rmlp_base_rw_384', pretrained=pretrained, **kwargs) @register_model def maxxvitv2_rmlp_large_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvitv2_rmlp_large_rw_224', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_tf_224', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_tf_384', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_tf_512', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_small_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_small_tf_224', 'maxvit_small_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_small_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_small_tf_384', 'maxvit_small_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_small_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_small_tf_512', 'maxvit_small_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_base_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_base_tf_224', 'maxvit_base_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_base_tf_384(pretrained=False, **kwargs) -> MaxxVit: return 
_create_maxxvit('maxvit_base_tf_384', 'maxvit_base_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_base_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_base_tf_512', 'maxvit_base_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_large_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_large_tf_224', 'maxvit_large_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_large_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_large_tf_384', 'maxvit_large_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_large_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_large_tf_512', 'maxvit_large_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_xlarge_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_xlarge_tf_224', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_xlarge_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_xlarge_tf_384', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_xlarge_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_xlarge_tf_512', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs) # File: pytorch-image-models-main/timm/models/metaformer.py """""" from collections import OrderedDict from functools import partial from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from torch.jit import Final from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import trunc_normal_, DropPath, SelectAdaptivePool2d, GroupNorm1, LayerNorm, LayerNorm2d, Mlp, use_fused_attn from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['MetaFormer'] class Stem(nn.Module): def __init__(self, in_channels, out_channels, norm_layer=None): super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=7, stride=4, padding=2) self.norm = norm_layer(out_channels) if norm_layer else nn.Identity() def forward(self, x): x = self.conv(x) x = self.norm(x) return x class Downsampling(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, norm_layer=None): super().__init__() self.norm = norm_layer(in_channels) if norm_layer else nn.Identity() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) def forward(self, x): x = self.norm(x) x = self.conv(x) return x class Scale(nn.Module): def __init__(self, dim, init_value=1.0, trainable=True, use_nchw=True): super().__init__() self.shape = (dim, 1, 1) if use_nchw else (dim,) self.scale = nn.Parameter(init_value * torch.ones(dim), requires_grad=trainable) def forward(self, x): return x * self.scale.view(self.shape) class SquaredReLU(nn.Module): def __init__(self, inplace=False): super().__init__() self.relu = nn.ReLU(inplace=inplace) def forward(self, x): return torch.square(self.relu(x)) class StarReLU(nn.Module): def __init__(self, scale_value=1.0, bias_value=0.0, scale_learnable=True, bias_learnable=True, mode=None, inplace=False): super().__init__() self.inplace = inplace self.relu = nn.ReLU(inplace=inplace) self.scale = nn.Parameter(scale_value * torch.ones(1), requires_grad=scale_learnable) self.bias = nn.Parameter(bias_value * torch.ones(1), 
requires_grad=bias_learnable) def forward(self, x): return self.scale * self.relu(x) ** 2 + self.bias class Attention(nn.Module): fused_attn: Final[bool] def __init__(self, dim, head_dim=32, num_heads=None, qkv_bias=False, attn_drop=0.0, proj_drop=0.0, proj_bias=False, **kwargs): super().__init__() self.head_dim = head_dim self.scale = head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.num_heads = num_heads if num_heads else dim // head_dim if self.num_heads == 0: self.num_heads = 1 self.attention_dim = self.num_heads * self.head_dim self.qkv = nn.Linear(dim, self.attention_dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(self.attention_dim, dim, bias=proj_bias) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): (B, N, C) = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.0) else: attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class GroupNorm1NoBias(GroupNorm1): def __init__(self, num_channels, **kwargs): super().__init__(num_channels, **kwargs) self.eps = kwargs.get('eps', 1e-06) self.bias = None class LayerNorm2dNoBias(LayerNorm2d): def __init__(self, num_channels, **kwargs): super().__init__(num_channels, **kwargs) self.eps = kwargs.get('eps', 1e-06) self.bias = None class LayerNormNoBias(nn.LayerNorm): def __init__(self, num_channels, **kwargs): super().__init__(num_channels, **kwargs) self.eps = kwargs.get('eps', 1e-06) self.bias = None class SepConv(nn.Module): def __init__(self, dim, expansion_ratio=2, act1_layer=StarReLU, act2_layer=nn.Identity, bias=False, kernel_size=7, padding=3, **kwargs): super().__init__() mid_channels = int(expansion_ratio * dim) self.pwconv1 = nn.Conv2d(dim, mid_channels, kernel_size=1, bias=bias) self.act1 = act1_layer() self.dwconv = nn.Conv2d(mid_channels, mid_channels, kernel_size=kernel_size, padding=padding, groups=mid_channels, bias=bias) self.act2 = act2_layer() self.pwconv2 = nn.Conv2d(mid_channels, dim, kernel_size=1, bias=bias) def forward(self, x): x = self.pwconv1(x) x = self.act1(x) x = self.dwconv(x) x = self.act2(x) x = self.pwconv2(x) return x class Pooling(nn.Module): def __init__(self, pool_size=3, **kwargs): super().__init__() self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) def forward(self, x): y = self.pool(x) return y - x class MlpHead(nn.Module): def __init__(self, dim, num_classes=1000, mlp_ratio=4, act_layer=SquaredReLU, norm_layer=LayerNorm, drop_rate=0.0, bias=True): super().__init__() hidden_features = int(mlp_ratio * dim) self.fc1 = nn.Linear(dim, hidden_features, bias=bias) self.act = act_layer() self.norm = norm_layer(hidden_features) self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias) self.head_drop = nn.Dropout(drop_rate) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.norm(x) x = self.head_drop(x) x = self.fc2(x) return x class MetaFormerBlock(nn.Module): def __init__(self, dim, token_mixer=Pooling, mlp_act=StarReLU, mlp_bias=False, norm_layer=LayerNorm2d, proj_drop=0.0, drop_path=0.0, use_nchw=True, layer_scale_init_value=None, res_scale_init_value=None, **kwargs): super().__init__() ls_layer = partial(Scale, dim=dim, init_value=layer_scale_init_value, 
use_nchw=use_nchw) rs_layer = partial(Scale, dim=dim, init_value=res_scale_init_value, use_nchw=use_nchw) self.norm1 = norm_layer(dim) self.token_mixer = token_mixer(dim=dim, proj_drop=proj_drop, **kwargs) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.layer_scale1 = ls_layer() if layer_scale_init_value is not None else nn.Identity() self.res_scale1 = rs_layer() if res_scale_init_value is not None else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(dim, int(4 * dim), act_layer=mlp_act, bias=mlp_bias, drop=proj_drop, use_conv=use_nchw) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.layer_scale2 = ls_layer() if layer_scale_init_value is not None else nn.Identity() self.res_scale2 = rs_layer() if res_scale_init_value is not None else nn.Identity() def forward(self, x): x = self.res_scale1(x) + self.layer_scale1(self.drop_path1(self.token_mixer(self.norm1(x)))) x = self.res_scale2(x) + self.layer_scale2(self.drop_path2(self.mlp(self.norm2(x)))) return x class MetaFormerStage(nn.Module): def __init__(self, in_chs, out_chs, depth=2, token_mixer=nn.Identity, mlp_act=StarReLU, mlp_bias=False, downsample_norm=LayerNorm2d, norm_layer=LayerNorm2d, proj_drop=0.0, dp_rates=[0.0] * 2, layer_scale_init_value=None, res_scale_init_value=None, **kwargs): super().__init__() self.grad_checkpointing = False self.use_nchw = not issubclass(token_mixer, Attention) self.downsample = nn.Identity() if in_chs == out_chs else Downsampling(in_chs, out_chs, kernel_size=3, stride=2, padding=1, norm_layer=downsample_norm) self.blocks = nn.Sequential(*[MetaFormerBlock(dim=out_chs, token_mixer=token_mixer, mlp_act=mlp_act, mlp_bias=mlp_bias, norm_layer=norm_layer, proj_drop=proj_drop, drop_path=dp_rates[i], layer_scale_init_value=layer_scale_init_value, res_scale_init_value=res_scale_init_value, use_nchw=self.use_nchw, **kwargs) for i in range(depth)]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x: Tensor): x = self.downsample(x) (B, C, H, W) = x.shape if not self.use_nchw: x = x.reshape(B, C, -1).transpose(1, 2) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) if not self.use_nchw: x = x.transpose(1, 2).reshape(B, C, H, W) return x class MetaFormer(nn.Module): def __init__(self, in_chans=3, num_classes=1000, global_pool='avg', depths=(2, 2, 6, 2), dims=(64, 128, 320, 512), token_mixers=Pooling, mlp_act=StarReLU, mlp_bias=False, drop_path_rate=0.0, proj_drop_rate=0.0, drop_rate=0.0, layer_scale_init_values=None, res_scale_init_values=(None, None, 1.0, 1.0), downsample_norm=LayerNorm2dNoBias, norm_layers=LayerNorm2dNoBias, output_norm=LayerNorm2d, use_mlp_head=True, **kwargs): super().__init__() self.num_classes = num_classes self.num_features = dims[-1] self.drop_rate = drop_rate self.use_mlp_head = use_mlp_head self.num_stages = len(depths) if not isinstance(depths, (list, tuple)): depths = [depths] if not isinstance(dims, (list, tuple)): dims = [dims] if not isinstance(token_mixers, (list, tuple)): token_mixers = [token_mixers] * self.num_stages if not isinstance(norm_layers, (list, tuple)): norm_layers = [norm_layers] * self.num_stages if not isinstance(layer_scale_init_values, (list, tuple)): layer_scale_init_values = [layer_scale_init_values] * self.num_stages if not isinstance(res_scale_init_values, (list, tuple)): res_scale_init_values = [res_scale_init_values] * self.num_stages 
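# The scalar-vs-sequence normalization just above broadcasts per-model settings to per-stage lists, so a single value (or class) configures every stage. A minimal sketch of the same idiom, assuming an illustrative `_per_stage` helper that is not part of this module:
#     def _per_stage(value, num_stages):
#         # Leave explicit per-stage sequences alone; replicate scalars/classes.
#         return list(value) if isinstance(value, (list, tuple)) else [value] * num_stages
#     assert _per_stage(Pooling, 4) == [Pooling] * 4
#     assert _per_stage((None, None, 1.0, 1.0), 4) == [None, None, 1.0, 1.0]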
self.grad_checkpointing = False self.feature_info = [] self.stem = Stem(in_chans, dims[0], norm_layer=downsample_norm) stages = [] prev_dim = dims[0] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] for i in range(self.num_stages): stages += [MetaFormerStage(prev_dim, dims[i], depth=depths[i], token_mixer=token_mixers[i], mlp_act=mlp_act, mlp_bias=mlp_bias, proj_drop=proj_drop_rate, dp_rates=dp_rates[i], layer_scale_init_value=layer_scale_init_values[i], res_scale_init_value=res_scale_init_values[i], downsample_norm=downsample_norm, norm_layer=norm_layers[i], **kwargs)] prev_dim = dims[i] self.feature_info += [dict(num_chs=dims[i], reduction=2, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) if num_classes > 0: if self.use_mlp_head: final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate) self.head_hidden_size = self.num_features else: final = nn.Linear(self.num_features, num_classes) self.head_hidden_size = self.num_features else: final = nn.Identity() self.head = nn.Sequential(OrderedDict([('global_pool', SelectAdaptivePool2d(pool_type=global_pool)), ('norm', output_norm(self.num_features)), ('flatten', nn.Flatten(1) if global_pool else nn.Identity()), ('drop', nn.Dropout(drop_rate) if self.use_mlp_head else nn.Identity()), ('fc', final)])) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, (nn.Conv2d, nn.Linear)): trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable for stage in self.stages: stage.set_grad_checkpointing(enable=enable) @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): if global_pool is not None: self.head.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.head.flatten = nn.Flatten(1) if global_pool else nn.Identity() if num_classes > 0: if self.use_mlp_head: final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate) else: final = nn.Linear(self.num_features, num_classes) else: final = nn.Identity() self.head.fc = final def forward_head(self, x: Tensor, pre_logits: bool=False): x = self.head.global_pool(x) x = self.head.norm(x) x = self.head.flatten(x) x = self.head.drop(x) return x if pre_logits else self.head.fc(x) def forward_features(self, x: Tensor): x = self.stem(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward(self, x: Tensor): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'stem.conv.weight' in state_dict: return state_dict import re out_dict = {} is_poolformerv1 = 'network.0.0.mlp.fc1.weight' in state_dict model_state_dict = model.state_dict() for (k, v) in state_dict.items(): if is_poolformerv1: k = re.sub('layer_scale_([0-9]+)', 'layer_scale\\1.scale', k) k = k.replace('network.1', 'downsample_layers.1') k = k.replace('network.3', 'downsample_layers.2') k = k.replace('network.5', 'downsample_layers.3') k = k.replace('network.2', 'network.1') k = k.replace('network.4', 'network.2') k = k.replace('network.6', 'network.3') k = k.replace('network', 'stages') k = re.sub('downsample_layers.([0-9]+)', 'stages.\\1.downsample', k) k = k.replace('downsample.proj', 'downsample.conv') k = k.replace('patch_embed.proj', 'patch_embed.conv') k = 
re.sub('([0-9]+)\\.([0-9]+)', '\\1.blocks.\\2', k) k = k.replace('stages.0.downsample', 'patch_embed') k = k.replace('patch_embed', 'stem') k = k.replace('post_norm', 'norm') k = k.replace('pre_norm', 'norm') k = re.sub('^head', 'head.fc', k) k = re.sub('^norm', 'head.norm', k) if v.shape != model_state_dict[k].shape and v.numel() == model_state_dict[k].numel(): v = v.reshape(model_state_dict[k].shape) out_dict[k] = v return out_dict def _create_metaformer(variant, pretrained=False, **kwargs): default_out_indices = tuple((i for (i, _) in enumerate(kwargs.get('depths', (2, 2, 6, 2))))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg(MetaFormer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 1.0, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head.fc', 'first_conv': 'stem.conv', **kwargs} default_cfgs = generate_default_cfgs({'poolformer_s12.sail_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.9), 'poolformer_s24.sail_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.9), 'poolformer_s36.sail_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.9), 'poolformer_m36.sail_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95), 'poolformer_m48.sail_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95), 'poolformerv2_s12.sail_in1k': _cfg(hf_hub_id='timm/'), 'poolformerv2_s24.sail_in1k': _cfg(hf_hub_id='timm/'), 'poolformerv2_s36.sail_in1k': _cfg(hf_hub_id='timm/'), 'poolformerv2_m36.sail_in1k': _cfg(hf_hub_id='timm/'), 'poolformerv2_m48.sail_in1k': _cfg(hf_hub_id='timm/'), 'convformer_s18.sail_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_s18.sail_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_s18.sail_in22k_ft_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_s18.sail_in22k_ft_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_s18.sail_in22k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'convformer_s36.sail_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_s36.sail_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_s36.sail_in22k_ft_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_s36.sail_in22k_ft_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_s36.sail_in22k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'convformer_m36.sail_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_m36.sail_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_m36.sail_in22k_ft_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_m36.sail_in22k_ft_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_m36.sail_in22k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'convformer_b36.sail_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_b36.sail_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3,
384, 384), pool_size=(12, 12)), 'convformer_b36.sail_in22k_ft_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_b36.sail_in22k_ft_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_b36.sail_in22k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'caformer_s18.sail_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_s18.sail_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_s18.sail_in22k_ft_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_s18.sail_in22k_ft_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_s18.sail_in22k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'caformer_s36.sail_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_s36.sail_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_s36.sail_in22k_ft_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_s36.sail_in22k_ft_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_s36.sail_in22k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'caformer_m36.sail_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_m36.sail_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_m36.sail_in22k_ft_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_m36.sail_in22k_ft_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_m36.sail_in22k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'caformer_b36.sail_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_b36.sail_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_b36.sail_in22k_ft_in1k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_b36.sail_in22k_ft_in1k_384': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_b36.sail_in22k': _cfg(hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841)}) @register_model def poolformer_s12(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[2, 2, 6, 2], dims=[64, 128, 320, 512], downsample_norm=None, mlp_act=nn.GELU, mlp_bias=True, norm_layers=GroupNorm1, layer_scale_init_values=1e-05, res_scale_init_values=None, use_mlp_head=False, **kwargs) return _create_metaformer('poolformer_s12', pretrained=pretrained, **model_kwargs) @register_model def poolformer_s24(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[4, 4, 12, 4], dims=[64, 128, 320, 512], downsample_norm=None, mlp_act=nn.GELU, mlp_bias=True, norm_layers=GroupNorm1, layer_scale_init_values=1e-05, res_scale_init_values=None, use_mlp_head=False, **kwargs) return _create_metaformer('poolformer_s24', pretrained=pretrained, **model_kwargs) @register_model def poolformer_s36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[6, 6, 18, 6], dims=[64, 128, 320, 512], downsample_norm=None, mlp_act=nn.GELU, mlp_bias=True, norm_layers=GroupNorm1, layer_scale_init_values=1e-06, res_scale_init_values=None, use_mlp_head=False, **kwargs) 
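# Note: the deeper PoolFormer variants (s36/m36/m48) use a smaller LayerScale init (1e-06) than s12/s24 (1e-05), per the layer_scale_init_values set in these factory functions.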
return _create_metaformer('poolformer_s36', pretrained=pretrained, **model_kwargs) @register_model def poolformer_m36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[6, 6, 18, 6], dims=[96, 192, 384, 768], downsample_norm=None, mlp_act=nn.GELU, mlp_bias=True, norm_layers=GroupNorm1, layer_scale_init_values=1e-06, res_scale_init_values=None, use_mlp_head=False, **kwargs) return _create_metaformer('poolformer_m36', pretrained=pretrained, **model_kwargs) @register_model def poolformer_m48(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[8, 8, 24, 8], dims=[96, 192, 384, 768], downsample_norm=None, mlp_act=nn.GELU, mlp_bias=True, norm_layers=GroupNorm1, layer_scale_init_values=1e-06, res_scale_init_values=None, use_mlp_head=False, **kwargs) return _create_metaformer('poolformer_m48', pretrained=pretrained, **model_kwargs) @register_model def poolformerv2_s12(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[2, 2, 6, 2], dims=[64, 128, 320, 512], norm_layers=GroupNorm1NoBias, use_mlp_head=False, **kwargs) return _create_metaformer('poolformerv2_s12', pretrained=pretrained, **model_kwargs) @register_model def poolformerv2_s24(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[4, 4, 12, 4], dims=[64, 128, 320, 512], norm_layers=GroupNorm1NoBias, use_mlp_head=False, **kwargs) return _create_metaformer('poolformerv2_s24', pretrained=pretrained, **model_kwargs) @register_model def poolformerv2_s36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[6, 6, 18, 6], dims=[64, 128, 320, 512], norm_layers=GroupNorm1NoBias, use_mlp_head=False, **kwargs) return _create_metaformer('poolformerv2_s36', pretrained=pretrained, **model_kwargs) @register_model def poolformerv2_m36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[6, 6, 18, 6], dims=[96, 192, 384, 768], norm_layers=GroupNorm1NoBias, use_mlp_head=False, **kwargs) return _create_metaformer('poolformerv2_m36', pretrained=pretrained, **model_kwargs) @register_model def poolformerv2_m48(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[8, 8, 24, 8], dims=[96, 192, 384, 768], norm_layers=GroupNorm1NoBias, use_mlp_head=False, **kwargs) return _create_metaformer('poolformerv2_m48', pretrained=pretrained, **model_kwargs) @register_model def convformer_s18(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, norm_layers=LayerNorm2dNoBias, **kwargs) return _create_metaformer('convformer_s18', pretrained=pretrained, **model_kwargs) @register_model def convformer_s36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, norm_layers=LayerNorm2dNoBias, **kwargs) return _create_metaformer('convformer_s36', pretrained=pretrained, **model_kwargs) @register_model def convformer_m36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=SepConv, norm_layers=LayerNorm2dNoBias, **kwargs) return _create_metaformer('convformer_m36', pretrained=pretrained, **model_kwargs) @register_model def convformer_b36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=SepConv, norm_layers=LayerNorm2dNoBias, **kwargs) return _create_metaformer('convformer_b36', pretrained=pretrained, **model_kwargs) @register_model def 
caformer_s18(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2, **kwargs) return _create_metaformer('caformer_s18', pretrained=pretrained, **model_kwargs) @register_model def caformer_s36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2, **kwargs) return _create_metaformer('caformer_s36', pretrained=pretrained, **model_kwargs) @register_model def caformer_m36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=[SepConv, SepConv, Attention, Attention], norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2, **kwargs) return _create_metaformer('caformer_m36', pretrained=pretrained, **model_kwargs) @register_model def caformer_b36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=[SepConv, SepConv, Attention, Attention], norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2, **kwargs) return _create_metaformer('caformer_b36', pretrained=pretrained, **model_kwargs) # File: pytorch-image-models-main/timm/models/mlp_mixer.py """""" import math from functools import partial from typing import List, Optional, Union, Tuple import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, GluMlp, GatedMlp, DropPath, lecun_normal_, to_2tuple from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['MixerBlock', 'MlpMixer'] class MixerBlock(nn.Module): def __init__(self, dim, seq_len, mlp_ratio=(0.5, 4.0), mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-06), act_layer=nn.GELU, drop=0.0, drop_path=0.0): super().__init__() (tokens_dim, channels_dim) = [int(x * dim) for x in to_2tuple(mlp_ratio)] self.norm1 = norm_layer(dim) self.mlp_tokens = mlp_layer(seq_len, tokens_dim, act_layer=act_layer, drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) def forward(self, x): x = x + self.drop_path(self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) x = x + self.drop_path(self.mlp_channels(self.norm2(x))) return x class Affine(nn.Module): def __init__(self, dim): super().__init__() self.alpha = nn.Parameter(torch.ones((1, 1, dim))) self.beta = nn.Parameter(torch.zeros((1, 1, dim))) def forward(self, x): return torch.addcmul(self.beta, self.alpha, x) class ResBlock(nn.Module): def __init__(self, dim, seq_len, mlp_ratio=4, mlp_layer=Mlp, norm_layer=Affine, act_layer=nn.GELU, init_values=0.0001, drop=0.0, drop_path=0.0): super().__init__() channel_dim = int(dim * mlp_ratio) self.norm1 = norm_layer(dim) self.linear_tokens = nn.Linear(seq_len, seq_len) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, drop=drop) self.ls1 = nn.Parameter(init_values * torch.ones(dim)) self.ls2 = 
nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): x = x + self.drop_path(self.ls1 * self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x))) return x class SpatialGatingUnit(nn.Module): def __init__(self, dim, seq_len, norm_layer=nn.LayerNorm): super().__init__() gate_dim = dim // 2 self.norm = norm_layer(gate_dim) self.proj = nn.Linear(seq_len, seq_len) def init_weights(self): nn.init.normal_(self.proj.weight, std=1e-06) nn.init.ones_(self.proj.bias) def forward(self, x): (u, v) = x.chunk(2, dim=-1) v = self.norm(v) v = self.proj(v.transpose(-1, -2)) return u * v.transpose(-1, -2) class SpatialGatingBlock(nn.Module): def __init__(self, dim, seq_len, mlp_ratio=4, mlp_layer=GatedMlp, norm_layer=partial(nn.LayerNorm, eps=1e-06), act_layer=nn.GELU, drop=0.0, drop_path=0.0): super().__init__() channel_dim = int(dim * mlp_ratio) self.norm = norm_layer(dim) sgu = partial(SpatialGatingUnit, seq_len=seq_len) self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): x = x + self.drop_path(self.mlp_channels(self.norm(x))) return x class MlpMixer(nn.Module): def __init__(self, num_classes=1000, img_size=224, in_chans=3, patch_size=16, num_blocks=8, embed_dim=512, mlp_ratio=(0.5, 4.0), block_layer=MixerBlock, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-06), act_layer=nn.GELU, drop_rate=0.0, proj_drop_rate=0.0, drop_path_rate=0.0, nlhb=False, stem_norm=False, global_pool='avg'): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = self.embed_dim = embed_dim self.grad_checkpointing = False self.stem = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if stem_norm else None) reduction = self.stem.feat_ratio() if hasattr(self.stem, 'feat_ratio') else patch_size self.blocks = nn.Sequential(*[block_layer(embed_dim, self.stem.num_patches, mlp_ratio, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, drop=proj_drop_rate, drop_path=drop_path_rate) for _ in range(num_blocks)]) self.feature_info = [dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=reduction) for i in range(num_blocks)] self.norm = norm_layer(embed_dim) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() self.init_weights(nlhb=nlhb) @torch.jit.ignore def init_weights(self, nlhb=False): head_bias = -math.log(self.num_classes) if nlhb else 0.0 named_apply(partial(_init_weights, head_bias=head_bias), module=self) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks=[('^blocks\\.(\\d+)', None), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: 
bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' reshape = output_fmt == 'NCHW' intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) (B, _, height, width) = x.shape x = self.stem(x) if torch.jit.is_scripting() or not stop_early: blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for (i, blk) in enumerate(blocks): x = blk(x) if i in take_indices: intermediates.append(self.norm(x) if norm else x) if reshape: (H, W) = self.stem.dynamic_feat_size((height, width)) intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] if intermediates_only: return intermediates x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool == 'avg': x = x.mean(dim=1) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module: nn.Module, name: str, head_bias: float=0.0, flax=False): if isinstance(module, nn.Linear): if name.startswith('head'): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) elif flax: lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-06) else: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def checkpoint_filter_fn(state_dict, model): if 'patch_embed.proj.weight' in state_dict: out_dict = {} for (k, v) in state_dict.items(): k = k.replace('patch_embed.', 'stem.') k = k.replace('attn.', 'linear_tokens.') k = k.replace('mlp.', 'mlp_channels.') k = k.replace('gamma_', 'ls') if k.endswith('.alpha') or k.endswith('.beta'): v = v.reshape(1, 1, -1) out_dict[k] = v return out_dict return state_dict def _create_mixer(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) model = build_model_with_cfg(MlpMixer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.875, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'stem.proj', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'mixer_s32_224.untrained': _cfg(), 'mixer_s16_224.untrained': _cfg(), 'mixer_b32_224.untrained': _cfg(), 
'mixer_b16_224.goog_in21k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth'), 'mixer_b16_224.goog_in21k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth', num_classes=21843), 'mixer_l32_224.untrained': _cfg(), 'mixer_l16_224.goog_in21k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224-92f9adc4.pth'), 'mixer_l16_224.goog_in21k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224_in21k-846aa33c.pth', num_classes=21843), 'mixer_b16_224.miil_in21k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil_in21k-2a558a71.pth', mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0), crop_pct=0.875, interpolation='bilinear', num_classes=11221), 'mixer_b16_224.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil-9229a591.pth', mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0), crop_pct=0.875, interpolation='bilinear'), 'gmixer_12_224.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'gmixer_24_224.ra3_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmixer_24_224_raa-7daf7ae6.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_12_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_12_no_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_24_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_24_no_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_36_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_36_no_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_big_24_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_no_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_12_224.fb_distilled_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_24_224.fb_distilled_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_36_224.fb_distilled_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_36_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_big_24_224.fb_distilled_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_big_24_224.fb_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_12_224.fb_dino': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dino.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_24_224.fb_dino': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dino.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 
'gmlp_ti16_224.untrained': _cfg(), 'gmlp_s16_224.ra3_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth'), 'gmlp_b16_224.untrained': _cfg()}) @register_model def mixer_s32_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs) model = _create_mixer('mixer_s32_224', pretrained=pretrained, **model_args) return model @register_model def mixer_s16_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs) model = _create_mixer('mixer_s16_224', pretrained=pretrained, **model_args) return model @register_model def mixer_b32_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs) model = _create_mixer('mixer_b32_224', pretrained=pretrained, **model_args) return model @register_model def mixer_b16_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) model = _create_mixer('mixer_b16_224', pretrained=pretrained, **model_args) return model @register_model def mixer_l32_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs) model = _create_mixer('mixer_l32_224', pretrained=pretrained, **model_args) return model @register_model def mixer_l16_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs) model = _create_mixer('mixer_l16_224', pretrained=pretrained, **model_args) return model @register_model def gmixer_12_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=(1.0, 4.0), mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) model = _create_mixer('gmixer_12_224', pretrained=pretrained, **model_args) return model @register_model def gmixer_24_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0), mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_12_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_12_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_24_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-05), norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_24_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_36_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-06), norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_36_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_big_24_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-06), norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args) return model @register_model def 
gmlp_ti16_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=16, num_blocks=30, embed_dim=128, mlp_ratio=6, block_layer=SpatialGatingBlock, mlp_layer=GatedMlp, **kwargs) model = _create_mixer('gmlp_ti16_224', pretrained=pretrained, **model_args) return model @register_model def gmlp_s16_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=16, num_blocks=30, embed_dim=256, mlp_ratio=6, block_layer=SpatialGatingBlock, mlp_layer=GatedMlp, **kwargs) model = _create_mixer('gmlp_s16_224', pretrained=pretrained, **model_args) return model @register_model def gmlp_b16_224(pretrained=False, **kwargs) -> MlpMixer: model_args = dict(patch_size=16, num_blocks=30, embed_dim=512, mlp_ratio=6, block_layer=SpatialGatingBlock, mlp_layer=GatedMlp, **kwargs) model = _create_mixer('gmlp_b16_224', pretrained=pretrained, **model_args) return model register_model_deprecations(__name__, {'mixer_b16_224_in21k': 'mixer_b16_224.goog_in21k_ft_in1k', 'mixer_l16_224_in21k': 'mixer_l16_224.goog_in21k_ft_in1k', 'mixer_b16_224_miil': 'mixer_b16_224.miil_in21k_ft_in1k', 'mixer_b16_224_miil_in21k': 'mixer_b16_224.miil_in21k', 'resmlp_12_distilled_224': 'resmlp_12_224.fb_distilled_in1k', 'resmlp_24_distilled_224': 'resmlp_24_224.fb_distilled_in1k', 'resmlp_36_distilled_224': 'resmlp_36_224.fb_distilled_in1k', 'resmlp_big_24_distilled_224': 'resmlp_big_24_224.fb_distilled_in1k', 'resmlp_big_24_224_in22ft1k': 'resmlp_big_24_224.fb_in22k_ft_in1k', 'resmlp_12_224_dino': 'resmlp_12_224', 'resmlp_24_224_dino': 'resmlp_24_224'}) # File: pytorch-image-models-main/timm/models/mobilenetv3.py """""" from functools import partial from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import SelectAdaptivePool2d, Linear, LayerType, PadType, create_conv2d, get_norm_act_layer from ._builder import build_model_with_cfg, pretrained_cfg_for_features from ._efficientnet_blocks import SqueezeExcite from ._efficientnet_builder import BlockArgs, EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT from ._features import FeatureInfo, FeatureHooks, feature_take_indices from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['MobileNetV3', 'MobileNetV3Features'] class MobileNetV3(nn.Module): def __init__(self, block_args: BlockArgs, num_classes: int=1000, in_chans: int=3, stem_size: int=16, fix_stem: bool=False, num_features: int=1280, head_bias: bool=True, head_norm: bool=False, pad_type: str='', act_layer: Optional[LayerType]=None, norm_layer: Optional[LayerType]=None, aa_layer: Optional[LayerType]=None, se_layer: Optional[LayerType]=None, se_from_exp: bool=True, round_chs_fn: Callable=round_channels, drop_rate: float=0.0, drop_path_rate: float=0.0, layer_scale_init_value: Optional[float]=None, global_pool: str='avg'): super(MobileNetV3, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d norm_act_layer = get_norm_act_layer(norm_layer, act_layer) se_layer = se_layer or SqueezeExcite self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = 
create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) self.bn1 = norm_act_layer(stem_size, inplace=True) builder = EfficientNetBuilder(output_stride=32, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, layer_scale_init_value=layer_scale_init_value) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = builder.features self.stage_ends = [f['stage'] for f in self.feature_info] self.num_features = builder.in_chs self.head_hidden_size = num_features self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) num_pooled_chs = self.num_features * self.global_pool.feat_mult() if head_norm: self.conv_head = create_conv2d(num_pooled_chs, self.head_hidden_size, 1, padding=pad_type) self.norm_head = norm_act_layer(self.head_hidden_size) self.act2 = nn.Identity() else: self.conv_head = create_conv2d(num_pooled_chs, self.head_hidden_size, 1, padding=pad_type, bias=head_bias) self.norm_head = nn.Identity() self.act2 = act_layer(inplace=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity() efficientnet_init_weights(self) def as_sequential(self): layers = [self.conv_stem, self.bn1] layers.extend(self.blocks) layers.extend([self.global_pool, self.conv_head, self.norm_head, self.act2]) layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse: bool=False): return dict(stem='^conv_stem|bn1', blocks='^blocks\\.(\\d+)' if coarse else '^blocks\\.(\\d+)\\.(\\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable: bool=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classifier def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity() def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False, extra_blocks: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' if stop_early: assert intermediates_only, 'Must use intermediates_only for early stopping.' 
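# Index bookkeeping below treats the stem output as feature index 0 and block stage i as index i + 1; unless extra_blocks is set, `indices` selects among stage-end features only. A hedged usage sketch (model name and input shape are illustrative):
#     import torch, timm
#     m = timm.create_model('mobilenetv3_large_100', pretrained=False)
#     feats = m.forward_intermediates(torch.randn(1, 3, 224, 224), indices=2, intermediates_only=True)
#     # feats is a list of the 2 deepest selected NCHW feature maps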
intermediates = [] if extra_blocks: (take_indices, max_index) = feature_take_indices(len(self.blocks) + 1, indices) else: (take_indices, max_index) = feature_take_indices(len(self.stage_ends), indices) take_indices = [self.stage_ends[i] for i in take_indices] max_index = self.stage_ends[max_index] feat_idx = 0 x = self.conv_stem(x) x = self.bn1(x) if feat_idx in take_indices: intermediates.append(x) if torch.jit.is_scripting() or not stop_early: blocks = self.blocks else: blocks = self.blocks[:max_index] for blk in blocks: feat_idx += 1 x = blk(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True, extra_blocks: bool=False): if extra_blocks: (take_indices, max_index) = feature_take_indices(len(self.blocks) + 1, indices) else: (take_indices, max_index) = feature_take_indices(len(self.stage_ends), indices) max_index = self.stage_ends[max_index] self.blocks = self.blocks[:max_index] if max_index < len(self.blocks): self.conv_head = nn.Identity() self.norm_head = nn.Identity() if prune_head: self.conv_head = nn.Identity() self.norm_head = nn.Identity() self.reset_classifier(0, '') return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.conv_stem(x) x = self.bn1(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool=False) -> torch.Tensor: x = self.global_pool(x) x = self.conv_head(x) x = self.norm_head(x) x = self.act2(x) x = self.flatten(x) if self.drop_rate > 0.0: x = F.dropout(x, p=self.drop_rate, training=self.training) if pre_logits: return x return self.classifier(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x class MobileNetV3Features(nn.Module): def __init__(self, block_args: BlockArgs, out_indices: Tuple[int, ...]=(0, 1, 2, 3, 4), feature_location: str='bottleneck', in_chans: int=3, stem_size: int=16, fix_stem: bool=False, output_stride: int=32, pad_type: PadType='', round_chs_fn: Callable=round_channels, se_from_exp: bool=True, act_layer: Optional[LayerType]=None, norm_layer: Optional[LayerType]=None, aa_layer: Optional[LayerType]=None, se_layer: Optional[LayerType]=None, drop_rate: float=0.0, drop_path_rate: float=0.0, layer_scale_init_value: Optional[float]=None): super(MobileNetV3Features, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d se_layer = se_layer or SqueezeExcite self.drop_rate = drop_rate self.grad_checkpointing = False if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) self.bn1 = norm_layer(stem_size) self.act1 = act_layer(inplace=True) builder = EfficientNetBuilder(output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, layer_scale_init_value=layer_scale_init_value, feature_location=feature_location) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = FeatureInfo(builder.features, out_indices) self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()} efficientnet_init_weights(self) 
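# Two feature-extraction paths are configured below: the default 'bottleneck' feature_location collects stage outputs inline in forward() via the _stage_out_idx map, while any other location registers FeatureHooks on the named submodules and reads their captured outputs after the blocks run.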
self.feature_hooks = None if feature_location != 'bottleneck': hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) self.feature_hooks = FeatureHooks(hooks, self.named_modules()) @torch.jit.ignore def set_grad_checkpointing(self, enable: bool=True): self.grad_checkpointing = enable def forward(self, x: torch.Tensor) -> List[torch.Tensor]: x = self.conv_stem(x) x = self.bn1(x) x = self.act1(x) if self.feature_hooks is None: features = [] if 0 in self._stage_out_idx: features.append(x) for (i, b) in enumerate(self.blocks): if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint(b, x) else: x = b(x) if i + 1 in self._stage_out_idx: features.append(x) return features else: self.blocks(x) out = self.feature_hooks.get_output(x.device) return list(out.values()) def _create_mnv3(variant: str, pretrained: bool=False, **kwargs) -> MobileNetV3: features_mode = '' model_cls = MobileNetV3 kwargs_filter = None if kwargs.pop('features_only', False): if 'feature_cfg' in kwargs or 'feature_cls' in kwargs: features_mode = 'cfg' else: kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'head_norm', 'global_pool') model_cls = MobileNetV3Features features_mode = 'cls' model = build_model_with_cfg(model_cls, variant, pretrained, features_only=features_mode == 'cfg', pretrained_strict=features_mode != 'cls', kwargs_filter=kwargs_filter, **kwargs) if features_mode == 'cls': model.default_cfg = pretrained_cfg_for_features(model.default_cfg) return model def _gen_mobilenet_v3_rw(variant: str, channel_multiplier: float=1.0, pretrained: bool=False, **kwargs) -> MobileNetV3: arch_def = [['ds_r1_k3_s1_e1_c16_nre_noskip'], ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r3_k5_s2_e3_c40_se0.25_nre'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['cn_r1_k1_s1_c960']] model_kwargs = dict(block_args=decode_arch_def(arch_def), head_bias=False, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid'), **kwargs) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_v3(variant: str, channel_multiplier: float=1.0, depth_multiplier: float=1.0, group_size=None, pretrained: bool=False, **kwargs) -> MobileNetV3: if 'small' in variant: num_features = 1024 if 'minimal' in variant: act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [['ds_r1_k3_s2_e1_c16'], ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], ['ir_r2_k3_s1_e3_c48'], ['ir_r3_k3_s2_e6_c96'], ['cn_r1_k1_s1_c576']] else: act_layer = resolve_act_layer(kwargs, 'hard_swish') arch_def = [['ds_r1_k3_s2_e1_c16_se0.25_nre'], ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], ['ir_r2_k5_s1_e3_c48_se0.25'], ['ir_r3_k5_s2_e6_c96_se0.25'], ['cn_r1_k1_s1_c576']] else: num_features = 1280 if 'minimal' in variant: act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [['ds_r1_k3_s1_e1_c16'], ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], ['ir_r3_k3_s2_e3_c40'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112'], ['ir_r3_k3_s2_e6_c160'], ['cn_r1_k1_s1_c960']] else: act_layer = resolve_act_layer(kwargs, 'hard_swish') arch_def = 
[['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r3_k5_s2_e3_c40_se0.25_nre'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['cn_r1_k1_s1_c960']] se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, group_size=group_size), num_features=num_features, stem_size=16, fix_stem=channel_multiplier < 0.75, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=act_layer, se_layer=se_layer, **kwargs) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _gen_fbnetv3(variant: str, channel_multiplier: float=1.0, pretrained: bool=False, **kwargs): vl = variant.split('_')[-1] if vl in ('a', 'b'): stem_size = 16 arch_def = [['ds_r2_k3_s1_e1_c16'], ['ir_r1_k5_s2_e4_c24', 'ir_r3_k5_s1_e2_c24'], ['ir_r1_k5_s2_e5_c40_se0.25', 'ir_r4_k5_s1_e3_c40_se0.25'], ['ir_r1_k5_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], ['ir_r1_k3_s1_e5_c120_se0.25', 'ir_r5_k5_s1_e3_c120_se0.25'], ['ir_r1_k3_s2_e6_c184_se0.25', 'ir_r5_k5_s1_e4_c184_se0.25', 'ir_r1_k5_s1_e6_c224_se0.25'], ['cn_r1_k1_s1_c1344']] elif vl == 'd': stem_size = 24 arch_def = [['ds_r2_k3_s1_e1_c16'], ['ir_r1_k3_s2_e5_c24', 'ir_r5_k3_s1_e2_c24'], ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r4_k3_s1_e3_c40_se0.25'], ['ir_r1_k3_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], ['ir_r1_k3_s1_e5_c128_se0.25', 'ir_r6_k5_s1_e3_c128_se0.25'], ['ir_r1_k3_s2_e6_c208_se0.25', 'ir_r5_k5_s1_e5_c208_se0.25', 'ir_r1_k5_s1_e6_c240_se0.25'], ['cn_r1_k1_s1_c1440']] elif vl == 'g': stem_size = 32 arch_def = [['ds_r3_k3_s1_e1_c24'], ['ir_r1_k5_s2_e4_c40', 'ir_r4_k5_s1_e2_c40'], ['ir_r1_k5_s2_e4_c56_se0.25', 'ir_r4_k5_s1_e3_c56_se0.25'], ['ir_r1_k5_s2_e5_c104', 'ir_r4_k3_s1_e3_c104'], ['ir_r1_k3_s1_e5_c160_se0.25', 'ir_r8_k5_s1_e3_c160_se0.25'], ['ir_r1_k3_s2_e6_c264_se0.25', 'ir_r6_k5_s1_e5_c264_se0.25', 'ir_r2_k5_s1_e6_c288_se0.25'], ['cn_r1_k1_s1_c1728']] else: raise NotImplementedError(f'Unknown fbnetv3 variant {vl}') round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.95) se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=round_chs_fn) act_layer = resolve_act_layer(kwargs, 'hard_swish') model_kwargs = dict(block_args=decode_arch_def(arch_def), num_features=1984, head_bias=False, stem_size=stem_size, round_chs_fn=round_chs_fn, se_from_exp=False, norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=act_layer, se_layer=se_layer, **kwargs) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _gen_lcnet(variant: str, channel_multiplier: float=1.0, pretrained: bool=False, **kwargs): arch_def = [['dsa_r1_k3_s1_c32'], ['dsa_r2_k3_s2_c64'], ['dsa_r2_k3_s2_c128'], ['dsa_r1_k3_s2_c256', 'dsa_r1_k5_s1_c256'], ['dsa_r4_k5_s1_c256'], ['dsa_r2_k5_s2_c512_se0.25']] model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=16, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU), num_features=1280, **kwargs) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_v4(variant: str, channel_multiplier: float=1.0, group_size=None,
pretrained: bool=False, **kwargs) -> MobileNetV3: num_features = 1280 if 'hybrid' in variant: layer_scale_init_value = 1e-05 if 'medium' in variant: stem_size = 32 act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [['er_r1_k3_s2_e4_c48'], ['uir_r1_a3_k5_s2_e4_c80', 'uir_r1_a3_k3_s1_e2_c80'], ['uir_r1_a3_k5_s2_e6_c160', 'uir_r1_a0_k0_s1_e2_c160', 'uir_r1_a3_k3_s1_e4_c160', 'uir_r1_a3_k5_s1_e4_c160', 'mqa_r1_k3_h4_s1_v2_d64_c160', 'uir_r1_a3_k3_s1_e4_c160', 'mqa_r1_k3_h4_s1_v2_d64_c160', 'uir_r1_a3_k0_s1_e4_c160', 'mqa_r1_k3_h4_s1_v2_d64_c160', 'uir_r1_a3_k3_s1_e4_c160', 'mqa_r1_k3_h4_s1_v2_d64_c160', 'uir_r1_a3_k0_s1_e4_c160'], ['uir_r1_a5_k5_s2_e6_c256', 'uir_r1_a5_k5_s1_e4_c256', 'uir_r2_a3_k5_s1_e4_c256', 'uir_r1_a0_k0_s1_e2_c256', 'uir_r1_a3_k5_s1_e2_c256', 'uir_r1_a0_k0_s1_e2_c256', 'uir_r1_a0_k0_s1_e4_c256', 'mqa_r1_k3_h4_s1_d64_c256', 'uir_r1_a3_k0_s1_e4_c256', 'mqa_r1_k3_h4_s1_d64_c256', 'uir_r1_a5_k5_s1_e4_c256', 'mqa_r1_k3_h4_s1_d64_c256', 'uir_r1_a5_k0_s1_e4_c256', 'mqa_r1_k3_h4_s1_d64_c256', 'uir_r1_a5_k0_s1_e4_c256'], ['cn_r1_k1_s1_c960']] elif 'large' in variant: stem_size = 24 act_layer = resolve_act_layer(kwargs, 'gelu') arch_def = [['er_r1_k3_s2_e4_c48'], ['uir_r1_a3_k5_s2_e4_c96', 'uir_r1_a3_k3_s1_e4_c96'], ['uir_r1_a3_k5_s2_e4_c192', 'uir_r3_a3_k3_s1_e4_c192', 'uir_r1_a3_k5_s1_e4_c192', 'uir_r2_a5_k3_s1_e4_c192', 'mqa_r1_k3_h8_s1_v2_d48_c192', 'uir_r1_a5_k3_s1_e4_c192', 'mqa_r1_k3_h8_s1_v2_d48_c192', 'uir_r1_a5_k3_s1_e4_c192', 'mqa_r1_k3_h8_s1_v2_d48_c192', 'uir_r1_a5_k3_s1_e4_c192', 'mqa_r1_k3_h8_s1_v2_d48_c192', 'uir_r1_a3_k0_s1_e4_c192'], ['uir_r4_a5_k5_s2_e4_c512', 'uir_r1_a5_k0_s1_e4_c512', 'uir_r1_a5_k3_s1_e4_c512', 'uir_r2_a5_k0_s1_e4_c512', 'uir_r1_a5_k3_s1_e4_c512', 'uir_r1_a5_k5_s1_e4_c512', 'mqa_r1_k3_h8_s1_d64_c512', 'uir_r1_a5_k0_s1_e4_c512', 'mqa_r1_k3_h8_s1_d64_c512', 'uir_r1_a5_k0_s1_e4_c512', 'mqa_r1_k3_h8_s1_d64_c512', 'uir_r1_a5_k0_s1_e4_c512', 'mqa_r1_k3_h8_s1_d64_c512', 'uir_r1_a5_k0_s1_e4_c512'], ['cn_r1_k1_s1_c960']] else: assert False, f'Unknown variant {variant}.' 
else: layer_scale_init_value = None if 'small' in variant: stem_size = 32 act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [['cn_r1_k3_s2_e1_c32', 'cn_r1_k1_s1_e1_c32'], ['cn_r1_k3_s2_e1_c96', 'cn_r1_k1_s1_e1_c64'], ['uir_r1_a5_k5_s2_e3_c96', 'uir_r4_a0_k3_s1_e2_c96', 'uir_r1_a3_k0_s1_e4_c96'], ['uir_r1_a3_k3_s2_e6_c128', 'uir_r1_a5_k5_s1_e4_c128', 'uir_r1_a0_k5_s1_e4_c128', 'uir_r1_a0_k5_s1_e3_c128', 'uir_r2_a0_k3_s1_e4_c128'], ['cn_r1_k1_s1_c960']] elif 'medium' in variant: stem_size = 32 act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [['er_r1_k3_s2_e4_c48'], ['uir_r1_a3_k5_s2_e4_c80', 'uir_r1_a3_k3_s1_e2_c80'], ['uir_r1_a3_k5_s2_e6_c160', 'uir_r2_a3_k3_s1_e4_c160', 'uir_r1_a3_k5_s1_e4_c160', 'uir_r1_a3_k3_s1_e4_c160', 'uir_r1_a3_k0_s1_e4_c160', 'uir_r1_a0_k0_s1_e2_c160', 'uir_r1_a3_k0_s1_e4_c160'], ['uir_r1_a5_k5_s2_e6_c256', 'uir_r1_a5_k5_s1_e4_c256', 'uir_r2_a3_k5_s1_e4_c256', 'uir_r1_a0_k0_s1_e4_c256', 'uir_r1_a3_k0_s1_e4_c256', 'uir_r1_a3_k5_s1_e2_c256', 'uir_r1_a5_k5_s1_e4_c256', 'uir_r2_a0_k0_s1_e4_c256', 'uir_r1_a5_k0_s1_e2_c256'], ['cn_r1_k1_s1_c960']] elif 'large' in variant: stem_size = 24 act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [['er_r1_k3_s2_e4_c48'], ['uir_r1_a3_k5_s2_e4_c96', 'uir_r1_a3_k3_s1_e4_c96'], ['uir_r1_a3_k5_s2_e4_c192', 'uir_r3_a3_k3_s1_e4_c192', 'uir_r1_a3_k5_s1_e4_c192', 'uir_r5_a5_k3_s1_e4_c192', 'uir_r1_a3_k0_s1_e4_c192'], ['uir_r4_a5_k5_s2_e4_c512', 'uir_r1_a5_k0_s1_e4_c512', 'uir_r1_a5_k3_s1_e4_c512', 'uir_r2_a5_k0_s1_e4_c512', 'uir_r1_a5_k3_s1_e4_c512', 'uir_r1_a5_k5_s1_e4_c512', 'uir_r3_a5_k0_s1_e4_c512'], ['cn_r1_k1_s1_c960']] else: assert False, f'Unknown variant {variant}.' model_kwargs = dict(block_args=decode_arch_def(arch_def, group_size=group_size), head_bias=False, head_norm=True, num_features=num_features, stem_size=stem_size, fix_stem=channel_multiplier < 1.0, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=act_layer, layer_scale_init_value=layer_scale_init_value, **kwargs) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _cfg(url: str='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs} default_cfgs = generate_default_cfgs({'mobilenetv3_large_075.untrained': _cfg(url=''), 'mobilenetv3_large_100.ra_in1k': _cfg(interpolation='bicubic', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth', hf_hub_id='timm/'), 'mobilenetv3_large_100.ra4_e3600_r224_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0), 'mobilenetv3_large_100.miil_in21k_ft_in1k': _cfg(interpolation='bilinear', mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0), origin_url='https://github.com/Alibaba-MIIL/ImageNet21K', paper_ids='arXiv:2104.10972v4', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_1k_miil_78_0-66471c13.pth', hf_hub_id='timm/'), 'mobilenetv3_large_100.miil_in21k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_in21k_miil-d71cc17b.pth', hf_hub_id='timm/', 
origin_url='https://github.com/Alibaba-MIIL/ImageNet21K', paper_ids='arXiv:2104.10972v4', interpolation='bilinear', mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0), num_classes=11221), 'mobilenetv3_large_150d.ra4_e3600_r256_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 256, 256), crop_pct=0.95, pool_size=(8, 8), test_input_size=(3, 320, 320), test_crop_pct=1.0), 'mobilenetv3_small_050.lamb_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_050_lambc-4b7bbe87.pth', hf_hub_id='timm/', interpolation='bicubic'), 'mobilenetv3_small_075.lamb_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_075_lambc-384766db.pth', hf_hub_id='timm/', interpolation='bicubic'), 'mobilenetv3_small_100.lamb_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_100_lamb-266a294c.pth', hf_hub_id='timm/', interpolation='bicubic'), 'mobilenetv3_rw.rmsp_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth', hf_hub_id='timm/', interpolation='bicubic'), 'tf_mobilenetv3_large_075.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_large_100.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_large_minimal_100.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_small_075.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_small_100.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_small_minimal_100.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'fbnetv3_b.ra2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_b_224-ead5d2a1.pth', hf_hub_id='timm/', test_input_size=(3, 256, 256), crop_pct=0.95), 'fbnetv3_d.ra2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_d_224-c98bce42.pth', hf_hub_id='timm/', test_input_size=(3, 256, 256), crop_pct=0.95), 'fbnetv3_g.ra2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_g_240-0b1df83b.pth', hf_hub_id='timm/', input_size=(3, 240, 240), test_input_size=(3, 288, 288), crop_pct=0.95, pool_size=(8, 8)), 'lcnet_035.untrained': _cfg(), 'lcnet_050.ra2_in1k': 
_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_050-f447553b.pth', hf_hub_id='timm/', interpolation='bicubic'), 'lcnet_075.ra2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_075-318cad2c.pth', hf_hub_id='timm/', interpolation='bicubic'), 'lcnet_100.ra2_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_100-a929038c.pth', hf_hub_id='timm/', interpolation='bicubic'), 'lcnet_150.untrained': _cfg(), 'mobilenetv4_conv_small.e2400_r224_in1k': _cfg(hf_hub_id='timm/', test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'), 'mobilenetv4_conv_small.e1200_r224_in1k': _cfg(hf_hub_id='timm/', test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'), 'mobilenetv4_conv_medium.e500_r256_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_medium.e500_r224_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_large.e600_r384_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_large.e500_r256_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_medium.ix_e550_r256_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_medium.ix_e550_r384_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_medium.e500_r224_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_medium.e200_r256_in12k': _cfg(hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_large.ix_e600_r384_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_large.e600_r384_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_aa_medium.untrained': _cfg(input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, interpolation='bicubic'), 'mobilenetv4_conv_blur_medium.e500_r224_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=0.95, test_input_size=(3, 544, 544), test_crop_pct=1.0, interpolation='bicubic'), 
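# NOTE: cfgs that set test_input_size / test_crop_pct advertise a larger eval
# resolution than the train-time input_size (e.g. 448 train -> 544 test above).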
'mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 480, 480), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_aa_large.e600_r384_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 480, 480), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_aa_large.e230_r384_in12k': _cfg(hf_hub_id='timm/', num_classes=11821, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_medium_075.untrained': _cfg(crop_pct=0.95, interpolation='bicubic'), 'mobilenetv4_hybrid_large_075.untrained': _cfg(input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, interpolation='bicubic')}) @register_model def mobilenetv3_large_075(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_large_100(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_large_150d(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v3('mobilenetv3_large_150d', 1.5, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_small_050(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v3('mobilenetv3_small_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_small_075(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_small_100(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_rw(pretrained: bool=False, **kwargs) -> MobileNetV3: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_large_075(pretrained: bool=False, **kwargs) -> MobileNetV3: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_large_100(pretrained: bool=False, **kwargs) -> MobileNetV3: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_large_minimal_100(pretrained: bool=False, **kwargs) -> MobileNetV3: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_small_075(pretrained: bool=False, **kwargs) -> MobileNetV3: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_small_100(pretrained: bool=False, **kwargs) -> MobileNetV3: 
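# tf_* variants mirror the original TensorFlow ports: the bodies below pin bn_eps
# to BN_EPS_TF_DEFAULT and use 'same'-style padding so converted weights match
# the TF graphs numerically.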
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_small_minimal_100(pretrained: bool=False, **kwargs) -> MobileNetV3: kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def fbnetv3_b(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_fbnetv3('fbnetv3_b', pretrained=pretrained, **kwargs) return model @register_model def fbnetv3_d(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs) return model @register_model def fbnetv3_g(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_fbnetv3('fbnetv3_g', pretrained=pretrained, **kwargs) return model @register_model def lcnet_035(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_lcnet('lcnet_035', 0.35, pretrained=pretrained, **kwargs) return model @register_model def lcnet_050(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_lcnet('lcnet_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def lcnet_075(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_lcnet('lcnet_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def lcnet_100(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_lcnet('lcnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def lcnet_150(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_lcnet('lcnet_150', 1.5, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_conv_small(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v4('mobilenetv4_conv_small', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_conv_medium(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v4('mobilenetv4_conv_medium', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_conv_large(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v4('mobilenetv4_conv_large', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_hybrid_medium(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v4('mobilenetv4_hybrid_medium', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_hybrid_large(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v4('mobilenetv4_hybrid_large', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_conv_aa_medium(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v4('mobilenetv4_conv_aa_medium', 1.0, pretrained=pretrained, aa_layer='avg', **kwargs) return model @register_model def mobilenetv4_conv_blur_medium(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v4('mobilenetv4_conv_blur_medium', 1.0, pretrained=pretrained, aa_layer='blurpc', **kwargs) return model @register_model def mobilenetv4_conv_aa_large(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v4('mobilenetv4_conv_aa_large', 1.0, pretrained=pretrained, aa_layer='avg', **kwargs) return model @register_model def mobilenetv4_hybrid_medium_075(pretrained: bool=False, **kwargs) -> MobileNetV3: model = 
_gen_mobilenet_v4('mobilenetv4_hybrid_medium_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_hybrid_large_075(pretrained: bool=False, **kwargs) -> MobileNetV3: model = _gen_mobilenet_v4('mobilenetv4_hybrid_large_075', 0.75, pretrained=pretrained, **kwargs) return model register_model_deprecations(__name__, {'mobilenetv3_large_100_miil': 'mobilenetv3_large_100.miil_in21k_ft_in1k', 'mobilenetv3_large_100_miil_in21k': 'mobilenetv3_large_100.miil_in21k'}) # File: pytorch-image-models-main/timm/models/mobilevit.py """""" import math from typing import Callable, Tuple, Optional import torch import torch.nn.functional as F from torch import nn from timm.layers import to_2tuple, make_divisible, GroupNorm1, ConvMlp, DropPath, is_exportable from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs, register_model_deprecations from .byobnet import register_block, ByoBlockCfg, ByoModelCfg, ByobNet, LayerFn, num_groups from .vision_transformer import Block as TransformerBlock __all__ = [] def _inverted_residual_block(d, c, s, br=4.0): return ByoBlockCfg(type='bottle', d=d, c=c, s=s, gs=1, br=br, block_kwargs=dict(bottle_in=True, linear_out=True)) def _mobilevit_block(d, c, s, transformer_dim, transformer_depth, patch_size=4, br=4.0): return (_inverted_residual_block(d=d, c=c, s=s, br=br), ByoBlockCfg(type='mobilevit', d=1, c=c, s=1, block_kwargs=dict(transformer_dim=transformer_dim, transformer_depth=transformer_depth, patch_size=patch_size))) def _mobilevitv2_block(d, c, s, transformer_depth, patch_size=2, br=2.0, transformer_br=0.5): return (_inverted_residual_block(d=d, c=c, s=s, br=br), ByoBlockCfg(type='mobilevit2', d=1, c=c, s=1, br=transformer_br, gs=1, block_kwargs=dict(transformer_depth=transformer_depth, patch_size=patch_size))) def _mobilevitv2_cfg(multiplier=1.0): chs = (64, 128, 256, 384, 512) if multiplier != 1.0: chs = tuple([int(c * multiplier) for c in chs]) cfg = ByoModelCfg(blocks=(_inverted_residual_block(d=1, c=chs[0], s=1, br=2.0), _inverted_residual_block(d=2, c=chs[1], s=2, br=2.0), _mobilevitv2_block(d=1, c=chs[2], s=2, transformer_depth=2), _mobilevitv2_block(d=1, c=chs[3], s=2, transformer_depth=4), _mobilevitv2_block(d=1, c=chs[4], s=2, transformer_depth=3)), stem_chs=int(32 * multiplier), stem_type='3x3', stem_pool='', downsample='', act_layer='silu') return cfg model_cfgs = dict(mobilevit_xxs=ByoModelCfg(blocks=(_inverted_residual_block(d=1, c=16, s=1, br=2.0), _inverted_residual_block(d=3, c=24, s=2, br=2.0), _mobilevit_block(d=1, c=48, s=2, transformer_dim=64, transformer_depth=2, patch_size=2, br=2.0), _mobilevit_block(d=1, c=64, s=2, transformer_dim=80, transformer_depth=4, patch_size=2, br=2.0), _mobilevit_block(d=1, c=80, s=2, transformer_dim=96, transformer_depth=3, patch_size=2, br=2.0)), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', act_layer='silu', num_features=320), mobilevit_xs=ByoModelCfg(blocks=(_inverted_residual_block(d=1, c=32, s=1), _inverted_residual_block(d=3, c=48, s=2), _mobilevit_block(d=1, c=64, s=2, transformer_dim=96, transformer_depth=2, patch_size=2), _mobilevit_block(d=1, c=80, s=2, transformer_dim=120, transformer_depth=4, patch_size=2), _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=3, patch_size=2)), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', act_layer='silu', num_features=384), mobilevit_s=ByoModelCfg(blocks=(_inverted_residual_block(d=1, c=32, s=1), 
_inverted_residual_block(d=3, c=64, s=2), _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2), _mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2), _mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2)), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', act_layer='silu', num_features=640), semobilevit_s=ByoModelCfg(blocks=(_inverted_residual_block(d=1, c=32, s=1), _inverted_residual_block(d=3, c=64, s=2), _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2), _mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2), _mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2)), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', attn_layer='se', attn_kwargs=dict(rd_ratio=1 / 8), num_features=640), mobilevitv2_050=_mobilevitv2_cfg(0.5), mobilevitv2_075=_mobilevitv2_cfg(0.75), mobilevitv2_125=_mobilevitv2_cfg(1.25), mobilevitv2_100=_mobilevitv2_cfg(1.0), mobilevitv2_150=_mobilevitv2_cfg(1.5), mobilevitv2_175=_mobilevitv2_cfg(1.75), mobilevitv2_200=_mobilevitv2_cfg(2.0)) @register_notrace_module class MobileVitBlock(nn.Module): def __init__(self, in_chs: int, out_chs: Optional[int]=None, kernel_size: int=3, stride: int=1, bottle_ratio: float=1.0, group_size: Optional[int]=None, dilation: Tuple[int, int]=(1, 1), mlp_ratio: float=2.0, transformer_dim: Optional[int]=None, transformer_depth: int=2, patch_size: int=8, num_heads: int=4, attn_drop: float=0.0, drop: int=0.0, no_fusion: bool=False, drop_path_rate: float=0.0, layers: LayerFn=None, transformer_norm_layer: Callable=nn.LayerNorm, **kwargs): super(MobileVitBlock, self).__init__() layers = layers or LayerFn() groups = num_groups(group_size, in_chs) out_chs = out_chs or in_chs transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs) self.conv_kxk = layers.conv_norm_act(in_chs, in_chs, kernel_size=kernel_size, stride=stride, groups=groups, dilation=dilation[0]) self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False) self.transformer = nn.Sequential(*[TransformerBlock(transformer_dim, mlp_ratio=mlp_ratio, num_heads=num_heads, qkv_bias=True, attn_drop=attn_drop, proj_drop=drop, drop_path=drop_path_rate, act_layer=layers.act, norm_layer=transformer_norm_layer) for _ in range(transformer_depth)]) self.norm = transformer_norm_layer(transformer_dim) self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1) if no_fusion: self.conv_fusion = None else: self.conv_fusion = layers.conv_norm_act(in_chs + out_chs, out_chs, kernel_size=kernel_size, stride=1) self.patch_size = to_2tuple(patch_size) self.patch_area = self.patch_size[0] * self.patch_size[1] def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x x = self.conv_kxk(x) x = self.conv_1x1(x) (patch_h, patch_w) = self.patch_size (B, C, H, W) = x.shape (new_h, new_w) = (math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w) (num_patch_h, num_patch_w) = (new_h // patch_h, new_w // patch_w) num_patches = num_patch_h * num_patch_w interpolate = False if new_h != H or new_w != W: x = F.interpolate(x, size=(new_h, new_w), mode='bilinear', align_corners=False) interpolate = True x = x.reshape(B * C * num_patch_h, patch_h, num_patch_w, patch_w).transpose(1, 2) x = x.reshape(B, C, num_patches, self.patch_area).transpose(1, 3).reshape(B * self.patch_area, num_patches, -1) x = self.transformer(x) x 
= self.norm(x) x = x.contiguous().view(B, self.patch_area, num_patches, -1) x = x.transpose(1, 3).reshape(B * C * num_patch_h, num_patch_w, patch_h, patch_w) x = x.transpose(1, 2).reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w) if interpolate: x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=False) x = self.conv_proj(x) if self.conv_fusion is not None: x = self.conv_fusion(torch.cat((shortcut, x), dim=1)) return x class LinearSelfAttention(nn.Module): def __init__(self, embed_dim: int, attn_drop: float=0.0, proj_drop: float=0.0, bias: bool=True) -> None: super().__init__() self.embed_dim = embed_dim self.qkv_proj = nn.Conv2d(in_channels=embed_dim, out_channels=1 + 2 * embed_dim, bias=bias, kernel_size=1) self.attn_drop = nn.Dropout(attn_drop) self.out_proj = nn.Conv2d(in_channels=embed_dim, out_channels=embed_dim, bias=bias, kernel_size=1) self.out_drop = nn.Dropout(proj_drop) def _forward_self_attn(self, x: torch.Tensor) -> torch.Tensor: qkv = self.qkv_proj(x) (query, key, value) = qkv.split([1, self.embed_dim, self.embed_dim], dim=1) context_scores = F.softmax(query, dim=-1) context_scores = self.attn_drop(context_scores) context_vector = (key * context_scores).sum(dim=-1, keepdim=True) out = F.relu(value) * context_vector.expand_as(value) out = self.out_proj(out) out = self.out_drop(out) return out @torch.jit.ignore() def _forward_cross_attn(self, x: torch.Tensor, x_prev: Optional[torch.Tensor]=None) -> torch.Tensor: (batch_size, in_dim, kv_patch_area, kv_num_patches) = x.shape (q_patch_area, q_num_patches) = x_prev.shape[-2:] assert kv_patch_area == q_patch_area, 'The number of pixels in a patch for query and key_value should be the same' qk = F.conv2d(x_prev, weight=self.qkv_proj.weight[:self.embed_dim + 1], bias=self.qkv_proj.bias[:self.embed_dim + 1] if self.qkv_proj.bias is not None else None) (query, key) = qk.split([1, self.embed_dim], dim=1) value = F.conv2d(x, weight=self.qkv_proj.weight[self.embed_dim + 1:], bias=self.qkv_proj.bias[self.embed_dim + 1:] if self.qkv_proj.bias is not None else None) context_scores = F.softmax(query, dim=-1) context_scores = self.attn_drop(context_scores) context_vector = (key * context_scores).sum(dim=-1, keepdim=True) out = F.relu(value) * context_vector.expand_as(value) out = self.out_proj(out) out = self.out_drop(out) return out def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor]=None) -> torch.Tensor: if x_prev is None: return self._forward_self_attn(x) else: return self._forward_cross_attn(x, x_prev=x_prev) class LinearTransformerBlock(nn.Module): def __init__(self, embed_dim: int, mlp_ratio: float=2.0, drop: float=0.0, attn_drop: float=0.0, drop_path: float=0.0, act_layer=None, norm_layer=None) -> None: super().__init__() act_layer = act_layer or nn.SiLU norm_layer = norm_layer or GroupNorm1 self.norm1 = norm_layer(embed_dim) self.attn = LinearSelfAttention(embed_dim=embed_dim, attn_drop=attn_drop, proj_drop=drop) self.drop_path1 = DropPath(drop_path) self.norm2 = norm_layer(embed_dim) self.mlp = ConvMlp(in_features=embed_dim, hidden_features=int(embed_dim * mlp_ratio), act_layer=act_layer, drop=drop) self.drop_path2 = DropPath(drop_path) def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor]=None) -> torch.Tensor: if x_prev is None: x = x + self.drop_path1(self.attn(self.norm1(x))) else: res = x x = self.norm1(x) x = self.attn(x, x_prev) x = self.drop_path1(x) + res x = x + self.drop_path2(self.mlp(self.norm2(x))) return x @register_notrace_module class MobileVitV2Block(nn.Module): def __init__(self, in_chs: int, out_chs:
Optional[int]=None, kernel_size: int=3, bottle_ratio: float=1.0, group_size: Optional[int]=1, dilation: Tuple[int, int]=(1, 1), mlp_ratio: float=2.0, transformer_dim: Optional[int]=None, transformer_depth: int=2, patch_size: int=8, attn_drop: float=0.0, drop: int=0.0, drop_path_rate: float=0.0, layers: LayerFn=None, transformer_norm_layer: Callable=GroupNorm1, **kwargs): super(MobileVitV2Block, self).__init__() layers = layers or LayerFn() groups = num_groups(group_size, in_chs) out_chs = out_chs or in_chs transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs) self.conv_kxk = layers.conv_norm_act(in_chs, in_chs, kernel_size=kernel_size, stride=1, groups=groups, dilation=dilation[0]) self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False) self.transformer = nn.Sequential(*[LinearTransformerBlock(transformer_dim, mlp_ratio=mlp_ratio, attn_drop=attn_drop, drop=drop, drop_path=drop_path_rate, act_layer=layers.act, norm_layer=transformer_norm_layer) for _ in range(transformer_depth)]) self.norm = transformer_norm_layer(transformer_dim) self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1, apply_act=False) self.patch_size = to_2tuple(patch_size) self.patch_area = self.patch_size[0] * self.patch_size[1] self.coreml_exportable = is_exportable() def forward(self, x: torch.Tensor) -> torch.Tensor: (B, C, H, W) = x.shape (patch_h, patch_w) = self.patch_size (new_h, new_w) = (math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w) (num_patch_h, num_patch_w) = (new_h // patch_h, new_w // patch_w) num_patches = num_patch_h * num_patch_w if new_h != H or new_w != W: x = F.interpolate(x, size=(new_h, new_w), mode='bilinear', align_corners=True) x = self.conv_kxk(x) x = self.conv_1x1(x) C = x.shape[1] if self.coreml_exportable: x = F.unfold(x, kernel_size=(patch_h, patch_w), stride=(patch_h, patch_w)) else: x = x.reshape(B, C, num_patch_h, patch_h, num_patch_w, patch_w).permute(0, 1, 3, 5, 2, 4) x = x.reshape(B, C, -1, num_patches) x = self.transformer(x) x = self.norm(x) if self.coreml_exportable: x = x.reshape(B, C * patch_h * patch_w, num_patch_h, num_patch_w) x = F.pixel_shuffle(x, upscale_factor=patch_h) else: x = x.reshape(B, C, patch_h, patch_w, num_patch_h, num_patch_w).permute(0, 1, 4, 2, 5, 3) x = x.reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w) x = self.conv_proj(x) return x register_block('mobilevit', MobileVitBlock) register_block('mobilevit2', MobileVitV2Block) def _create_mobilevit(variant, cfg_variant=None, pretrained=False, **kwargs): return build_model_with_cfg(ByobNet, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), **kwargs) def _create_mobilevit2(variant, cfg_variant=None, pretrained=False, **kwargs): return build_model_with_cfg(ByobNet, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': (0.0, 0.0, 0.0), 'std': (1.0, 1.0, 1.0), 'first_conv': 'stem.conv', 'classifier': 'head.fc', 'fixed_input_size': False, **kwargs} default_cfgs = generate_default_cfgs({'mobilevit_xxs.cvnets_in1k': _cfg(hf_hub_id='timm/'), 'mobilevit_xs.cvnets_in1k': _cfg(hf_hub_id='timm/'), 'mobilevit_s.cvnets_in1k': _cfg(hf_hub_id='timm/'), 
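# MobileViT 'cvnets' cfgs keep the upstream convention from _cfg above: 256x256
# inputs normalized to 0..1 (mean (0, 0, 0), std (1, 1, 1)) rather than ImageNet
# mean/std.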
'mobilevitv2_050.cvnets_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_075.cvnets_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_100.cvnets_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_125.cvnets_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_150.cvnets_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_175.cvnets_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_200.cvnets_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_150.cvnets_in22k_ft_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_175.cvnets_in22k_ft_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_200.cvnets_in22k_ft_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_150.cvnets_in22k_ft_in1k_384': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'mobilevitv2_175.cvnets_in22k_ft_in1k_384': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'mobilevitv2_200.cvnets_in22k_ft_in1k_384': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0)}) @register_model def mobilevit_xxs(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevit_xxs', pretrained=pretrained, **kwargs) @register_model def mobilevit_xs(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevit_xs', pretrained=pretrained, **kwargs) @register_model def mobilevit_s(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevit_s', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_050(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_050', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_075(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_075', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_100(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_100', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_125(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_125', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_150(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_150', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_175(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_175', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_200(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_200', pretrained=pretrained, **kwargs) register_model_deprecations(__name__, {'mobilevitv2_150_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k', 'mobilevitv2_175_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k', 'mobilevitv2_200_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k', 'mobilevitv2_150_384_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k_384', 'mobilevitv2_175_384_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k_384', 'mobilevitv2_200_384_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k_384'}) # File: pytorch-image-models-main/timm/models/mvitv2.py """""" import operator from collections import OrderedDict from dataclasses import dataclass from functools import partial, reduce from typing import Union, List, Tuple, Optional import torch import torch.utils.checkpoint as checkpoint from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, DropPath, trunc_normal_tf_, 
get_norm_layer, to_2tuple from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_function from ._registry import register_model, register_model_deprecations, generate_default_cfgs __all__ = ['MultiScaleVit', 'MultiScaleVitCfg'] @dataclass class MultiScaleVitCfg: depths: Tuple[int, ...] = (2, 3, 16, 3) embed_dim: Union[int, Tuple[int, ...]] = 96 num_heads: Union[int, Tuple[int, ...]] = 1 mlp_ratio: float = 4.0 pool_first: bool = False expand_attn: bool = True qkv_bias: bool = True use_cls_token: bool = False use_abs_pos: bool = False residual_pooling: bool = True mode: str = 'conv' kernel_qkv: Tuple[int, int] = (3, 3) stride_q: Optional[Tuple[Tuple[int, int]]] = ((1, 1), (2, 2), (2, 2), (2, 2)) stride_kv: Optional[Tuple[Tuple[int, int]]] = None stride_kv_adaptive: Optional[Tuple[int, int]] = (4, 4) patch_kernel: Tuple[int, int] = (7, 7) patch_stride: Tuple[int, int] = (4, 4) patch_padding: Tuple[int, int] = (3, 3) pool_type: str = 'max' rel_pos_type: str = 'spatial' act_layer: Union[str, Tuple[str, str]] = 'gelu' norm_layer: Union[str, Tuple[str, str]] = 'layernorm' norm_eps: float = 1e-06 def __post_init__(self): num_stages = len(self.depths) if not isinstance(self.embed_dim, (tuple, list)): self.embed_dim = tuple((self.embed_dim * 2 ** i for i in range(num_stages))) assert len(self.embed_dim) == num_stages if not isinstance(self.num_heads, (tuple, list)): self.num_heads = tuple((self.num_heads * 2 ** i for i in range(num_stages))) assert len(self.num_heads) == num_stages if self.stride_kv_adaptive is not None and self.stride_kv is None: _stride_kv = self.stride_kv_adaptive pool_kv_stride = [] for i in range(num_stages): if min(self.stride_q[i]) > 1: _stride_kv = [max(_stride_kv[d] // self.stride_q[i][d], 1) for d in range(len(_stride_kv))] pool_kv_stride.append(tuple(_stride_kv)) self.stride_kv = tuple(pool_kv_stride) def prod(iterable): return reduce(operator.mul, iterable, 1) class PatchEmbed(nn.Module): def __init__(self, dim_in=3, dim_out=768, kernel=(7, 7), stride=(4, 4), padding=(3, 3)): super().__init__() self.proj = nn.Conv2d(dim_in, dim_out, kernel_size=kernel, stride=stride, padding=padding) def forward(self, x) -> Tuple[torch.Tensor, List[int]]: x = self.proj(x) return (x.flatten(2).transpose(1, 2), x.shape[-2:]) @register_notrace_function def reshape_pre_pool(x, feat_size: List[int], has_cls_token: bool=True) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: (H, W) = feat_size if has_cls_token: (cls_tok, x) = (x[:, :, :1, :], x[:, :, 1:, :]) else: cls_tok = None x = x.reshape(-1, H, W, x.shape[-1]).permute(0, 3, 1, 2).contiguous() return (x, cls_tok) @register_notrace_function def reshape_post_pool(x, num_heads: int, cls_tok: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, List[int]]: feat_size = [x.shape[2], x.shape[3]] L_pooled = x.shape[2] * x.shape[3] x = x.reshape(-1, num_heads, x.shape[1], L_pooled).transpose(2, 3) if cls_tok is not None: x = torch.cat((cls_tok, x), dim=2) return (x, feat_size) @register_notrace_function def cal_rel_pos_type(attn: torch.Tensor, q: torch.Tensor, has_cls_token: bool, q_size: List[int], k_size: List[int], rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor): sp_idx = 1 if has_cls_token else 0 (q_h, q_w) = q_size (k_h, k_w) = k_size q_h_ratio = max(k_h / q_h, 1.0) k_h_ratio = max(q_h / k_h, 1.0) dist_h = torch.arange(q_h, device=q.device).unsqueeze(-1) * q_h_ratio - torch.arange(k_h, device=q.device).unsqueeze(0) * k_h_ratio dist_h += (k_h - 1) * k_h_ratio q_w_ratio = 
max(k_w / q_w, 1.0) k_w_ratio = max(q_w / k_w, 1.0) dist_w = torch.arange(q_w, device=q.device).unsqueeze(-1) * q_w_ratio - torch.arange(k_w, device=q.device).unsqueeze(0) * k_w_ratio dist_w += (k_w - 1) * k_w_ratio rel_h = rel_pos_h[dist_h.long()] rel_w = rel_pos_w[dist_w.long()] (B, n_head, q_N, dim) = q.shape r_q = q[:, :, sp_idx:].reshape(B, n_head, q_h, q_w, dim) rel_h = torch.einsum('byhwc,hkc->byhwk', r_q, rel_h) rel_w = torch.einsum('byhwc,wkc->byhwk', r_q, rel_w) attn[:, :, sp_idx:, sp_idx:] = (attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_h, q_w, k_h, k_w) + rel_h.unsqueeze(-1) + rel_w.unsqueeze(-2)).view(B, -1, q_h * q_w, k_h * k_w) return attn class MultiScaleAttentionPoolFirst(nn.Module): def __init__(self, dim, dim_out, feat_size, num_heads=8, qkv_bias=True, mode='conv', kernel_q=(1, 1), kernel_kv=(1, 1), stride_q=(1, 1), stride_kv=(1, 1), has_cls_token=True, rel_pos_type='spatial', residual_pooling=True, norm_layer=nn.LayerNorm): super().__init__() self.num_heads = num_heads self.dim_out = dim_out self.head_dim = dim_out // num_heads self.scale = self.head_dim ** (-0.5) self.has_cls_token = has_cls_token padding_q = tuple([int(q // 2) for q in kernel_q]) padding_kv = tuple([int(kv // 2) for kv in kernel_kv]) self.q = nn.Linear(dim, dim_out, bias=qkv_bias) self.k = nn.Linear(dim, dim_out, bias=qkv_bias) self.v = nn.Linear(dim, dim_out, bias=qkv_bias) self.proj = nn.Linear(dim_out, dim_out) if prod(kernel_q) == 1 and prod(stride_q) == 1: kernel_q = None if prod(kernel_kv) == 1 and prod(stride_kv) == 1: kernel_kv = None self.mode = mode self.unshared = mode == 'conv_unshared' (self.pool_q, self.pool_k, self.pool_v) = (None, None, None) (self.norm_q, self.norm_k, self.norm_v) = (None, None, None) if mode in ('avg', 'max'): pool_op = nn.MaxPool2d if mode == 'max' else nn.AvgPool2d if kernel_q: self.pool_q = pool_op(kernel_q, stride_q, padding_q) if kernel_kv: self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv) self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv) elif mode == 'conv' or mode == 'conv_unshared': dim_conv = dim // num_heads if mode == 'conv' else dim if kernel_q: self.pool_q = nn.Conv2d(dim_conv, dim_conv, kernel_q, stride=stride_q, padding=padding_q, groups=dim_conv, bias=False) self.norm_q = norm_layer(dim_conv) if kernel_kv: self.pool_k = nn.Conv2d(dim_conv, dim_conv, kernel_kv, stride=stride_kv, padding=padding_kv, groups=dim_conv, bias=False) self.norm_k = norm_layer(dim_conv) self.pool_v = nn.Conv2d(dim_conv, dim_conv, kernel_kv, stride=stride_kv, padding=padding_kv, groups=dim_conv, bias=False) self.norm_v = norm_layer(dim_conv) else: raise NotImplementedError(f'Unsupported model {mode}') self.rel_pos_type = rel_pos_type if self.rel_pos_type == 'spatial': assert feat_size[0] == feat_size[1] size = feat_size[0] q_size = size // stride_q[1] if len(stride_q) > 0 else size kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size rel_sp_dim = 2 * max(q_size, kv_size) - 1 self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) trunc_normal_tf_(self.rel_pos_h, std=0.02) trunc_normal_tf_(self.rel_pos_w, std=0.02) self.residual_pooling = residual_pooling def forward(self, x, feat_size: List[int]): (B, N, _) = x.shape fold_dim = 1 if self.unshared else self.num_heads x = x.reshape(B, N, fold_dim, -1).permute(0, 2, 1, 3) q = k = v = x if self.pool_q is not None: (q, q_tok) = reshape_pre_pool(q, feat_size, self.has_cls_token) q = self.pool_q(q) (q, q_size) = reshape_post_pool(q, 
self.num_heads, q_tok) else: q_size = feat_size if self.norm_q is not None: q = self.norm_q(q) if self.pool_k is not None: (k, k_tok) = reshape_pre_pool(k, feat_size, self.has_cls_token) k = self.pool_k(k) (k, k_size) = reshape_post_pool(k, self.num_heads, k_tok) else: k_size = feat_size if self.norm_k is not None: k = self.norm_k(k) if self.pool_v is not None: (v, v_tok) = reshape_pre_pool(v, feat_size, self.has_cls_token) v = self.pool_v(v) (v, v_size) = reshape_post_pool(v, self.num_heads, v_tok) else: v_size = feat_size if self.norm_v is not None: v = self.norm_v(v) q_N = q_size[0] * q_size[1] + int(self.has_cls_token) q = q.transpose(1, 2).reshape(B, q_N, -1) q = self.q(q).reshape(B, q_N, self.num_heads, -1).transpose(1, 2) k_N = k_size[0] * k_size[1] + int(self.has_cls_token) k = k.transpose(1, 2).reshape(B, k_N, -1) k = self.k(k).reshape(B, k_N, self.num_heads, -1).permute(0, 2, 3, 1) v_N = v_size[0] * v_size[1] + int(self.has_cls_token) v = v.transpose(1, 2).reshape(B, v_N, -1) v = self.v(v).reshape(B, v_N, self.num_heads, -1).transpose(1, 2) attn = (q * self.scale) @ k if self.rel_pos_type == 'spatial': attn = cal_rel_pos_type(attn, q, self.has_cls_token, q_size, k_size, self.rel_pos_h, self.rel_pos_w) attn = attn.softmax(dim=-1) x = attn @ v if self.residual_pooling: x = x + q x = x.transpose(1, 2).reshape(B, -1, self.dim_out) x = self.proj(x) return (x, q_size) class MultiScaleAttention(nn.Module): def __init__(self, dim, dim_out, feat_size, num_heads=8, qkv_bias=True, mode='conv', kernel_q=(1, 1), kernel_kv=(1, 1), stride_q=(1, 1), stride_kv=(1, 1), has_cls_token=True, rel_pos_type='spatial', residual_pooling=True, norm_layer=nn.LayerNorm): super().__init__() self.num_heads = num_heads self.dim_out = dim_out self.head_dim = dim_out // num_heads self.scale = self.head_dim ** (-0.5) self.has_cls_token = has_cls_token padding_q = tuple([int(q // 2) for q in kernel_q]) padding_kv = tuple([int(kv // 2) for kv in kernel_kv]) self.qkv = nn.Linear(dim, dim_out * 3, bias=qkv_bias) self.proj = nn.Linear(dim_out, dim_out) if prod(kernel_q) == 1 and prod(stride_q) == 1: kernel_q = None if prod(kernel_kv) == 1 and prod(stride_kv) == 1: kernel_kv = None self.mode = mode self.unshared = mode == 'conv_unshared' (self.norm_q, self.norm_k, self.norm_v) = (None, None, None) (self.pool_q, self.pool_k, self.pool_v) = (None, None, None) if mode in ('avg', 'max'): pool_op = nn.MaxPool2d if mode == 'max' else nn.AvgPool2d if kernel_q: self.pool_q = pool_op(kernel_q, stride_q, padding_q) if kernel_kv: self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv) self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv) elif mode == 'conv' or mode == 'conv_unshared': dim_conv = dim_out // num_heads if mode == 'conv' else dim_out if kernel_q: self.pool_q = nn.Conv2d(dim_conv, dim_conv, kernel_q, stride=stride_q, padding=padding_q, groups=dim_conv, bias=False) self.norm_q = norm_layer(dim_conv) if kernel_kv: self.pool_k = nn.Conv2d(dim_conv, dim_conv, kernel_kv, stride=stride_kv, padding=padding_kv, groups=dim_conv, bias=False) self.norm_k = norm_layer(dim_conv) self.pool_v = nn.Conv2d(dim_conv, dim_conv, kernel_kv, stride=stride_kv, padding=padding_kv, groups=dim_conv, bias=False) self.norm_v = norm_layer(dim_conv) else: raise NotImplementedError(f'Unsupported model {mode}') self.rel_pos_type = rel_pos_type if self.rel_pos_type == 'spatial': assert feat_size[0] == feat_size[1] size = feat_size[0] q_size = size // stride_q[1] if len(stride_q) > 0 else size kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size rel_sp_dim = 2 *
max(q_size, kv_size) - 1 self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) trunc_normal_tf_(self.rel_pos_h, std=0.02) trunc_normal_tf_(self.rel_pos_w, std=0.02) self.residual_pooling = residual_pooling def forward(self, x, feat_size: List[int]): (B, N, _) = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(dim=0) if self.pool_q is not None: (q, q_tok) = reshape_pre_pool(q, feat_size, self.has_cls_token) q = self.pool_q(q) (q, q_size) = reshape_post_pool(q, self.num_heads, q_tok) else: q_size = feat_size if self.norm_q is not None: q = self.norm_q(q) if self.pool_k is not None: (k, k_tok) = reshape_pre_pool(k, feat_size, self.has_cls_token) k = self.pool_k(k) (k, k_size) = reshape_post_pool(k, self.num_heads, k_tok) else: k_size = feat_size if self.norm_k is not None: k = self.norm_k(k) if self.pool_v is not None: (v, v_tok) = reshape_pre_pool(v, feat_size, self.has_cls_token) v = self.pool_v(v) (v, _) = reshape_post_pool(v, self.num_heads, v_tok) if self.norm_v is not None: v = self.norm_v(v) attn = q * self.scale @ k.transpose(-2, -1) if self.rel_pos_type == 'spatial': attn = cal_rel_pos_type(attn, q, self.has_cls_token, q_size, k_size, self.rel_pos_h, self.rel_pos_w) attn = attn.softmax(dim=-1) x = attn @ v if self.residual_pooling: x = x + q x = x.transpose(1, 2).reshape(B, -1, self.dim_out) x = self.proj(x) return (x, q_size) class MultiScaleBlock(nn.Module): def __init__(self, dim, dim_out, num_heads, feat_size, mlp_ratio=4.0, qkv_bias=True, drop_path=0.0, norm_layer=nn.LayerNorm, kernel_q=(1, 1), kernel_kv=(1, 1), stride_q=(1, 1), stride_kv=(1, 1), mode='conv', has_cls_token=True, expand_attn=False, pool_first=False, rel_pos_type='spatial', residual_pooling=True): super().__init__() proj_needed = dim != dim_out self.dim = dim self.dim_out = dim_out self.has_cls_token = has_cls_token self.norm1 = norm_layer(dim) self.shortcut_proj_attn = nn.Linear(dim, dim_out) if proj_needed and expand_attn else None if stride_q and prod(stride_q) > 1: kernel_skip = [s + 1 if s > 1 else s for s in stride_q] stride_skip = stride_q padding_skip = [int(skip // 2) for skip in kernel_skip] self.shortcut_pool_attn = nn.MaxPool2d(kernel_skip, stride_skip, padding_skip) else: self.shortcut_pool_attn = None att_dim = dim_out if expand_attn else dim attn_layer = MultiScaleAttentionPoolFirst if pool_first else MultiScaleAttention self.attn = attn_layer(dim, att_dim, num_heads=num_heads, feat_size=feat_size, qkv_bias=qkv_bias, kernel_q=kernel_q, kernel_kv=kernel_kv, stride_q=stride_q, stride_kv=stride_kv, norm_layer=norm_layer, has_cls_token=has_cls_token, mode=mode, rel_pos_type=rel_pos_type, residual_pooling=residual_pooling) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(att_dim) mlp_dim_out = dim_out self.shortcut_proj_mlp = nn.Linear(dim, dim_out) if proj_needed and (not expand_attn) else None self.mlp = Mlp(in_features=att_dim, hidden_features=int(att_dim * mlp_ratio), out_features=mlp_dim_out) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def _shortcut_pool(self, x, feat_size: List[int]): if self.shortcut_pool_attn is None: return x if self.has_cls_token: (cls_tok, x) = (x[:, :1, :], x[:, 1:, :]) else: cls_tok = None (B, L, C) = x.shape (H, W) = feat_size x = x.reshape(B, H, W, C).permute(0, 3, 1, 2).contiguous() x = self.shortcut_pool_attn(x) x = x.reshape(B, C, 
-1).transpose(1, 2) if cls_tok is not None: x = torch.cat((cls_tok, x), dim=1) return x def forward(self, x, feat_size: List[int]): x_norm = self.norm1(x) x_shortcut = x if self.shortcut_proj_attn is None else self.shortcut_proj_attn(x_norm) x_shortcut = self._shortcut_pool(x_shortcut, feat_size) (x, feat_size_new) = self.attn(x_norm, feat_size) x = x_shortcut + self.drop_path1(x) x_norm = self.norm2(x) x_shortcut = x if self.shortcut_proj_mlp is None else self.shortcut_proj_mlp(x_norm) x = x_shortcut + self.drop_path2(self.mlp(x_norm)) return (x, feat_size_new) class MultiScaleVitStage(nn.Module): def __init__(self, dim, dim_out, depth, num_heads, feat_size, mlp_ratio=4.0, qkv_bias=True, mode='conv', kernel_q=(1, 1), kernel_kv=(1, 1), stride_q=(1, 1), stride_kv=(1, 1), has_cls_token=True, expand_attn=False, pool_first=False, rel_pos_type='spatial', residual_pooling=True, norm_layer=nn.LayerNorm, drop_path=0.0): super().__init__() self.grad_checkpointing = False self.blocks = nn.ModuleList() if expand_attn: out_dims = (dim_out,) * depth else: out_dims = (dim,) * (depth - 1) + (dim_out,) for i in range(depth): attention_block = MultiScaleBlock(dim=dim, dim_out=out_dims[i], num_heads=num_heads, feat_size=feat_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, kernel_q=kernel_q, kernel_kv=kernel_kv, stride_q=stride_q if i == 0 else (1, 1), stride_kv=stride_kv, mode=mode, has_cls_token=has_cls_token, pool_first=pool_first, rel_pos_type=rel_pos_type, residual_pooling=residual_pooling, expand_attn=expand_attn, norm_layer=norm_layer, drop_path=drop_path[i] if isinstance(drop_path, (list, tuple)) else drop_path) dim = out_dims[i] self.blocks.append(attention_block) if i == 0: feat_size = tuple([size // stride for (size, stride) in zip(feat_size, stride_q)]) self.feat_size = feat_size def forward(self, x, feat_size: List[int]): for blk in self.blocks: if self.grad_checkpointing and (not torch.jit.is_scripting()): (x, feat_size) = checkpoint.checkpoint(blk, x, feat_size) else: (x, feat_size) = blk(x, feat_size) return (x, feat_size) class MultiScaleVit(nn.Module): def __init__(self, cfg: MultiScaleVitCfg, img_size: Tuple[int, int]=(224, 224), in_chans: int=3, global_pool: Optional[str]=None, num_classes: int=1000, drop_path_rate: float=0.0, drop_rate: float=0.0): super().__init__() img_size = to_2tuple(img_size) norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) self.num_classes = num_classes self.drop_rate = drop_rate if global_pool is None: global_pool = 'token' if cfg.use_cls_token else 'avg' self.global_pool = global_pool self.depths = tuple(cfg.depths) self.expand_attn = cfg.expand_attn embed_dim = cfg.embed_dim[0] self.patch_embed = PatchEmbed(dim_in=in_chans, dim_out=embed_dim, kernel=cfg.patch_kernel, stride=cfg.patch_stride, padding=cfg.patch_padding) patch_dims = (img_size[0] // cfg.patch_stride[0], img_size[1] // cfg.patch_stride[1]) num_patches = prod(patch_dims) if cfg.use_cls_token: self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.num_prefix_tokens = 1 pos_embed_dim = num_patches + 1 else: self.num_prefix_tokens = 0 self.cls_token = None pos_embed_dim = num_patches if cfg.use_abs_pos: self.pos_embed = nn.Parameter(torch.zeros(1, pos_embed_dim, embed_dim)) else: self.pos_embed = None num_stages = len(cfg.embed_dim) feat_size = patch_dims curr_stride = max(cfg.patch_stride) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] self.stages = nn.ModuleList() self.feature_info = [] for i in range(num_stages): if 
cfg.expand_attn: dim_out = cfg.embed_dim[i] else: dim_out = cfg.embed_dim[min(i + 1, num_stages - 1)] stage = MultiScaleVitStage(dim=embed_dim, dim_out=dim_out, depth=cfg.depths[i], num_heads=cfg.num_heads[i], feat_size=feat_size, mlp_ratio=cfg.mlp_ratio, qkv_bias=cfg.qkv_bias, mode=cfg.mode, pool_first=cfg.pool_first, expand_attn=cfg.expand_attn, kernel_q=cfg.kernel_qkv, kernel_kv=cfg.kernel_qkv, stride_q=cfg.stride_q[i], stride_kv=cfg.stride_kv[i], has_cls_token=cfg.use_cls_token, rel_pos_type=cfg.rel_pos_type, residual_pooling=cfg.residual_pooling, norm_layer=norm_layer, drop_path=dpr[i]) curr_stride *= max(cfg.stride_q[i]) self.feature_info += [dict(module=f'block.{i}', num_chs=dim_out, reduction=curr_stride)] embed_dim = dim_out feat_size = stage.feat_size self.stages.append(stage) self.num_features = self.head_hidden_size = embed_dim self.norm = norm_layer(embed_dim) self.head = nn.Sequential(OrderedDict([('drop', nn.Dropout(self.drop_rate)), ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())])) if self.pos_embed is not None: trunc_normal_tf_(self.pos_embed, std=0.02) if self.cls_token is not None: trunc_normal_tf_(self.cls_token, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_tf_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0.0) @torch.jit.ignore def no_weight_decay(self): return {k for (k, _) in self.named_parameters() if any((n in k for n in ['pos_embed', 'rel_pos_h', 'rel_pos_w', 'cls_token']))} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^patch_embed', blocks=[('^stages\\.(\\d+)', None), ('^norm', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Sequential(OrderedDict([('drop', nn.Dropout(self.drop_rate)), ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())])) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW', 'NLC'), 'Output shape must be NCHW or NLC.' 
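# --- Added note: a hedged usage sketch for the forward_intermediates() API
# defined here (not part of the original source). Assumes the standard timm
# factory entrypoint; 'mvitv2_tiny' is registered later in this file.
#
#   import torch, timm
#   model = timm.create_model('mvitv2_tiny', pretrained=False)
#   feats = model.forward_intermediates(
#       torch.randn(1, 3, 224, 224), indices=[0, 3], intermediates_only=True)
#   # With the default output_fmt='NCHW', each element is a B x C x H x W map
#   # at that stage's resolution; any prefix (cls) token is stripped before
#   # the reshape, per the branch below.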
reshape = output_fmt == 'NCHW' intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.stages), indices) (x, feat_size) = self.patch_embed(x) B = x.shape[0] if self.cls_token is not None: cls_tokens = self.cls_token.expand(B, -1, -1) x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed for (i, stage) in enumerate(self.stages): (x, feat_size) = stage(x, feat_size) if i in take_indices: if norm and i == len(self.stages) - 1: x_inter = self.norm(x) else: x_inter = x if reshape: if self.cls_token is not None: x_inter = x_inter[:, 1:] x_inter = x_inter.reshape(B, feat_size[0], feat_size[1], -1).permute(0, 3, 1, 2) intermediates.append(x_inter) if intermediates_only: return intermediates x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.stages), indices) if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): (x, feat_size) = self.patch_embed(x) (B, N, C) = x.shape if self.cls_token is not None: cls_tokens = self.cls_token.expand(B, -1, -1) x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed for stage in self.stages: (x, feat_size) = stage(x, feat_size) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool: if self.global_pool == 'avg': x = x[:, self.num_prefix_tokens:].mean(1) else: x = x[:, 0] return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'stages.0.blocks.0.norm1.weight' in state_dict: for k in state_dict.keys(): if 'rel_pos' in k: rel_pos = state_dict[k] dest_rel_pos_shape = model.state_dict()[k].shape if rel_pos.shape[0] != dest_rel_pos_shape[0]: rel_pos_resized = torch.nn.functional.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=dest_rel_pos_shape[0], mode='linear') state_dict[k] = rel_pos_resized.reshape(-1, dest_rel_pos_shape[0]).permute(1, 0) return state_dict import re if 'model_state' in state_dict: state_dict = state_dict['model_state'] depths = getattr(model, 'depths', None) expand_attn = getattr(model, 'expand_attn', True) assert depths is not None, 'model requires depth attribute to remap checkpoints' depth_map = {} block_idx = 0 for (stage_idx, d) in enumerate(depths): depth_map.update({i: (stage_idx, i - block_idx) for i in range(block_idx, block_idx + d)}) block_idx += d out_dict = {} for (k, v) in state_dict.items(): k = re.sub('blocks\\.(\\d+)', lambda x: f'stages.{depth_map[int(x.group(1))][0]}.blocks.{depth_map[int(x.group(1))][1]}', k) if expand_attn: k = re.sub('stages\\.(\\d+).blocks\\.(\\d+).proj', f'stages.\\1.blocks.\\2.shortcut_proj_attn', k) else: k = re.sub('stages\\.(\\d+).blocks\\.(\\d+).proj', f'stages.\\1.blocks.\\2.shortcut_proj_mlp', k) if 'head' in k: k = k.replace('head.projection', 'head.fc') out_dict[k] = v return out_dict model_cfgs = dict(mvitv2_tiny=MultiScaleVitCfg(depths=(1, 2, 5, 2)), mvitv2_small=MultiScaleVitCfg(depths=(1, 2, 11, 2)), mvitv2_base=MultiScaleVitCfg(depths=(2, 3, 16, 3)), mvitv2_large=MultiScaleVitCfg(depths=(2, 6, 36, 4), embed_dim=144, num_heads=2, expand_attn=False), mvitv2_small_cls=MultiScaleVitCfg(depths=(1, 2, 11, 2), use_cls_token=True), mvitv2_base_cls=MultiScaleVitCfg(depths=(2, 3, 16, 
3), use_cls_token=True), mvitv2_large_cls=MultiScaleVitCfg(depths=(2, 6, 36, 4), embed_dim=144, num_heads=2, use_cls_token=True, expand_attn=True), mvitv2_huge_cls=MultiScaleVitCfg(depths=(4, 8, 60, 8), embed_dim=192, num_heads=3, use_cls_token=True, expand_attn=True)) def _create_mvitv2(variant, cfg_variant=None, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 4) return build_model_with_cfg(MultiScaleVit, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', 'fixed_input_size': True, **kwargs} default_cfgs = generate_default_cfgs({'mvitv2_tiny.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_T_in1k.pyth', hf_hub_id='timm/'), 'mvitv2_small.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_S_in1k.pyth', hf_hub_id='timm/'), 'mvitv2_base.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in1k.pyth', hf_hub_id='timm/'), 'mvitv2_large.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in1k.pyth', hf_hub_id='timm/'), 'mvitv2_small_cls': _cfg(url=''), 'mvitv2_base_cls.fb_inw21k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in21k.pyth', hf_hub_id='timm/', num_classes=19168), 'mvitv2_large_cls.fb_inw21k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in21k.pyth', hf_hub_id='timm/', num_classes=19168), 'mvitv2_huge_cls.fb_inw21k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_H_in21k.pyth', hf_hub_id='timm/', num_classes=19168)}) @register_model def mvitv2_tiny(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_tiny', pretrained=pretrained, **kwargs) @register_model def mvitv2_small(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_small', pretrained=pretrained, **kwargs) @register_model def mvitv2_base(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_base', pretrained=pretrained, **kwargs) @register_model def mvitv2_large(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_large', pretrained=pretrained, **kwargs) @register_model def mvitv2_small_cls(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_small_cls', pretrained=pretrained, **kwargs) @register_model def mvitv2_base_cls(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_base_cls', pretrained=pretrained, **kwargs) @register_model def mvitv2_large_cls(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_large_cls', pretrained=pretrained, **kwargs) @register_model def mvitv2_huge_cls(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_huge_cls', pretrained=pretrained, **kwargs) # File: pytorch-image-models-main/timm/models/nasnet.py """""" from functools import partial from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier from ._builder import build_model_with_cfg from ._registry import 
register_model, generate_default_cfgs __all__ = ['NASNetALarge'] class ActConvBn(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): super(ActConvBn, self).__init__() self.act = nn.ReLU() self.conv = create_conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) def forward(self, x): x = self.act(x) x = self.conv(x) x = self.bn(x) return x class SeparableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): super(SeparableConv2d, self).__init__() self.depthwise_conv2d = create_conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels) self.pointwise_conv2d = create_conv2d(in_channels, out_channels, kernel_size=1, padding=0) def forward(self, x): x = self.depthwise_conv2d(x) x = self.pointwise_conv2d(x) return x class BranchSeparables(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_type='', stem_cell=False): super(BranchSeparables, self).__init__() middle_channels = out_channels if stem_cell else in_channels self.act_1 = nn.ReLU() self.separable_1 = SeparableConv2d(in_channels, middle_channels, kernel_size, stride=stride, padding=pad_type) self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001, momentum=0.1) self.act_2 = nn.ReLU(inplace=True) self.separable_2 = SeparableConv2d(middle_channels, out_channels, kernel_size, stride=1, padding=pad_type) self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) def forward(self, x): x = self.act_1(x) x = self.separable_1(x) x = self.bn_sep_1(x) x = self.act_2(x) x = self.separable_2(x) x = self.bn_sep_2(x) return x class CellStem0(nn.Module): def __init__(self, stem_size, num_channels=42, pad_type=''): super(CellStem0, self).__init__() self.num_channels = num_channels self.stem_size = stem_size self.conv_1x1 = ActConvBn(self.stem_size, self.num_channels, 1, stride=1) self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(self.stem_size, self.num_channels, 5, 2, pad_type, stem_cell=True) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x): x1 = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x1) x_comb_iter_0_right = self.comb_iter_0_right(x) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x1) x_comb_iter_1_right = self.comb_iter_1_right(x) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x1) x_comb_iter_2_right = self.comb_iter_2_right(x) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) 
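# --- Added descriptive note (derived from the surrounding code, not part of
# the original source): each NASNet 'comb_iter' sums a left/right branch pair.
# In this stem cell, x_comb_iter_0 is consumed only by comb_iter_3 and
# comb_iter_4 below, and the cell output concatenates iters 1-4 along the
# channel dim, so CellStem0 returns 4 * num_channels channels at half the
# input resolution (the stride-2 branches do the reduction).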
x_comb_iter_4_right = self.comb_iter_4_right(x1) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class CellStem1(nn.Module): def __init__(self, stem_size, num_channels, pad_type=''): super(CellStem1, self).__init__() self.num_channels = num_channels self.stem_size = stem_size self.conv_1x1 = ActConvBn(2 * self.num_channels, self.num_channels, 1, stride=1) self.act = nn.ReLU() self.path_1 = nn.Sequential() self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_1.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) self.path_2 = nn.Sequential() self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_2.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) self.final_path_bn = nn.BatchNorm2d(self.num_channels, eps=0.001, momentum=0.1) self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x_conv0, x_stem_0): x_left = self.conv_1x1(x_stem_0) x_relu = self.act(x_conv0) x_path1 = self.path_1(x_relu) x_path2 = self.path_2(x_relu) x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) x_comb_iter_0_left = self.comb_iter_0_left(x_left) x_comb_iter_0_right = self.comb_iter_0_right(x_right) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_right) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_left) x_comb_iter_2_right = self.comb_iter_2_right(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_left) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class FirstCell(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(FirstCell, self).__init__() self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1) self.act = nn.ReLU() self.path_1 = nn.Sequential() self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) self.path_2 = nn.Sequential() self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) self.path_2.add_module('avgpool', nn.AvgPool2d(1, 
stride=2, count_include_pad=False)) self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) self.final_path_bn = nn.BatchNorm2d(out_chs_left * 2, eps=0.001, momentum=0.1) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) def forward(self, x, x_prev): x_relu = self.act(x_prev) x_path1 = self.path_1(x_relu) x_path2 = self.path_2(x_relu) x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_left x_comb_iter_3_left = self.comb_iter_3_left(x_left) x_comb_iter_3_right = self.comb_iter_3_right(x_left) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_right x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class NormalCell(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(NormalCell, self).__init__() self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) self.comb_iter_1_left = BranchSeparables(out_chs_left, out_chs_left, 5, 1, pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_left x_comb_iter_3_left = self.comb_iter_3_left(x_left) x_comb_iter_3_right 
= self.comb_iter_3_right(x_left) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_right x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class ReductionCell0(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(ReductionCell0, self).__init__() self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = self.comb_iter_2_right(x_left) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class ReductionCell1(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(ReductionCell1, self).__init__() self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = 
self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = self.comb_iter_2_right(x_left) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class NASNetALarge(nn.Module): def __init__(self, num_classes=1000, in_chans=3, stem_size=96, channel_multiplier=2, num_features=4032, output_stride=32, drop_rate=0.0, global_pool='avg', pad_type='same'): super(NASNetALarge, self).__init__() self.num_classes = num_classes self.stem_size = stem_size self.num_features = self.head_hidden_size = num_features self.channel_multiplier = channel_multiplier assert output_stride == 32 channels = self.num_features // 24 self.conv0 = ConvNormAct(in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) self.cell_stem_0 = CellStem0(self.stem_size, num_channels=channels // channel_multiplier ** 2, pad_type=pad_type) self.cell_stem_1 = CellStem1(self.stem_size, num_channels=channels // channel_multiplier, pad_type=pad_type) self.cell_0 = FirstCell(in_chs_left=channels, out_chs_left=channels // 2, in_chs_right=2 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_1 = NormalCell(in_chs_left=2 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_2 = NormalCell(in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_3 = NormalCell(in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_4 = NormalCell(in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_5 = NormalCell(in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.reduction_cell_0 = ReductionCell0(in_chs_left=6 * channels, out_chs_left=2 * channels, in_chs_right=6 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_6 = FirstCell(in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=8 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_7 = NormalCell(in_chs_left=8 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_8 = NormalCell(in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_9 = NormalCell(in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_10 = NormalCell(in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) 
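# --- Added bookkeeping note (derived from this constructor, not part of the
# original source): channels = num_features // 24 = 4032 // 24 = 168. A
# NormalCell concatenates six 2*channels-wide branches in this stage, so
# cells 7-11 emit 12 * channels = 2016 channels at reduction 16, which is the
# width the 'reduction_cell_1.conv_1x1.act' entry in feature_info below
# reports before reduction_cell_1 halves the spatial size again.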
self.cell_11 = NormalCell(in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.reduction_cell_1 = ReductionCell1(in_chs_left=12 * channels, out_chs_left=4 * channels, in_chs_right=12 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_12 = FirstCell(in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=16 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_13 = NormalCell(in_chs_left=16 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_14 = NormalCell(in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_15 = NormalCell(in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_16 = NormalCell(in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_17 = NormalCell(in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.act = nn.ReLU(inplace=True) self.feature_info = [dict(num_chs=96, reduction=2, module='conv0'), dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'), dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'), dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'), dict(num_chs=4032, reduction=32, module='act')] (self.global_pool, self.head_drop, self.last_linear) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^conv0|cell_stem_[01]', blocks=[('^cell_(\\d+)', None), ('^reduction_cell_0', (6,)), ('^reduction_cell_1', (12,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.last_linear def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.last_linear) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x_conv0 = self.conv0(x) x_stem_0 = self.cell_stem_0(x_conv0) x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) x_cell_0 = self.cell_0(x_stem_1, x_stem_0) x_cell_1 = self.cell_1(x_cell_0, x_stem_1) x_cell_2 = self.cell_2(x_cell_1, x_cell_0) x_cell_3 = self.cell_3(x_cell_2, x_cell_1) x_cell_4 = self.cell_4(x_cell_3, x_cell_2) x_cell_5 = self.cell_5(x_cell_4, x_cell_3) x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4) x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4) x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) x_cell_8 = self.cell_8(x_cell_7, x_cell_6) x_cell_9 = self.cell_9(x_cell_8, x_cell_7) x_cell_10 = self.cell_10(x_cell_9, x_cell_8) x_cell_11 = self.cell_11(x_cell_10, x_cell_9) x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10) x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10) x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) x_cell_14 = self.cell_14(x_cell_13, x_cell_12) x_cell_15 = self.cell_15(x_cell_14, x_cell_13) x_cell_16 = self.cell_16(x_cell_15, x_cell_14) x_cell_17 = self.cell_17(x_cell_16, x_cell_15) x = self.act(x_cell_17) return x def 
forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.last_linear(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_nasnet(variant, pretrained=False, **kwargs): return build_model_with_cfg(NASNetALarge, variant, pretrained, feature_cfg=dict(feature_cls='hook', no_rewrite=True), **kwargs) default_cfgs = generate_default_cfgs({'nasnetalarge.tf_in1k': {'hf_hub_id': 'timm/', 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nasnetalarge-dc4a7b8b.pth', 'input_size': (3, 331, 331), 'pool_size': (11, 11), 'crop_pct': 0.911, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'num_classes': 1000, 'first_conv': 'conv0.conv', 'classifier': 'last_linear'}}) @register_model def nasnetalarge(pretrained=False, **kwargs) -> NASNetALarge: model_kwargs = dict(pad_type='same', **kwargs) return _create_nasnet('nasnetalarge', pretrained, **model_kwargs) # File: pytorch-image-models-main/timm/models/nest.py """""" import collections.abc import logging import math from functools import partial import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_, _assert from timm.layers import create_conv2d, create_pool2d, to_ntuple, use_fused_attn, LayerNorm from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq, named_apply from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['Nest'] _logger = logging.getLogger(__name__) class Attention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): (B, T, N, C) = x.shape qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5) (q, k, v) = qkv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.permute(0, 2, 3, 4, 1).reshape(B, T, N, C) x = self.proj(x) x = self.proj_drop(x) return x class TransformerLayer(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop) def forward(self, x): y = self.norm1(x) x = x + self.drop_path(self.attn(y)) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class ConvPool(nn.Module): def __init__(self, in_channels, out_channels, norm_layer, 
pad_type=''): super().__init__() self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True) self.norm = norm_layer(out_channels) self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type) def forward(self, x): _assert(x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims') _assert(x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims') x = self.conv(x) x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) x = self.pool(x) return x def blockify(x, block_size: int): (B, H, W, C) = x.shape _assert(H % block_size == 0, '`block_size` must divide input height evenly') _assert(W % block_size == 0, '`block_size` must divide input width evenly') grid_height = H // block_size grid_width = W // block_size x = x.reshape(B, grid_height, block_size, grid_width, block_size, C) x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C) return x @register_notrace_function def deblockify(x, block_size: int): (B, T, _, C) = x.shape grid_size = int(math.sqrt(T)) height = width = grid_size * block_size x = x.reshape(B, grid_size, grid_size, block_size, block_size, C) x = x.transpose(2, 3).reshape(B, height, width, C) return x class NestLevel(nn.Module): def __init__(self, num_blocks, block_size, seq_length, num_heads, depth, embed_dim, prev_embed_dim=None, mlp_ratio=4.0, qkv_bias=True, proj_drop=0.0, attn_drop=0.0, drop_path=[], norm_layer=None, act_layer=None, pad_type=''): super().__init__() self.block_size = block_size self.grad_checkpointing = False self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim)) if prev_embed_dim is not None: self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type) else: self.pool = nn.Identity() if len(drop_path): assert len(drop_path) == depth, 'Must provide as many drop path rates as there are transformer layers' self.transformer_encoder = nn.Sequential(*[TransformerLayer(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer, act_layer=act_layer) for i in range(depth)]) def forward(self, x): x = self.pool(x) x = x.permute(0, 2, 3, 1) x = blockify(x, self.block_size) x = x + self.pos_embed if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.transformer_encoder, x) else: x = self.transformer_encoder(x) x = deblockify(x, self.block_size) return x.permute(0, 3, 1, 2) class Nest(nn.Module): def __init__(self, img_size=224, in_chans=3, patch_size=4, num_levels=3, embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), num_classes=1000, mlp_ratio=4.0, qkv_bias=True, drop_rate=0.0, proj_drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.5, norm_layer=None, act_layer=None, pad_type='', weight_init='', global_pool='avg'): super().__init__() for param_name in ['embed_dims', 'num_heads', 'depths']: param_value = locals()[param_name] if isinstance(param_value, collections.abc.Sequence): assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`' embed_dims = to_ntuple(num_levels)(embed_dims) num_heads = to_ntuple(num_levels)(num_heads) depths = to_ntuple(num_levels)(depths) self.num_classes = num_classes self.num_features = self.head_hidden_size = embed_dims[-1] self.feature_info = [] norm_layer = norm_layer or LayerNorm act_layer = act_layer or nn.GELU self.drop_rate = drop_rate self.num_levels = num_levels if isinstance(img_size, 
collections.abc.Sequence): assert img_size[0] == img_size[1], 'Model only handles square inputs' img_size = img_size[0] assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly' self.patch_size = patch_size self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist() assert img_size // patch_size % math.sqrt(self.num_blocks[0]) == 0, "First level blocks don't fit evenly. Check `img_size`, `patch_size`, and `num_levels`" self.block_size = int(img_size // patch_size // math.sqrt(self.num_blocks[0])) self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dims[0], flatten=False) self.num_patches = self.patch_embed.num_patches self.seq_length = self.num_patches // self.num_blocks[0] levels = [] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] prev_dim = None curr_stride = 4 for i in range(len(self.num_blocks)): dim = embed_dims[i] levels.append(NestLevel(self.num_blocks[i], self.block_size, self.seq_length, num_heads[i], depths[i], dim, prev_dim, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dp_rates[i], norm_layer=norm_layer, act_layer=act_layer, pad_type=pad_type)) self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')] prev_dim = dim curr_stride *= 2 self.levels = nn.Sequential(*levels) self.norm = norm_layer(embed_dims[-1]) (global_pool, head) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) self.global_pool = global_pool self.head_drop = nn.Dropout(drop_rate) self.head = head self.init_weights(weight_init) @torch.jit.ignore def init_weights(self, mode=''): assert mode in ('nlhb', '') head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.0 for level in self.levels: trunc_normal_(level.pos_embed, std=0.02, a=-2, b=2) named_apply(partial(_init_nest_weights, head_bias=head_bias), self) @torch.jit.ignore def no_weight_decay(self): return {f'level.{i}.pos_embed' for i in range(len(self.levels))} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^patch_embed', blocks=[('^levels\\.(\\d+)' if coarse else '^levels\\.(\\d+)\\.transformer_encoder\\.(\\d+)', None), ('^levels\\.(\\d+)\\.(?:pool|pos_embed)', (0,)), ('^norm', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for l in self.levels: l.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.head) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.patch_embed(x) x = self.levels(x) x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_nest_weights(module: nn.Module, name: str='', head_bias: float=0.0): if isinstance(module, nn.Linear): if name.startswith('head'): trunc_normal_(module.weight, std=0.02, a=-2, b=2) nn.init.constant_(module.bias, head_bias) else: trunc_normal_(module.weight, std=0.02, a=-2, b=2) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=0.02, a=-2, b=2) if 
module.bias is not None: nn.init.zeros_(module.bias) def resize_pos_embed(posemb, posemb_new): _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) seq_length_old = posemb.shape[2] (num_blocks_new, seq_length_new) = posemb_new.shape[1:3] size_new = int(math.sqrt(num_blocks_new * seq_length_new)) posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False) posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new))) return posemb def checkpoint_filter_fn(state_dict, model): pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')] for k in pos_embed_keys: if state_dict[k].shape != getattr(model, k).shape: state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k)) return state_dict def _create_nest(variant, pretrained=False, **kwargs): model = build_model_with_cfg(Nest, variant, pretrained, feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True), pretrained_filter_fn=checkpoint_filter_fn, **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14], 'crop_pct': 0.875, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'nest_base.untrained': _cfg(), 'nest_small.untrained': _cfg(), 'nest_tiny.untrained': _cfg(), 'nest_base_jx.goog_in1k': _cfg(hf_hub_id='timm/'), 'nest_small_jx.goog_in1k': _cfg(hf_hub_id='timm/'), 'nest_tiny_jx.goog_in1k': _cfg(hf_hub_id='timm/')}) @register_model def nest_base(pretrained=False, **kwargs) -> Nest: model_kwargs = dict(embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs) return model @register_model def nest_small(pretrained=False, **kwargs) -> Nest: model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs) return model @register_model def nest_tiny(pretrained=False, **kwargs) -> Nest: model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs) return model @register_model def nest_base_jx(pretrained=False, **kwargs) -> Nest: kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_base_jx', pretrained=pretrained, **model_kwargs) return model @register_model def nest_small_jx(pretrained=False, **kwargs) -> Nest: kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_small_jx', pretrained=pretrained, **model_kwargs) return model @register_model def nest_tiny_jx(pretrained=False, **kwargs) -> Nest: kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) model = _create_nest('nest_tiny_jx', pretrained=pretrained, **model_kwargs) return model register_model_deprecations(__name__, {'jx_nest_base': 'nest_base_jx', 'jx_nest_small': 'nest_small_jx', 'jx_nest_tiny': 'nest_tiny_jx'}) # File: 
pytorch-image-models-main/timm/models/nextvit.py """""" from functools import partial from typing import Optional import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, ConvMlp, get_norm_layer, get_act_layer, use_fused_attn from timm.layers import ClassifierHead from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['NextViT'] def merge_pre_bn(module, pre_bn_1, pre_bn_2=None): weight = module.weight.data if module.bias is None: zeros = torch.zeros(module.out_chs, device=weight.device).type(weight.type()) module.bias = nn.Parameter(zeros) bias = module.bias.data if pre_bn_2 is None: assert pre_bn_1.track_running_stats is True, 'Unsupported bn_module.track_running_stats is False' assert pre_bn_1.affine is True, 'Unsupported bn_module.affine is False' scale_invstd = pre_bn_1.running_var.add(pre_bn_1.eps).pow(-0.5) extra_weight = scale_invstd * pre_bn_1.weight extra_bias = pre_bn_1.bias - pre_bn_1.weight * pre_bn_1.running_mean * scale_invstd else: assert pre_bn_1.track_running_stats is True, 'Unsupported bn_module.track_running_stats is False' assert pre_bn_1.affine is True, 'Unsupported bn_module.affine is False' assert pre_bn_2.track_running_stats is True, 'Unsupported bn_module.track_running_stats is False' assert pre_bn_2.affine is True, 'Unsupported bn_module.affine is False' scale_invstd_1 = pre_bn_1.running_var.add(pre_bn_1.eps).pow(-0.5) scale_invstd_2 = pre_bn_2.running_var.add(pre_bn_2.eps).pow(-0.5) extra_weight = scale_invstd_1 * pre_bn_1.weight * scale_invstd_2 * pre_bn_2.weight extra_bias = scale_invstd_2 * pre_bn_2.weight * (pre_bn_1.bias - pre_bn_1.weight * pre_bn_1.running_mean * scale_invstd_1 - pre_bn_2.running_mean) + pre_bn_2.bias if isinstance(module, nn.Linear): extra_bias = weight @ extra_bias weight.mul_(extra_weight.view(1, weight.size(1)).expand_as(weight)) elif isinstance(module, nn.Conv2d): assert weight.shape[2] == 1 and weight.shape[3] == 1 weight = weight.reshape(weight.shape[0], weight.shape[1]) extra_bias = weight @ extra_bias weight.mul_(extra_weight.view(1, weight.size(1)).expand_as(weight)) weight = weight.reshape(weight.shape[0], weight.shape[1], 1, 1) bias.add_(extra_bias) module.weight.data = weight module.bias.data = bias class ConvNormAct(nn.Module): def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, groups=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU): super(ConvNormAct, self).__init__() self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=1, groups=groups, bias=False) self.norm = norm_layer(out_chs) self.act = act_layer() def forward(self, x): x = self.conv(x) x = self.norm(x) x = self.act(x) return x def _make_divisible(v, divisor, min_value=None): if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) if new_v < 0.9 * v: new_v += divisor return new_v class PatchEmbed(nn.Module): def __init__(self, in_chs, out_chs, stride=1, norm_layer=nn.BatchNorm2d): super(PatchEmbed, self).__init__() if stride == 2: self.pool = nn.AvgPool2d((2, 2), stride=2, ceil_mode=True, count_include_pad=False) self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=1, stride=1, bias=False) self.norm = norm_layer(out_chs) elif in_chs != out_chs: self.pool = nn.Identity() self.conv = nn.Conv2d(in_chs, out_chs, 
kernel_size=1, stride=1, bias=False) self.norm = norm_layer(out_chs) else: self.pool = nn.Identity() self.conv = nn.Identity() self.norm = nn.Identity() def forward(self, x): return self.norm(self.conv(self.pool(x))) class ConvAttention(nn.Module): def __init__(self, out_chs, head_dim, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU): super(ConvAttention, self).__init__() self.group_conv3x3 = nn.Conv2d(out_chs, out_chs, kernel_size=3, stride=1, padding=1, groups=out_chs // head_dim, bias=False) self.norm = norm_layer(out_chs) self.act = act_layer() self.projection = nn.Conv2d(out_chs, out_chs, kernel_size=1, bias=False) def forward(self, x): out = self.group_conv3x3(x) out = self.norm(out) out = self.act(out) out = self.projection(out) return out class NextConvBlock(nn.Module): def __init__(self, in_chs, out_chs, stride=1, drop_path=0.0, drop=0.0, head_dim=32, mlp_ratio=3.0, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU): super(NextConvBlock, self).__init__() self.in_chs = in_chs self.out_chs = out_chs assert out_chs % head_dim == 0 self.patch_embed = PatchEmbed(in_chs, out_chs, stride, norm_layer=norm_layer) self.mhca = ConvAttention(out_chs, head_dim, norm_layer=norm_layer, act_layer=act_layer) self.attn_drop_path = DropPath(drop_path) self.norm = norm_layer(out_chs) self.mlp = ConvMlp(out_chs, hidden_features=int(out_chs * mlp_ratio), drop=drop, bias=True, act_layer=act_layer) self.mlp_drop_path = DropPath(drop_path) self.is_fused = False @torch.no_grad() def reparameterize(self): if not self.is_fused: merge_pre_bn(self.mlp.fc1, self.norm) self.norm = nn.Identity() self.is_fused = True def forward(self, x): x = self.patch_embed(x) x = x + self.attn_drop_path(self.mhca(x)) out = self.norm(x) x = x + self.mlp_drop_path(self.mlp(out)) return x class EfficientAttention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim, out_dim=None, head_dim=32, qkv_bias=True, attn_drop=0.0, proj_drop=0.0, sr_ratio=1, norm_layer=nn.BatchNorm1d): super().__init__() self.dim = dim self.out_dim = out_dim if out_dim is not None else dim self.num_heads = self.dim // head_dim self.head_dim = head_dim self.scale = head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.q = nn.Linear(dim, self.dim, bias=qkv_bias) self.k = nn.Linear(dim, self.dim, bias=qkv_bias) self.v = nn.Linear(dim, self.dim, bias=qkv_bias) self.proj = nn.Linear(self.dim, self.out_dim) self.attn_drop = nn.Dropout(attn_drop) self.proj_drop = nn.Dropout(proj_drop) self.sr_ratio = sr_ratio self.N_ratio = sr_ratio ** 2 if sr_ratio > 1: self.sr = nn.AvgPool1d(kernel_size=self.N_ratio, stride=self.N_ratio) self.norm = norm_layer(dim) else: self.sr = None self.norm = None def forward(self, x): (B, N, C) = x.shape q = self.q(x).reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3) if self.sr is not None: x = self.sr(x.transpose(1, 2)) x = self.norm(x).transpose(1, 2) k = self.k(x).reshape(B, -1, self.num_heads, self.head_dim).transpose(1, 2) v = self.v(x).reshape(B, -1, self.num_heads, self.head_dim).transpose(1, 2) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-1, -2) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class NextTransformerBlock(nn.Module): def __init__(self, in_chs, out_chs, drop_path, stride=1, sr_ratio=1, mlp_ratio=2, head_dim=32, mix_block_ratio=0.75, attn_drop=0.0, drop=0.0, 
norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU): super(NextTransformerBlock, self).__init__() self.in_chs = in_chs self.out_chs = out_chs self.mix_block_ratio = mix_block_ratio self.mhsa_out_chs = _make_divisible(int(out_chs * mix_block_ratio), 32) self.mhca_out_chs = out_chs - self.mhsa_out_chs self.patch_embed = PatchEmbed(in_chs, self.mhsa_out_chs, stride) self.norm1 = norm_layer(self.mhsa_out_chs) self.e_mhsa = EfficientAttention(self.mhsa_out_chs, head_dim=head_dim, sr_ratio=sr_ratio, attn_drop=attn_drop, proj_drop=drop) self.mhsa_drop_path = DropPath(drop_path * mix_block_ratio) self.projection = PatchEmbed(self.mhsa_out_chs, self.mhca_out_chs, stride=1, norm_layer=norm_layer) self.mhca = ConvAttention(self.mhca_out_chs, head_dim=head_dim, norm_layer=norm_layer, act_layer=act_layer) self.mhca_drop_path = DropPath(drop_path * (1 - mix_block_ratio)) self.norm2 = norm_layer(out_chs) self.mlp = ConvMlp(out_chs, hidden_features=int(out_chs * mlp_ratio), act_layer=act_layer, drop=drop) self.mlp_drop_path = DropPath(drop_path) self.is_fused = False @torch.no_grad() def reparameterize(self): if not self.is_fused: merge_pre_bn(self.e_mhsa.q, self.norm1) if self.e_mhsa.norm is not None: merge_pre_bn(self.e_mhsa.k, self.norm1, self.e_mhsa.norm) merge_pre_bn(self.e_mhsa.v, self.norm1, self.e_mhsa.norm) self.e_mhsa.norm = nn.Identity() else: merge_pre_bn(self.e_mhsa.k, self.norm1) merge_pre_bn(self.e_mhsa.v, self.norm1) self.norm1 = nn.Identity() merge_pre_bn(self.mlp.fc1, self.norm2) self.norm2 = nn.Identity() self.is_fused = True def forward(self, x): x = self.patch_embed(x) (B, C, H, W) = x.shape out = self.norm1(x) out = out.reshape(B, C, -1).transpose(-1, -2) out = self.mhsa_drop_path(self.e_mhsa(out)) x = x + out.transpose(-1, -2).reshape(B, C, H, W) out = self.projection(x) out = out + self.mhca_drop_path(self.mhca(out)) x = torch.cat([x, out], dim=1) out = self.norm2(x) x = x + self.mlp_drop_path(self.mlp(out)) return x class NextStage(nn.Module): def __init__(self, in_chs, block_chs, block_types, stride=2, sr_ratio=1, mix_block_ratio=1.0, drop=0.0, attn_drop=0.0, drop_path=0.0, head_dim=32, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU): super().__init__() self.grad_checkpointing = False blocks = [] for (block_idx, block_type) in enumerate(block_types): stride = stride if block_idx == 0 else 1 out_chs = block_chs[block_idx] block_type = block_types[block_idx] dpr = drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path if block_type is NextConvBlock: layer = NextConvBlock(in_chs, out_chs, stride=stride, drop_path=dpr, drop=drop, head_dim=head_dim, norm_layer=norm_layer, act_layer=act_layer) blocks.append(layer) elif block_type is NextTransformerBlock: layer = NextTransformerBlock(in_chs, out_chs, drop_path=dpr, stride=stride, sr_ratio=sr_ratio, head_dim=head_dim, mix_block_ratio=mix_block_ratio, attn_drop=attn_drop, drop=drop, norm_layer=norm_layer, act_layer=act_layer) blocks.append(layer) in_chs = out_chs self.blocks = nn.Sequential(*blocks) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x): if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class NextViT(nn.Module): def __init__(self, in_chans, num_classes=1000, global_pool='avg', stem_chs=(64, 32, 64), depths=(3, 4, 10, 3), strides=(1, 2, 2, 2), sr_ratios=(8, 4, 2, 1), drop_path_rate=0.1, attn_drop_rate=0.0, drop_rate=0.0, head_dim=32, mix_block_ratio=0.75, 
norm_layer=nn.BatchNorm2d, act_layer=None): super(NextViT, self).__init__() self.grad_checkpointing = False self.num_classes = num_classes norm_layer = get_norm_layer(norm_layer) if act_layer is None: act_layer = partial(nn.ReLU, inplace=True) else: act_layer = get_act_layer(act_layer) self.stage_out_chs = [[96] * depths[0], [192] * (depths[1] - 1) + [256], [384, 384, 384, 384, 512] * (depths[2] // 5), [768] * (depths[3] - 1) + [1024]] self.feature_info = [dict(num_chs=sc[-1], reduction=2 ** (i + 2), module=f'stages.{i}') for (i, sc) in enumerate(self.stage_out_chs)] self.stage_block_types = [[NextConvBlock] * depths[0], [NextConvBlock] * (depths[1] - 1) + [NextTransformerBlock], [NextConvBlock, NextConvBlock, NextConvBlock, NextConvBlock, NextTransformerBlock] * (depths[2] // 5), [NextConvBlock] * (depths[3] - 1) + [NextTransformerBlock]] self.stem = nn.Sequential(ConvNormAct(in_chans, stem_chs[0], kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer), ConvNormAct(stem_chs[0], stem_chs[1], kernel_size=3, stride=1, norm_layer=norm_layer, act_layer=act_layer), ConvNormAct(stem_chs[1], stem_chs[2], kernel_size=3, stride=1, norm_layer=norm_layer, act_layer=act_layer), ConvNormAct(stem_chs[2], stem_chs[2], kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer)) in_chs = out_chs = stem_chs[-1] stages = [] idx = 0 dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] for stage_idx in range(len(depths)): stage = NextStage(in_chs=in_chs, block_chs=self.stage_out_chs[stage_idx], block_types=self.stage_block_types[stage_idx], stride=strides[stage_idx], sr_ratio=sr_ratios[stage_idx], mix_block_ratio=mix_block_ratio, head_dim=head_dim, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[stage_idx], norm_layer=norm_layer, act_layer=act_layer) in_chs = out_chs = self.stage_out_chs[stage_idx][-1] stages += [stage] idx += depths[stage_idx] self.num_features = self.head_hidden_size = out_chs self.stages = nn.Sequential(*stages) self.norm = norm_layer(out_chs) self.head = ClassifierHead(pool_type=global_pool, in_features=out_chs, num_classes=num_classes) self.stage_out_idx = [sum(depths[:idx + 1]) - 1 for idx in range(len(depths))] self._initialize_weights() def _initialize_weights(self): for (n, m) in self.named_modules(): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Conv2d): trunc_normal_(m.weight, std=0.02) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks='^stages\\.(\\d+)' if coarse else [('^stages\\.(\\d+)\\.blocks\\.(\\d+)', None), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable for stage in self.stages: stage.set_grad_checkpointing(enable=enable) @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = 
self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'head.fc.weight' in state_dict: return state_dict D = model.state_dict() out_dict = {} for (ka, kb, va, vb) in zip(D.keys(), state_dict.keys(), D.values(), state_dict.values()): out_dict[ka] = vb return out_dict def _create_nextvit(variant, pretrained=False, **kwargs): default_out_indices = tuple((i for (i, _) in enumerate(kwargs.get('depths', (1, 1, 3, 1))))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg(NextViT, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'nextvit_small.bd_in1k': _cfg(hf_hub_id='timm/'), 'nextvit_base.bd_in1k': _cfg(hf_hub_id='timm/'), 'nextvit_large.bd_in1k': _cfg(hf_hub_id='timm/'), 'nextvit_small.bd_in1k_384': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'nextvit_base.bd_in1k_384': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'nextvit_large.bd_in1k_384': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'nextvit_small.bd_ssld_6m_in1k': _cfg(hf_hub_id='timm/'), 'nextvit_base.bd_ssld_6m_in1k': _cfg(hf_hub_id='timm/'), 'nextvit_large.bd_ssld_6m_in1k': _cfg(hf_hub_id='timm/'), 'nextvit_small.bd_ssld_6m_in1k_384': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'nextvit_base.bd_ssld_6m_in1k_384': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'nextvit_large.bd_ssld_6m_in1k_384': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0)}) @register_model def nextvit_small(pretrained=False, **kwargs): model_args = dict(depths=(3, 4, 10, 3), drop_path_rate=0.1) model = _create_nextvit('nextvit_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def nextvit_base(pretrained=False, **kwargs): model_args = dict(depths=(3, 4, 20, 3), drop_path_rate=0.2) model = _create_nextvit('nextvit_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def nextvit_large(pretrained=False, **kwargs): model_args = dict(depths=(3, 4, 30, 3), drop_path_rate=0.2) model = _create_nextvit('nextvit_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model # File: pytorch-image-models-main/timm/models/nfnet.py """""" from collections import OrderedDict from dataclasses import dataclass, replace from functools import partial from typing import Callable, Tuple, Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame, get_act_layer, get_act_fn, get_attn, make_divisible from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['NormFreeNet', 'NfCfg'] @dataclass class NfCfg: depths: Tuple[int, int, int, int] channels: Tuple[int, int, int, int] alpha: float = 0.2 
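# alpha scales the residual branch. Together with beta = 1 / sqrt(expected_var), computed
# per block in NormFreeNet below, the expected variance of the main path grows by
# alpha ** 2 after each block and is reset at the start of each stage.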
stem_type: str = '3x3' stem_chs: Optional[int] = None group_size: Optional[int] = None attn_layer: Optional[str] = None attn_kwargs: Optional[dict] = None attn_gain: float = 2.0 width_factor: float = 1.0 bottle_ratio: float = 0.5 num_features: int = 0 ch_div: int = 8 reg: bool = False extra_conv: bool = False gamma_in_act: bool = False same_padding: bool = False std_conv_eps: float = 1e-05 skipinit: bool = False zero_init_fc: bool = False act_layer: str = 'silu' class GammaAct(nn.Module): def __init__(self, act_type='relu', gamma: float=1.0, inplace=False): super().__init__() self.act_fn = get_act_fn(act_type) self.gamma = gamma self.inplace = inplace def forward(self, x): return self.act_fn(x, inplace=self.inplace).mul_(self.gamma) def act_with_gamma(act_type, gamma: float=1.0): def _create(inplace=False): return GammaAct(act_type, gamma=gamma, inplace=inplace) return _create class DownsampleAvg(nn.Module): def __init__(self, in_chs: int, out_chs: int, stride: int=1, dilation: int=1, first_dilation: Optional[int]=None, conv_layer: Callable=ScaledStdConv2d): super(DownsampleAvg, self).__init__() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() self.conv = conv_layer(in_chs, out_chs, 1, stride=1) def forward(self, x): return self.conv(self.pool(x)) @register_notrace_module class NormFreeBlock(nn.Module): def __init__(self, in_chs: int, out_chs: Optional[int]=None, stride: int=1, dilation: int=1, first_dilation: Optional[int]=None, alpha: float=1.0, beta: float=1.0, bottle_ratio: float=0.25, group_size: Optional[int]=None, ch_div: int=1, reg: bool=True, extra_conv: bool=False, skipinit: bool=False, attn_layer: Optional[Callable]=None, attn_gain: float=2.0, act_layer: Optional[Callable]=None, conv_layer: Callable=ScaledStdConv2d, drop_path_rate: float=0.0): super().__init__() first_dilation = first_dilation or dilation out_chs = out_chs or in_chs mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div) groups = 1 if not group_size else mid_chs // group_size if group_size and group_size % ch_div == 0: mid_chs = group_size * groups self.alpha = alpha self.beta = beta self.attn_gain = attn_gain if in_chs != out_chs or stride != 1 or dilation != first_dilation: self.downsample = DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, conv_layer=conv_layer) else: self.downsample = None self.act1 = act_layer() self.conv1 = conv_layer(in_chs, mid_chs, 1) self.act2 = act_layer(inplace=True) self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) if extra_conv: self.act2b = act_layer(inplace=True) self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups) else: self.act2b = None self.conv2b = None if reg and attn_layer is not None: self.attn = attn_layer(mid_chs) else: self.attn = None self.act3 = act_layer() self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1.0 if skipinit else 0.0) if not reg and attn_layer is not None: self.attn_last = attn_layer(out_chs) else: self.attn_last = None self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.skipinit_gain = nn.Parameter(torch.tensor(0.0)) if skipinit else None def forward(self, x): out = self.act1(x) * self.beta shortcut = x if self.downsample is not None: shortcut
= self.downsample(out) out = self.conv1(out) out = self.conv2(self.act2(out)) if self.conv2b is not None: out = self.conv2b(self.act2b(out)) if self.attn is not None: out = self.attn_gain * self.attn(out) out = self.conv3(self.act3(out)) if self.attn_last is not None: out = self.attn_gain * self.attn_last(out) out = self.drop_path(out) if self.skipinit_gain is not None: out.mul_(self.skipinit_gain) out = out * self.alpha + shortcut return out def create_stem(in_chs: int, out_chs: int, stem_type: str='', conv_layer: Optional[Callable]=None, act_layer: Optional[Callable]=None, preact_feature: bool=True): stem_stride = 2 stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv') stem = OrderedDict() assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool') if 'deep' in stem_type: if 'quad' in stem_type: assert not 'pool' in stem_type stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs) strides = (2, 1, 1, 2) stem_stride = 4 stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3') else: if 'tiered' in stem_type: stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) else: stem_chs = (out_chs // 2, out_chs // 2, out_chs) strides = (2, 1, 1) stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2') last_idx = len(stem_chs) - 1 for (i, (c, s)) in enumerate(zip(stem_chs, strides)): stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s) if i != last_idx: stem[f'act{i + 2}'] = act_layer(inplace=True) in_chs = c elif '3x3' in stem_type: stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2) else: stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) if 'pool' in stem_type: stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1) stem_stride = 4 return (nn.Sequential(stem), stem_stride, stem_feature) _nonlin_gamma = dict(identity=1.0, celu=1.270926833152771, elu=1.2716004848480225, gelu=1.7015043497085571, leaky_relu=1.70590341091156, log_sigmoid=1.9193484783172607, log_softmax=1.0002083778381348, relu=1.7139588594436646, relu6=1.7131484746932983, selu=1.0008515119552612, sigmoid=4.803835391998291, silu=1.7881293296813965, softsign=2.338853120803833, softplus=1.9203323125839233, tanh=1.5939117670059204) class NormFreeNet(nn.Module): def __init__(self, cfg: NfCfg, num_classes: int=1000, in_chans: int=3, global_pool: str='avg', output_stride: int=32, drop_rate: float=0.0, drop_path_rate: float=0.0, **kwargs): super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False cfg = replace(cfg, **kwargs) assert cfg.act_layer in _nonlin_gamma, f'Please add non-linearity constants for activation ({cfg.act_layer}).' 
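# The _nonlin_gamma constants above make each activation approximately variance-preserving
# for unit-normal input. They are applied in one of two places: baked into the activation
# itself (gamma_in_act, used for compatibility with the DeepMind reference weights) or
# folded into the gain of the following ScaledStdConv (the default). A constant for a new
# activation could be estimated empirically, e.g. as a rough sketch:
#   x = torch.randn(1024, 256); gamma = 1 / act_fn(x).std().item()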
conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d if cfg.gamma_in_act: act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer]) conv_layer = partial(conv_layer, eps=cfg.std_conv_eps) else: act_layer = get_act_layer(cfg.act_layer) conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps) attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div) (self.stem, stem_stride, stem_feat) = create_stem(in_chans, stem_chs, cfg.stem_type, conv_layer=conv_layer, act_layer=act_layer) self.feature_info = [stem_feat] drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] prev_chs = stem_chs net_stride = stem_stride dilation = 1 expected_var = 1.0 stages = [] for (stage_idx, stage_depth) in enumerate(cfg.depths): stride = 1 if stage_idx == 0 and stem_stride > 2 else 2 if net_stride >= output_stride and stride > 1: dilation *= stride stride = 1 net_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 blocks = [] for block_idx in range(cfg.depths[stage_idx]): first_block = block_idx == 0 and stage_idx == 0 out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div) blocks += [NormFreeBlock(in_chs=prev_chs, out_chs=out_chs, alpha=cfg.alpha, beta=1.0 / expected_var ** 0.5, stride=stride if block_idx == 0 else 1, dilation=dilation, first_dilation=first_dilation, group_size=cfg.group_size, bottle_ratio=1.0 if cfg.reg and first_block else cfg.bottle_ratio, ch_div=cfg.ch_div, reg=cfg.reg, extra_conv=cfg.extra_conv, skipinit=cfg.skipinit, attn_layer=attn_layer, attn_gain=cfg.attn_gain, act_layer=act_layer, conv_layer=conv_layer, drop_path_rate=drop_path_rates[stage_idx][block_idx])] if block_idx == 0: expected_var = 1.0 expected_var += cfg.alpha ** 2 first_dilation = dilation prev_chs = out_chs self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')] stages += [nn.Sequential(*blocks)] self.stages = nn.Sequential(*stages) if cfg.num_features: self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div) self.final_conv = conv_layer(prev_chs, self.num_features, 1) self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module=f'final_conv') else: self.num_features = prev_chs self.final_conv = nn.Identity() self.final_act = act_layer(inplace=cfg.num_features > 0) self.head_hidden_size = self.num_features self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) for (n, m) in self.named_modules(): if 'fc' in n and isinstance(m, nn.Linear): if cfg.zero_init_fc: nn.init.zeros_(m.weight) else: nn.init.normal_(m.weight, 0.0, 0.01) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear') if m.bias is not None: nn.init.zeros_(m.bias) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^stem', blocks=[('^stages\\.(\\d+)' if coarse else '^stages\\.(\\d+)\\.(\\d+)', None), ('^final_conv', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.head.reset(num_classes, 
global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.final_conv(x) x = self.final_act(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _nfres_cfg(depths, channels=(256, 512, 1024, 2048), group_size=None, act_layer='relu', attn_layer=None, attn_kwargs=None): attn_kwargs = attn_kwargs or {} cfg = NfCfg(depths=depths, channels=channels, stem_type='7x7_pool', stem_chs=64, bottle_ratio=0.25, group_size=group_size, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs) return cfg def _nfreg_cfg(depths, channels=(48, 104, 208, 440)): num_features = 1280 * channels[-1] // 440 attn_kwargs = dict(rd_ratio=0.5) cfg = NfCfg(depths=depths, channels=channels, stem_type='3x3', group_size=8, width_factor=0.75, bottle_ratio=2.25, num_features=num_features, reg=True, attn_layer='se', attn_kwargs=attn_kwargs) return cfg def _nfnet_cfg(depths, channels=(256, 512, 1536, 1536), group_size=128, bottle_ratio=0.5, feat_mult=2.0, act_layer='gelu', attn_layer='se', attn_kwargs=None): num_features = int(channels[-1] * feat_mult) attn_kwargs = attn_kwargs if attn_kwargs is not None else dict(rd_ratio=0.5) cfg = NfCfg(depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=group_size, bottle_ratio=bottle_ratio, extra_conv=True, num_features=num_features, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs) return cfg def _dm_nfnet_cfg(depths, channels=(256, 512, 1536, 1536), act_layer='gelu', skipinit=True): cfg = NfCfg(depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=128, bottle_ratio=0.5, extra_conv=True, gamma_in_act=True, same_padding=True, skipinit=skipinit, num_features=int(channels[-1] * 2.0), act_layer=act_layer, attn_layer='se', attn_kwargs=dict(rd_ratio=0.5)) return cfg model_cfgs = dict(dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)), dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 4, 12, 6)), dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)), dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)), dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)), dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)), dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)), nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)), nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)), nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)), nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)), nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)), nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)), nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)), nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)), nfnet_l0=_nfnet_cfg(depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'), eca_nfnet_l0=_nfnet_cfg(depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), eca_nfnet_l1=_nfnet_cfg(depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25, attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), eca_nfnet_l2=_nfnet_cfg(depths=(3, 6, 18, 9), feat_mult=2, group_size=64, bottle_ratio=0.25, attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), eca_nfnet_l3=_nfnet_cfg(depths=(4, 8, 24, 12), feat_mult=2, group_size=64, bottle_ratio=0.25, attn_layer='eca', attn_kwargs=dict(), 
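# the eca_nfnet_l* models are lighter timm 'L' variants of the NFNet recipe:
# ECA attention in place of SE, SiLU activations, group_size 64 and bottle_ratio 0.25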
act_layer='silu'), nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)), nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)), nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)), nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)), nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)), nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)), nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)), nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)), nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)), nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(rd_ratio=1 / 16)), nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1 / 16)), nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1 / 16)), nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()), nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()), nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict())) def _create_normfreenet(variant, pretrained=False, **kwargs): model_cfg = model_cfgs[variant] feature_cfg = dict(flatten_sequential=True) return build_model_with_cfg(NormFreeNet, variant, pretrained, model_cfg=model_cfg, feature_cfg=feature_cfg, **kwargs) def _dcfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'dm_nfnet_f0.dm_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=0.9, crop_mode='squash'), 'dm_nfnet_f1.dm_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91, crop_mode='squash'), 'dm_nfnet_f2.dm_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92, crop_mode='squash'), 'dm_nfnet_f3.dm_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94, crop_mode='squash'), 'dm_nfnet_f4.dm_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512), crop_pct=0.951, crop_mode='squash'), 'dm_nfnet_f5.dm_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954, crop_mode='squash'), 'dm_nfnet_f6.dm_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth', pool_size=(14, 14), 
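# all dm_nfnet_f* configs pair a smaller train-time input_size with a larger
# test_input_size and crop_mode='squash', following the NFNet paper's evaluation setup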
input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956, crop_mode='squash'), 'nfnet_f0': _dcfg(url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), 'nfnet_f1': _dcfg(url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), 'nfnet_f2': _dcfg(url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), 'nfnet_f3': _dcfg(url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), 'nfnet_f4': _dcfg(url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), 'nfnet_f5': _dcfg(url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), 'nfnet_f6': _dcfg(url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), 'nfnet_f7': _dcfg(url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), 'nfnet_l0.ra2_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_nfnet_l0.ra2_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_nfnet_l1.ra2_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l1_ra2-7dce93cd.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), test_crop_pct=1.0), 'eca_nfnet_l2.ra3_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l2_ra3-da781a61.pth', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), test_crop_pct=1.0), 'eca_nfnet_l3': _dcfg(url='', pool_size=(11, 11), input_size=(3, 352, 352), test_input_size=(3, 448, 448), test_crop_pct=1.0), 'nf_regnet_b0': _dcfg(url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), first_conv='stem.conv'), 'nf_regnet_b1.ra2_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), 'nf_regnet_b2': _dcfg(url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'), 'nf_regnet_b3': _dcfg(url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'), 'nf_regnet_b4': _dcfg(url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'), 'nf_regnet_b5': _dcfg(url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 456), first_conv='stem.conv'), 'nf_resnet26': _dcfg(url='', first_conv='stem.conv'), 'nf_resnet50.ra2_in1k': _dcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'), 'nf_resnet101': _dcfg(url='', first_conv='stem.conv'), 'nf_seresnet26': _dcfg(url='', first_conv='stem.conv'), 'nf_seresnet50': _dcfg(url='', first_conv='stem.conv'), 'nf_seresnet101': _dcfg(url='', 
first_conv='stem.conv'), 'nf_ecaresnet26': _dcfg(url='', first_conv='stem.conv'), 'nf_ecaresnet50': _dcfg(url='', first_conv='stem.conv'), 'nf_ecaresnet101': _dcfg(url='', first_conv='stem.conv')}) @register_model def dm_nfnet_f0(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f1(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f2(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f3(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f4(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f5(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f6(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs) @register_model def nfnet_f0(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs) @register_model def nfnet_f1(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs) @register_model def nfnet_f2(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs) @register_model def nfnet_f3(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nfnet_f3', pretrained=pretrained, **kwargs) @register_model def nfnet_f4(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs) @register_model def nfnet_f5(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs) @register_model def nfnet_f6(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs) @register_model def nfnet_f7(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs) @register_model def nfnet_l0(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l0(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l1(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l2(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('eca_nfnet_l2', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l3(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('eca_nfnet_l3', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b0(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b1(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b2(pretrained=False, **kwargs) -> NormFreeNet: 
return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b3(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b4(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b5(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs) @register_model def nf_resnet26(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs) @register_model def nf_resnet50(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs) @register_model def nf_resnet101(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs) @register_model def nf_seresnet26(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs) @register_model def nf_seresnet50(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs) @register_model def nf_seresnet101(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs) @register_model def nf_ecaresnet26(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs) @register_model def nf_ecaresnet50(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs) @register_model def nf_ecaresnet101(pretrained=False, **kwargs) -> NormFreeNet: return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs) # File: pytorch-image-models-main/timm/models/pit.py """""" import math import re from functools import partial from typing import Optional, Sequence, Tuple import torch from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import trunc_normal_, to_2tuple from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .vision_transformer import Block __all__ = ['PoolingVisionTransformer'] class SequentialTuple(nn.Sequential): def __init__(self, *args): super(SequentialTuple, self).__init__(*args) def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: for module in self: x = module(x) return x class Transformer(nn.Module): def __init__(self, base_dim, depth, heads, mlp_ratio, pool=None, proj_drop=0.0, attn_drop=0.0, drop_path_prob=None, norm_layer=None): super(Transformer, self).__init__() embed_dim = base_dim * heads self.pool = pool self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() self.blocks = nn.Sequential(*[Block(dim=embed_dim, num_heads=heads, mlp_ratio=mlp_ratio, qkv_bias=True, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path_prob[i], norm_layer=partial(nn.LayerNorm, eps=1e-06)) for i in range(depth)]) def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: (x, cls_tokens) = x token_length = cls_tokens.shape[1] if self.pool is not None: (x, cls_tokens) = self.pool(x, cls_tokens) (B, C, H, W) = x.shape x = x.flatten(2).transpose(1, 2) x = torch.cat((cls_tokens, x), dim=1) x = self.norm(x) x = 
self.blocks(x) cls_tokens = x[:, :token_length] x = x[:, token_length:] x = x.transpose(1, 2).reshape(B, C, H, W) return (x, cls_tokens) class Pooling(nn.Module): def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'): super(Pooling, self).__init__() self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride, padding_mode=padding_mode, groups=in_feature) self.fc = nn.Linear(in_feature, out_feature) def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]: x = self.conv(x) cls_token = self.fc(cls_token) return (x, cls_token) class ConvEmbedding(nn.Module): def __init__(self, in_channels, out_channels, img_size: int=224, patch_size: int=16, stride: int=8, padding: int=0): super(ConvEmbedding, self).__init__() self.img_size = to_2tuple(img_size) self.patch_size = to_2tuple(patch_size) self.height = math.floor((self.img_size[0] + 2 * padding - self.patch_size[0]) / stride + 1) self.width = math.floor((self.img_size[1] + 2 * padding - self.patch_size[1]) / stride + 1) self.grid_size = (self.height, self.width) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True) def forward(self, x): x = self.conv(x) return x class PoolingVisionTransformer(nn.Module): def __init__(self, img_size: int=224, patch_size: int=16, stride: int=8, stem_type: str='overlap', base_dims: Sequence[int]=(48, 48, 48), depth: Sequence[int]=(2, 6, 4), heads: Sequence[int]=(2, 4, 8), mlp_ratio: float=4, num_classes=1000, in_chans=3, global_pool='token', distilled=False, drop_rate=0.0, pos_drop_rate=0.0, proj_drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0): super(PoolingVisionTransformer, self).__init__() assert global_pool in ('token',) self.base_dims = base_dims self.heads = heads embed_dim = base_dims[0] * heads[0] self.num_classes = num_classes self.global_pool = global_pool self.num_tokens = 2 if distilled else 1 self.feature_info = [] self.patch_embed = ConvEmbedding(in_chans, embed_dim, img_size, patch_size, stride) self.pos_embed = nn.Parameter(torch.randn(1, embed_dim, self.patch_embed.height, self.patch_embed.width)) self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, embed_dim)) self.pos_drop = nn.Dropout(p=pos_drop_rate) transformers = [] dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)] prev_dim = embed_dim for i in range(len(depth)): pool = None embed_dim = base_dims[i] * heads[i] if i > 0: pool = Pooling(prev_dim, embed_dim, stride=2) transformers += [Transformer(base_dims[i], depth[i], heads[i], mlp_ratio, pool=pool, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path_prob=dpr[i])] prev_dim = embed_dim self.feature_info += [dict(num_chs=prev_dim, reduction=(stride - 1) * 2 ** i, module=f'transformers.{i}')] self.transformers = SequentialTuple(*transformers) self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-06) self.num_features = self.head_hidden_size = self.embed_dim = embed_dim self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = None if distilled: self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() self.distilled_training = False trunc_normal_(self.pos_embed, std=0.02) trunc_normal_(self.cls_token, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0)
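# reset LayerNorm affine parameters to the identity transform (weight=1, bias=0)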
nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' def get_classifier(self) -> nn.Module: if self.head_dist is not None: return (self.head, self.head_dist) else: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() if self.head_dist is not None: self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) x = self.pos_drop(x + self.pos_embed) cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) (x, cls_tokens) = self.transformers((x, cls_tokens)) cls_tokens = self.norm(cls_tokens) return cls_tokens def forward_head(self, x, pre_logits: bool=False) -> torch.Tensor: if self.head_dist is not None: assert self.global_pool == 'token' (x, x_dist) = (x[:, 0], x[:, 1]) x = self.head_drop(x) x_dist = self.head_drop(x_dist) if not pre_logits: x = self.head(x) x_dist = self.head_dist(x_dist) if self.distilled_training and self.training and (not torch.jit.is_scripting()): return (x, x_dist) else: return (x + x_dist) / 2 else: if self.global_pool == 'token': x = x[:, 0] x = self.head_drop(x) if not pre_logits: x = self.head(x) return x def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): out_dict = {} p_blocks = re.compile('pools\\.(\\d)\\.') for (k, v) in state_dict.items(): k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1)) + 1}.pool.', k) out_dict[k] = v return out_dict def _create_pit(variant, pretrained=False, **kwargs): default_out_indices = tuple(range(3)) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg(PoolingVisionTransformer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(feature_cls='hook', no_rewrite=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.conv', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'pit_ti_224.in1k': _cfg(hf_hub_id='timm/'), 'pit_xs_224.in1k': _cfg(hf_hub_id='timm/'), 'pit_s_224.in1k': _cfg(hf_hub_id='timm/'), 'pit_b_224.in1k': _cfg(hf_hub_id='timm/'), 'pit_ti_distilled_224.in1k': _cfg(hf_hub_id='timm/', classifier=('head', 'head_dist')), 'pit_xs_distilled_224.in1k': _cfg(hf_hub_id='timm/', classifier=('head', 'head_dist')), 'pit_s_distilled_224.in1k': _cfg(hf_hub_id='timm/', classifier=('head', 'head_dist')), 'pit_b_distilled_224.in1k': _cfg(hf_hub_id='timm/', classifier=('head', 'head_dist'))}) @register_model def pit_b_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict(patch_size=14, stride=7, base_dims=[64, 64, 64], depth=[3, 6, 4], heads=[4, 8, 16], mlp_ratio=4) return _create_pit('pit_b_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_s_224(pretrained=False, **kwargs) -> 
PoolingVisionTransformer: model_args = dict(patch_size=16, stride=8, base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[3, 6, 12], mlp_ratio=4) return _create_pit('pit_s_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_xs_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict(patch_size=16, stride=8, base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[2, 4, 8], mlp_ratio=4) return _create_pit('pit_xs_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_ti_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict(patch_size=16, stride=8, base_dims=[32, 32, 32], depth=[2, 6, 4], heads=[2, 4, 8], mlp_ratio=4) return _create_pit('pit_ti_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_b_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict(patch_size=14, stride=7, base_dims=[64, 64, 64], depth=[3, 6, 4], heads=[4, 8, 16], mlp_ratio=4, distilled=True) return _create_pit('pit_b_distilled_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_s_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict(patch_size=16, stride=8, base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[3, 6, 12], mlp_ratio=4, distilled=True) return _create_pit('pit_s_distilled_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_xs_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict(patch_size=16, stride=8, base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[2, 4, 8], mlp_ratio=4, distilled=True) return _create_pit('pit_xs_distilled_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_ti_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict(patch_size=16, stride=8, base_dims=[32, 32, 32], depth=[2, 6, 4], heads=[2, 4, 8], mlp_ratio=4, distilled=True) return _create_pit('pit_ti_distilled_224', pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/pnasnet.py """""" from collections import OrderedDict from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['PNASNet5Large'] class SeparableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): super(SeparableConv2d, self).__init__() self.depthwise_conv2d = create_conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels) self.pointwise_conv2d = create_conv2d(in_channels, out_channels, kernel_size=1, padding=padding) def forward(self, x): x = self.depthwise_conv2d(x) x = self.pointwise_conv2d(x) return x class BranchSeparables(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, stem_cell=False, padding=''): super(BranchSeparables, self).__init__() middle_channels = out_channels if stem_cell else in_channels self.act_1 = nn.ReLU() self.separable_1 = SeparableConv2d(in_channels, middle_channels, kernel_size, stride=stride, padding=padding) self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001) self.act_2 = nn.ReLU() self.separable_2 = SeparableConv2d(middle_channels, out_channels, kernel_size, stride=1, padding=padding) self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001) def forward(self, x): x = 
self.act_1(x) x = self.separable_1(x) x = self.bn_sep_1(x) x = self.act_2(x) x = self.separable_2(x) x = self.bn_sep_2(x) return x class ActConvBn(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): super(ActConvBn, self).__init__() self.act = nn.ReLU() self.conv = create_conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001) def forward(self, x): x = self.act(x) x = self.conv(x) x = self.bn(x) return x class FactorizedReduction(nn.Module): def __init__(self, in_channels, out_channels, padding=''): super(FactorizedReduction, self).__init__() self.act = nn.ReLU() self.path_1 = nn.Sequential(OrderedDict([('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding))])) self.path_2 = nn.Sequential(OrderedDict([('pad', nn.ZeroPad2d((-1, 1, -1, 1))), ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding))])) self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001) def forward(self, x): x = self.act(x) x_path1 = self.path_1(x) x_path2 = self.path_2(x) out = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) return out class CellBase(nn.Module): def cell_forward(self, x_left, x_right): x_comb_iter_0_left = self.comb_iter_0_left(x_left) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_right) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = self.comb_iter_2_right(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2) x_comb_iter_3_right = self.comb_iter_3_right(x_right) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_left) if self.comb_iter_4_right is not None: x_comb_iter_4_right = self.comb_iter_4_right(x_right) else: x_comb_iter_4_right = x_right x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class CellStem0(CellBase): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(CellStem0, self).__init__() self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type) self.comb_iter_0_right = nn.Sequential(OrderedDict([('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)), ('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)), ('bn', nn.BatchNorm2d(out_chs_left, eps=0.001))])) self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type) self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type) self.comb_iter_2_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=5, stride=2, padding=pad_type) self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type) self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3, 
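# stride is left at its default of 1: this branch consumes the comb_iter_2 output,
# which the stride-2 branches above have already spatially reduced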
padding=pad_type) self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type) self.comb_iter_4_left = BranchSeparables(in_chs_right, out_chs_right, kernel_size=3, stride=2, stem_cell=True, padding=pad_type) self.comb_iter_4_right = ActConvBn(out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type) def forward(self, x_left): x_right = self.conv_1x1(x_left) x_out = self.cell_forward(x_left, x_right) return x_out class Cell(CellBase): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type='', is_reduction=False, match_prev_layer_dims=False): super(Cell, self).__init__() stride = 2 if is_reduction else 1 self.match_prev_layer_dimensions = match_prev_layer_dims if match_prev_layer_dims: self.conv_prev_1x1 = FactorizedReduction(in_chs_left, out_chs_left, padding=pad_type) else: self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_left, out_chs_left, kernel_size=5, stride=stride, padding=pad_type) self.comb_iter_0_right = create_pool2d('max', 3, stride=stride, padding=pad_type) self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=7, stride=stride, padding=pad_type) self.comb_iter_1_right = create_pool2d('max', 3, stride=stride, padding=pad_type) self.comb_iter_2_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=5, stride=stride, padding=pad_type) self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3, stride=stride, padding=pad_type) self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3) self.comb_iter_3_right = create_pool2d('max', 3, stride=stride, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_left, out_chs_left, kernel_size=3, stride=stride, padding=pad_type) if is_reduction: self.comb_iter_4_right = ActConvBn(out_chs_right, out_chs_right, kernel_size=1, stride=stride, padding=pad_type) else: self.comb_iter_4_right = None def forward(self, x_left, x_right): x_left = self.conv_prev_1x1(x_left) x_right = self.conv_1x1(x_right) x_out = self.cell_forward(x_left, x_right) return x_out class PNASNet5Large(nn.Module): def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0.0, global_pool='avg', pad_type=''): super(PNASNet5Large, self).__init__() self.num_classes = num_classes self.num_features = self.head_hidden_size = 4320 assert output_stride == 32 self.conv_0 = ConvNormAct(in_chans, 96, kernel_size=3, stride=2, padding=0, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) self.cell_stem_0 = CellStem0(in_chs_left=96, out_chs_left=54, in_chs_right=96, out_chs_right=54, pad_type=pad_type) self.cell_stem_1 = Cell(in_chs_left=96, out_chs_left=108, in_chs_right=270, out_chs_right=108, pad_type=pad_type, match_prev_layer_dims=True, is_reduction=True) self.cell_0 = Cell(in_chs_left=270, out_chs_left=216, in_chs_right=540, out_chs_right=216, pad_type=pad_type, match_prev_layer_dims=True) self.cell_1 = Cell(in_chs_left=540, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) self.cell_2 = Cell(in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) self.cell_3 = Cell(in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) self.cell_4 = Cell(in_chs_left=1080, out_chs_left=432, in_chs_right=1080, 
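# cell_4 opens the next stage: is_reduction=True halves the spatial resolution while
# per-branch width doubles from 216 to 432 (5 concatenated branches -> 2160 channels)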
out_chs_right=432, pad_type=pad_type, is_reduction=True) self.cell_5 = Cell(in_chs_left=1080, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type, match_prev_layer_dims=True) self.cell_6 = Cell(in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) self.cell_7 = Cell(in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) self.cell_8 = Cell(in_chs_left=2160, out_chs_left=864, in_chs_right=2160, out_chs_right=864, pad_type=pad_type, is_reduction=True) self.cell_9 = Cell(in_chs_left=2160, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type, match_prev_layer_dims=True) self.cell_10 = Cell(in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) self.cell_11 = Cell(in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) self.act = nn.ReLU() self.feature_info = [dict(num_chs=96, reduction=2, module='conv_0'), dict(num_chs=270, reduction=4, module='cell_stem_1.conv_1x1.act'), dict(num_chs=1080, reduction=8, module='cell_4.conv_1x1.act'), dict(num_chs=2160, reduction=16, module='cell_8.conv_1x1.act'), dict(num_chs=4320, reduction=32, module='act')] (self.global_pool, self.head_drop, self.last_linear) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^conv_0|cell_stem_[01]', blocks='^cell_(\\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.last_linear def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.last_linear) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x_conv_0 = self.conv_0(x) x_stem_0 = self.cell_stem_0(x_conv_0) x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0) x_cell_0 = self.cell_0(x_stem_0, x_stem_1) x_cell_1 = self.cell_1(x_stem_1, x_cell_0) x_cell_2 = self.cell_2(x_cell_0, x_cell_1) x_cell_3 = self.cell_3(x_cell_1, x_cell_2) x_cell_4 = self.cell_4(x_cell_2, x_cell_3) x_cell_5 = self.cell_5(x_cell_3, x_cell_4) x_cell_6 = self.cell_6(x_cell_4, x_cell_5) x_cell_7 = self.cell_7(x_cell_5, x_cell_6) x_cell_8 = self.cell_8(x_cell_6, x_cell_7) x_cell_9 = self.cell_9(x_cell_7, x_cell_8) x_cell_10 = self.cell_10(x_cell_8, x_cell_9) x_cell_11 = self.cell_11(x_cell_9, x_cell_10) x = self.act(x_cell_11) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.last_linear(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_pnasnet(variant, pretrained=False, **kwargs): return build_model_with_cfg(PNASNet5Large, variant, pretrained, feature_cfg=dict(feature_cls='hook', no_rewrite=True), **kwargs) default_cfgs = generate_default_cfgs({'pnasnet5large.tf_in1k': {'hf_hub_id': 'timm/', 'input_size': (3, 331, 331), 'pool_size': (11, 11), 'crop_pct': 0.911, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'num_classes': 1000, 'first_conv': 'conv_0.conv', 'classifier': 'last_linear'}}) @register_model def pnasnet5large(pretrained=False, **kwargs) -> PNASNet5Large: model_kwargs = dict(pad_type='same', **kwargs) return _create_pnasnet('pnasnet5large', 
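# pad_type='same' reproduces the TF-style asymmetric padding that the ported
# 'tf_in1k' pretrained weights expect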
pretrained, **model_kwargs) # File: pytorch-image-models-main/timm/models/pvt_v2.py """""" import math from typing import Callable, List, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, to_2tuple, to_ntuple, trunc_normal_, LayerNorm, use_fused_attn from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['PyramidVisionTransformerV2'] class MlpWithDepthwiseConv(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0, extra_relu=False): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.relu = nn.ReLU() if extra_relu else nn.Identity() self.dwconv = nn.Conv2d(hidden_features, hidden_features, 3, 1, 1, bias=True, groups=hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x, feat_size: List[int]): x = self.fc1(x) (B, N, C) = x.shape x = x.transpose(1, 2).view(B, C, feat_size[0], feat_size[1]) x = self.relu(x) x = self.dwconv(x) x = x.flatten(2).transpose(1, 2) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, sr_ratio=1, linear_attn=False, qkv_bias=True, attn_drop=0.0, proj_drop=0.0): super().__init__() assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.' self.dim = dim self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.q = nn.Linear(dim, dim, bias=qkv_bias) self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) if not linear_attn: self.pool = None if sr_ratio > 1: self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) self.norm = nn.LayerNorm(dim) else: self.sr = None self.norm = None self.act = None else: self.pool = nn.AdaptiveAvgPool2d(7) self.sr = nn.Conv2d(dim, dim, kernel_size=1, stride=1) self.norm = nn.LayerNorm(dim) self.act = nn.GELU() def forward(self, x, feat_size: List[int]): (B, N, C) = x.shape (H, W) = feat_size q = self.q(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) if self.pool is not None: x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.sr(self.pool(x)).reshape(B, C, -1).permute(0, 2, 1) x = self.norm(x) x = self.act(x) kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) elif self.sr is not None: x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) x = self.norm(x) kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) else: kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) (k, v) = kv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, 
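# sr_ratio > 1 spatially reduces K/V with a strided conv before attention;
# linear_attn=True instead pools K/V to a fixed 7x7 grid (the '_li' variants)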
num_heads, mlp_ratio=4.0, sr_ratio=1, linear_attn=False, qkv_bias=False, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, sr_ratio=sr_ratio, linear_attn=linear_attn, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = MlpWithDepthwiseConv(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, extra_relu=linear_attn) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x, feat_size: List[int]): x = x + self.drop_path1(self.attn(self.norm1(x), feat_size)) x = x + self.drop_path2(self.mlp(self.norm2(x), feat_size)) return x class OverlapPatchEmbed(nn.Module): def __init__(self, patch_size=7, stride=4, in_chans=3, embed_dim=768): super().__init__() patch_size = to_2tuple(patch_size) assert max(patch_size) > stride, 'Set larger patch_size than stride' self.patch_size = patch_size self.proj = nn.Conv2d(in_chans, embed_dim, patch_size, stride=stride, padding=(patch_size[0] // 2, patch_size[1] // 2)) self.norm = nn.LayerNorm(embed_dim) def forward(self, x): x = self.proj(x) x = x.permute(0, 2, 3, 1) x = self.norm(x) return x class PyramidVisionTransformerStage(nn.Module): def __init__(self, dim: int, dim_out: int, depth: int, downsample: bool=True, num_heads: int=8, sr_ratio: int=1, linear_attn: bool=False, mlp_ratio: float=4.0, qkv_bias: bool=True, proj_drop: float=0.0, attn_drop: float=0.0, drop_path: Union[List[float], float]=0.0, norm_layer: Callable=LayerNorm): super().__init__() self.grad_checkpointing = False if downsample: self.downsample = OverlapPatchEmbed(patch_size=3, stride=2, in_chans=dim, embed_dim=dim_out) else: assert dim == dim_out self.downsample = None self.blocks = nn.ModuleList([Block(dim=dim_out, num_heads=num_heads, sr_ratio=sr_ratio, linear_attn=linear_attn, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for i in range(depth)]) self.norm = norm_layer(dim_out) def forward(self, x): if self.downsample is not None: x = self.downsample(x) (B, H, W, C) = x.shape feat_size = (H, W) x = x.reshape(B, -1, C) for blk in self.blocks: if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint.checkpoint(blk, x, feat_size) else: x = blk(x, feat_size) x = self.norm(x) x = x.reshape(B, feat_size[0], feat_size[1], -1).permute(0, 3, 1, 2).contiguous() return x class PyramidVisionTransformerV2(nn.Module): def __init__(self, in_chans=3, num_classes=1000, global_pool='avg', depths=(3, 4, 6, 3), embed_dims=(64, 128, 256, 512), num_heads=(1, 2, 4, 8), sr_ratios=(8, 4, 2, 1), mlp_ratios=(8.0, 8.0, 4.0, 4.0), qkv_bias=True, linear=False, drop_rate=0.0, proj_drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=LayerNorm): super().__init__() self.num_classes = num_classes assert global_pool in ('avg', '') self.global_pool = global_pool self.depths = depths num_stages = len(depths) mlp_ratios = to_ntuple(num_stages)(mlp_ratios) num_heads = to_ntuple(num_stages)(num_heads) sr_ratios = to_ntuple(num_stages)(sr_ratios) assert len(embed_dims) == num_stages self.feature_info = [] self.patch_embed = OverlapPatchEmbed(patch_size=7, stride=4, in_chans=in_chans, embed_dim=embed_dims[0]) dpr = [x.tolist() for x in 
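# stochastic depth decay rule: drop-path rates increase linearly over all blocks,
# then get chunked into one per-stage list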
torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] cur = 0 prev_dim = embed_dims[0] stages = [] for i in range(num_stages): stages += [PyramidVisionTransformerStage(dim=prev_dim, dim_out=embed_dims[i], depth=depths[i], downsample=i > 0, num_heads=num_heads[i], sr_ratio=sr_ratios[i], mlp_ratio=mlp_ratios[i], linear_attn=linear, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)] prev_dim = embed_dims[i] cur += depths[i] self.feature_info += [dict(num_chs=prev_dim, reduction=4 * 2 ** i, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_features = self.head_hidden_size = embed_dims[-1] self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if m.bias is not None: m.bias.data.zero_() def freeze_patch_emb(self): self.patch_embed.requires_grad = False @torch.jit.ignore def no_weight_decay(self): return {} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^patch_embed', blocks='^stages\\.(\\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('avg', '') self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) x = self.stages(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool: x = x.mean(dim=(-1, -2)) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'patch_embed.proj.weight' in state_dict: return state_dict out_dict = {} import re for (k, v) in state_dict.items(): if k.startswith('patch_embed'): k = k.replace('patch_embed1', 'patch_embed') k = k.replace('patch_embed2', 'stages.1.downsample') k = k.replace('patch_embed3', 'stages.2.downsample') k = k.replace('patch_embed4', 'stages.3.downsample') k = k.replace('dwconv.dwconv', 'dwconv') k = re.sub('block(\\d+).(\\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.blocks.{x.group(2)}', k) k = re.sub('^norm(\\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.norm', k) out_dict[k] = v return out_dict def _create_pvt2(variant, pretrained=False, **kwargs): default_out_indices = tuple(range(4)) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg(PyramidVisionTransformerV2, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj',
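# 'first_conv' and 'classifier' name the modules timm adapts when a model is created
# with in_chans or num_classes differing from the pretrained checkpoint, and
# 'fixed_input_size': False signals the weights tolerate non-default input resolutions.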
'classifier': 'head', 'fixed_input_size': False, **kwargs} default_cfgs = generate_default_cfgs({'pvt_v2_b0.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b1.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b2.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b3.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b4.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b5.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b2_li.in1k': _cfg(hf_hub_id='timm/')}) @register_model def pvt_v2_b0(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(2, 2, 2, 2), embed_dims=(32, 64, 160, 256), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b1(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(2, 2, 2, 2), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b2(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b3(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(3, 4, 18, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b3', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b4(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(3, 8, 27, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b4', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b5(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(3, 6, 40, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), mlp_ratios=(4, 4, 4, 4)) return _create_pvt2('pvt_v2_b5', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b2_li(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), linear=True) return _create_pvt2('pvt_v2_b2_li', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/rdnet.py """""" from functools import partial from typing import List, Optional, Tuple, Union, Callable import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, NormMlpClassifierHead, ClassifierHead, EffectiveSEModule, make_divisible, get_act_layer, get_norm_layer from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply from ._registry import register_model, generate_default_cfgs __all__ = ['RDNet'] class Block(nn.Module): def __init__(self, in_chs, inter_chs, out_chs, norm_layer, act_layer): super().__init__() self.layers = nn.Sequential(nn.Conv2d(in_chs, in_chs, groups=in_chs, kernel_size=7, stride=1, padding=3), norm_layer(in_chs), nn.Conv2d(in_chs, inter_chs, kernel_size=1, stride=1, padding=0), act_layer(), nn.Conv2d(inter_chs, out_chs, kernel_size=1, stride=1, padding=0)) def forward(self, x): return self.layers(x) class BlockESE(nn.Module): def __init__(self, in_chs, inter_chs, out_chs, norm_layer, act_layer): super().__init__() self.layers = nn.Sequential(nn.Conv2d(in_chs, in_chs, groups=in_chs, 
kernel_size=7, stride=1, padding=3), norm_layer(in_chs), nn.Conv2d(in_chs, inter_chs, kernel_size=1, stride=1, padding=0), act_layer(), nn.Conv2d(inter_chs, out_chs, kernel_size=1, stride=1, padding=0), EffectiveSEModule(out_chs)) def forward(self, x): return self.layers(x) def _get_block_type(block: str): block = block.lower().strip() if block == 'block': return Block elif block == 'blockese': return BlockESE else: assert False, f'Unknown block type ({block}).' class DenseBlock(nn.Module): def __init__(self, num_input_features: int=64, growth_rate: int=64, bottleneck_width_ratio: float=4.0, drop_path_rate: float=0.0, drop_rate: float=0.0, rand_gather_step_prob: float=0.0, block_idx: int=0, block_type: str='Block', ls_init_value: float=1e-06, norm_layer: str='layernorm2d', act_layer: str='gelu'): super().__init__() self.drop_rate = drop_rate self.drop_path_rate = drop_path_rate self.rand_gather_step_prob = rand_gather_step_prob self.block_idx = block_idx self.growth_rate = growth_rate self.gamma = nn.Parameter(ls_init_value * torch.ones(growth_rate)) if ls_init_value > 0 else None growth_rate = int(growth_rate) inter_chs = int(num_input_features * bottleneck_width_ratio / 8) * 8 self.drop_path = DropPath(drop_path_rate) self.layers = _get_block_type(block_type)(in_chs=num_input_features, inter_chs=inter_chs, out_chs=growth_rate, norm_layer=norm_layer, act_layer=act_layer) def forward(self, x: List[torch.Tensor]) -> torch.Tensor: x = torch.cat(x, 1) x = self.layers(x) if self.gamma is not None: x = x.mul(self.gamma.reshape(1, -1, 1, 1)) x = self.drop_path(x) return x class DenseStage(nn.Sequential): def __init__(self, num_block, num_input_features, drop_path_rates, growth_rate, **kwargs): super().__init__() for i in range(num_block): layer = DenseBlock(num_input_features=num_input_features, growth_rate=growth_rate, drop_path_rate=drop_path_rates[i], block_idx=i, **kwargs) num_input_features += growth_rate self.add_module(f'dense_block{i}', layer) self.num_out_features = num_input_features def forward(self, init_feature: torch.Tensor) -> torch.Tensor: features = [init_feature] for module in self: new_feature = module(features) features.append(new_feature) return torch.cat(features, 1) class RDNet(nn.Module): def __init__(self, in_chans: int=3, num_classes: int=1000, global_pool: str='avg', growth_rates: Union[List[int], Tuple[int]]=(64, 104, 128, 128, 128, 128, 224), num_blocks_list: Union[List[int], Tuple[int]]=(3, 3, 3, 3, 3, 3, 3), block_type: Union[List[int], Tuple[int]]=('Block',) * 2 + ('BlockESE',) * 5, is_downsample_block: Union[List[bool], Tuple[bool]]=(None, True, True, False, False, False, True), bottleneck_width_ratio: float=4.0, transition_compression_ratio: float=0.5, ls_init_value: float=1e-06, stem_type: str='patch', patch_size: int=4, num_init_features: int=64, head_init_scale: float=1.0, head_norm_first: bool=False, conv_bias: bool=True, act_layer: Union[str, Callable]='gelu', norm_layer: str='layernorm2d', norm_eps: Optional[float]=None, drop_rate: float=0.0, drop_path_rate: float=0.0): super().__init__() assert len(growth_rates) == len(num_blocks_list) == len(is_downsample_block) act_layer = get_act_layer(act_layer) norm_layer = get_norm_layer(norm_layer) if norm_eps is not None: norm_layer = partial(norm_layer, eps=norm_eps) self.num_classes = num_classes self.drop_rate = drop_rate assert stem_type in ('patch', 'overlap', 'overlap_tiered') if stem_type == 'patch': self.stem = nn.Sequential(nn.Conv2d(in_chans, num_init_features, kernel_size=patch_size, stride=patch_size, 
bias=conv_bias), norm_layer(num_init_features)) stem_stride = patch_size else: mid_chs = make_divisible(num_init_features // 2) if 'tiered' in stem_type else num_init_features self.stem = nn.Sequential(nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias), nn.Conv2d(mid_chs, num_init_features, kernel_size=3, stride=2, padding=1, bias=conv_bias), norm_layer(num_init_features)) stem_stride = 4 self.feature_info = [] self.num_stages = len(growth_rates) curr_stride = stem_stride num_features = num_init_features dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(num_blocks_list)).split(num_blocks_list)] dense_stages = [] for i in range(self.num_stages): dense_stage_layers = [] if i != 0: compressed_num_features = int(num_features * transition_compression_ratio / 8) * 8 k_size = stride = 1 if is_downsample_block[i]: curr_stride *= 2 k_size = stride = 2 dense_stage_layers.append(norm_layer(num_features)) dense_stage_layers.append(nn.Conv2d(num_features, compressed_num_features, kernel_size=k_size, stride=stride, padding=0)) num_features = compressed_num_features stage = DenseStage(num_block=num_blocks_list[i], num_input_features=num_features, growth_rate=growth_rates[i], bottleneck_width_ratio=bottleneck_width_ratio, drop_rate=drop_rate, drop_path_rates=dp_rates[i], ls_init_value=ls_init_value, block_type=block_type[i], norm_layer=norm_layer, act_layer=act_layer) dense_stage_layers.append(stage) num_features += num_blocks_list[i] * growth_rates[i] if i + 1 == self.num_stages or (i + 1 != self.num_stages and is_downsample_block[i + 1]): self.feature_info += [dict(num_chs=num_features, reduction=curr_stride, module=f'dense_stages.{i}', growth_rate=growth_rates[i])] dense_stages.append(nn.Sequential(*dense_stage_layers)) self.dense_stages = nn.Sequential(*dense_stages) self.num_features = self.head_hidden_size = num_features if head_norm_first: self.norm_pre = norm_layer(self.num_features) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) else: self.norm_pre = nn.Identity() self.head = NormMlpClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, norm_layer=norm_layer) named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
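# Feature index semantics for forward_intermediates: index 0 is the stem output and
# indices 1..len(self.dense_stages) are the outputs of successive dense stages; an int
# `indices` selects the last n of these, while a list/tuple selects explicit positions.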
intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.dense_stages) + 1, indices) feat_idx = 0 x = self.stem(x) if feat_idx in take_indices: intermediates.append(x) if torch.jit.is_scripting() or not stop_early: dense_stages = self.dense_stages else: dense_stages = self.dense_stages[:max_index] for stage in dense_stages: feat_idx += 1 x = stage(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates x = self.norm_pre(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.dense_stages) + 1, indices) self.dense_stages = self.dense_stages[:max_index] if prune_norm: self.norm_pre = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) x = self.dense_stages(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.head(x) return x @torch.jit.ignore def group_matcher(self, coarse=False): assert not coarse, 'coarse grouping is not implemented for RDNet' return dict(stem='^stem', blocks='^dense_stages\\.(\\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.dense_stages: s.grad_checkpointing = enable def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight) elif isinstance(module, nn.BatchNorm2d): nn.init.constant_(module.weight, 1) nn.init.constant_(module.bias, 0) elif isinstance(module, nn.Linear): nn.init.constant_(module.bias, 0) if name and 'head.' 
in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def checkpoint_filter_fn(state_dict, model): if 'stem.0.weight' in state_dict: return state_dict if 'model' in state_dict: state_dict = state_dict['model'] out_dict = {} for (k, v) in state_dict.items(): k = k.replace('stem.stem.', 'stem.') out_dict[k] = v return out_dict def _create_rdnet(variant, pretrained=False, **kwargs): model = build_model_with_cfg(RDNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', 'paper_ids': 'arXiv:2403.19588', 'paper_name': 'DenseNets Reloaded: Paradigm Shift Beyond ResNets and ViTs', 'origin_url': 'https://github.com/naver-ai/rdnet', **kwargs} default_cfgs = generate_default_cfgs({'rdnet_tiny.nv_in1k': _cfg(hf_hub_id='naver-ai/rdnet_tiny.nv_in1k'), 'rdnet_small.nv_in1k': _cfg(hf_hub_id='naver-ai/rdnet_small.nv_in1k'), 'rdnet_base.nv_in1k': _cfg(hf_hub_id='naver-ai/rdnet_base.nv_in1k'), 'rdnet_large.nv_in1k': _cfg(hf_hub_id='naver-ai/rdnet_large.nv_in1k'), 'rdnet_large.nv_in1k_ft_in1k_384': _cfg(hf_hub_id='naver-ai/rdnet_large.nv_in1k_ft_in1k_384', input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12))}) @register_model def rdnet_tiny(pretrained=False, **kwargs): n_layer = 7 model_args = {'num_init_features': 64, 'growth_rates': [64] + [104] + [128] * 4 + [224], 'num_blocks_list': [3] * n_layer, 'is_downsample_block': (None, True, True, False, False, False, True), 'transition_compression_ratio': 0.5, 'block_type': ['Block'] + ['Block'] + ['BlockESE'] * 4 + ['BlockESE']} model = _create_rdnet('rdnet_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def rdnet_small(pretrained=False, **kwargs): n_layer = 11 model_args = {'num_init_features': 72, 'growth_rates': [64] + [128] + [128] * (n_layer - 4) + [240] * 2, 'num_blocks_list': [3] * n_layer, 'is_downsample_block': (None, True, True, False, False, False, False, False, False, True, False), 'transition_compression_ratio': 0.5, 'block_type': ['Block'] + ['Block'] + ['BlockESE'] * (n_layer - 4) + ['BlockESE'] * 2} model = _create_rdnet('rdnet_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def rdnet_base(pretrained=False, **kwargs): n_layer = 11 model_args = {'num_init_features': 120, 'growth_rates': [96] + [128] + [168] * (n_layer - 4) + [336] * 2, 'num_blocks_list': [3] * n_layer, 'is_downsample_block': (None, True, True, False, False, False, False, False, False, True, False), 'transition_compression_ratio': 0.5, 'block_type': ['Block'] + ['Block'] + ['BlockESE'] * (n_layer - 4) + ['BlockESE'] * 2} model = _create_rdnet('rdnet_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def rdnet_large(pretrained=False, **kwargs): n_layer = 12 model_args = {'num_init_features': 144, 'growth_rates': [128] + [192] + [256] * (n_layer - 4) + [360] * 2, 'num_blocks_list': [3] * n_layer, 'is_downsample_block': (None, True, True, False, False, False, False, False, False, False, True, False), 'transition_compression_ratio': 0.5, 'block_type': ['Block'] + ['Block'] + ['BlockESE'] * (n_layer - 4) + ['BlockESE'] * 2} model = 
_create_rdnet('rdnet_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model # File: pytorch-image-models-main/timm/models/regnet.py """""" import math from dataclasses import dataclass, replace from functools import partial from typing import Callable, List, Optional, Union, Tuple import numpy as np import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead, AvgPool2dSame, ConvNormAct, SEModule, DropPath, GroupNormAct from timm.layers import get_act_layer, get_norm_act_layer, create_conv2d, make_divisible from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq, named_apply from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['RegNet', 'RegNetCfg'] @dataclass class RegNetCfg: depth: int = 21 w0: int = 80 wa: float = 42.63 wm: float = 2.66 group_size: int = 24 bottle_ratio: float = 1.0 se_ratio: float = 0.0 group_min_ratio: float = 0.0 stem_width: int = 32 downsample: Optional[str] = 'conv1x1' linear_out: bool = False preact: bool = False num_features: int = 0 act_layer: Union[str, Callable] = 'relu' norm_layer: Union[str, Callable] = 'batchnorm' def quantize_float(f, q): return int(round(f / q) * q) def adjust_widths_groups_comp(widths, bottle_ratios, groups, min_ratio=0.0): bottleneck_widths = [int(w * b) for (w, b) in zip(widths, bottle_ratios)] groups = [min(g, w_bot) for (g, w_bot) in zip(groups, bottleneck_widths)] if min_ratio: bottleneck_widths = [make_divisible(w_bot, g, min_ratio) for (w_bot, g) in zip(bottleneck_widths, groups)] else: bottleneck_widths = [quantize_float(w_bot, g) for (w_bot, g) in zip(bottleneck_widths, groups)] widths = [int(w_bot / b) for (w_bot, b) in zip(bottleneck_widths, bottle_ratios)] return (widths, groups) def generate_regnet(width_slope, width_initial, width_mult, depth, group_size, quant=8): assert width_slope >= 0 and width_initial > 0 and (width_mult > 1) and (width_initial % quant == 0) widths_cont = np.arange(depth) * width_slope + width_initial width_exps = np.round(np.log(widths_cont / width_initial) / np.log(width_mult)) widths = np.round(np.divide(width_initial * np.power(width_mult, width_exps), quant)) * quant (num_stages, max_stage) = (len(np.unique(widths)), width_exps.max() + 1) groups = np.array([group_size for _ in range(num_stages)]) return (widths.astype(int).tolist(), num_stages, groups.astype(int).tolist()) def downsample_conv(in_chs, out_chs, kernel_size=1, stride=1, dilation=1, norm_layer=None, preact=False): norm_layer = norm_layer or nn.BatchNorm2d kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size dilation = dilation if kernel_size > 1 else 1 if preact: return create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation) else: return ConvNormAct(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, norm_layer=norm_layer, apply_act=False) def downsample_avg(in_chs, out_chs, kernel_size=1, stride=1, dilation=1, norm_layer=None, preact=False): norm_layer = norm_layer or nn.BatchNorm2d avg_stride = stride if dilation == 1 else 1 pool = nn.Identity() if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) if preact: conv = create_conv2d(in_chs, out_chs, 1, stride=1) else: conv = ConvNormAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, 
apply_act=False) return nn.Sequential(*[pool, conv]) def create_shortcut(downsample_type, in_chs, out_chs, kernel_size, stride, dilation=(1, 1), norm_layer=None, preact=False): assert downsample_type in ('avg', 'conv1x1', '', None) if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: dargs = dict(stride=stride, dilation=dilation[0], norm_layer=norm_layer, preact=preact) if not downsample_type: return None elif downsample_type == 'avg': return downsample_avg(in_chs, out_chs, **dargs) else: return downsample_conv(in_chs, out_chs, kernel_size=kernel_size, **dargs) else: return nn.Identity() class Bottleneck(nn.Module): def __init__(self, in_chs, out_chs, stride=1, dilation=(1, 1), bottle_ratio=1, group_size=1, se_ratio=0.25, downsample='conv1x1', linear_out=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_block=None, drop_path_rate=0.0): super(Bottleneck, self).__init__() act_layer = get_act_layer(act_layer) bottleneck_chs = int(round(out_chs * bottle_ratio)) groups = bottleneck_chs // group_size cargs = dict(act_layer=act_layer, norm_layer=norm_layer) self.conv1 = ConvNormAct(in_chs, bottleneck_chs, kernel_size=1, **cargs) self.conv2 = ConvNormAct(bottleneck_chs, bottleneck_chs, kernel_size=3, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, **cargs) if se_ratio: se_channels = int(round(in_chs * se_ratio)) self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer) else: self.se = nn.Identity() self.conv3 = ConvNormAct(bottleneck_chs, out_chs, kernel_size=1, apply_act=False, **cargs) self.act3 = nn.Identity() if linear_out else act_layer() self.downsample = create_shortcut(downsample, in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation, norm_layer=norm_layer) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def zero_init_last(self): nn.init.zeros_(self.conv3.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.conv2(x) x = self.se(x) x = self.conv3(x) if self.downsample is not None: x = self.drop_path(x) + self.downsample(shortcut) x = self.act3(x) return x class PreBottleneck(nn.Module): def __init__(self, in_chs, out_chs, stride=1, dilation=(1, 1), bottle_ratio=1, group_size=1, se_ratio=0.25, downsample='conv1x1', linear_out=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_block=None, drop_path_rate=0.0): super(PreBottleneck, self).__init__() norm_act_layer = get_norm_act_layer(norm_layer, act_layer) bottleneck_chs = int(round(out_chs * bottle_ratio)) groups = bottleneck_chs // group_size self.norm1 = norm_act_layer(in_chs) self.conv1 = create_conv2d(in_chs, bottleneck_chs, kernel_size=1) self.norm2 = norm_act_layer(bottleneck_chs) self.conv2 = create_conv2d(bottleneck_chs, bottleneck_chs, kernel_size=3, stride=stride, dilation=dilation[0], groups=groups) if se_ratio: se_channels = int(round(in_chs * se_ratio)) self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer) else: self.se = nn.Identity() self.norm3 = norm_act_layer(bottleneck_chs) self.conv3 = create_conv2d(bottleneck_chs, out_chs, kernel_size=1) self.downsample = create_shortcut(downsample, in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation, preact=True) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def zero_init_last(self): pass def forward(self, x): x = self.norm1(x) shortcut = x x = self.conv1(x) x = self.norm2(x) x = self.conv2(x) x = self.se(x) x = self.norm3(x) x = self.conv3(x) if self.downsample is not 
None: x = self.drop_path(x) + self.downsample(shortcut) return x class RegStage(nn.Module): def __init__(self, depth, in_chs, out_chs, stride, dilation, drop_path_rates=None, block_fn=Bottleneck, **block_kwargs): super(RegStage, self).__init__() self.grad_checkpointing = False first_dilation = 1 if dilation in (1, 2) else 2 for i in range(depth): block_stride = stride if i == 0 else 1 block_in_chs = in_chs if i == 0 else out_chs block_dilation = (first_dilation, dilation) dpr = drop_path_rates[i] if drop_path_rates is not None else 0.0 name = 'b{}'.format(i + 1) self.add_module(name, block_fn(block_in_chs, out_chs, stride=block_stride, dilation=block_dilation, drop_path_rate=dpr, **block_kwargs)) first_dilation = dilation def forward(self, x): if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.children(), x) else: for block in self.children(): x = block(x) return x class RegNet(nn.Module): def __init__(self, cfg: RegNetCfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.0, drop_path_rate=0.0, zero_init_last=True, **kwargs): super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate assert output_stride in (8, 16, 32) cfg = replace(cfg, **kwargs) stem_width = cfg.stem_width na_args = dict(act_layer=cfg.act_layer, norm_layer=cfg.norm_layer) if cfg.preact: self.stem = create_conv2d(in_chans, stem_width, 3, stride=2) else: self.stem = ConvNormAct(in_chans, stem_width, 3, stride=2, **na_args) self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')] prev_width = stem_width curr_stride = 2 (per_stage_args, common_args) = self._get_stage_args(cfg, output_stride=output_stride, drop_path_rate=drop_path_rate) assert len(per_stage_args) == 4 block_fn = PreBottleneck if cfg.preact else Bottleneck for (i, stage_args) in enumerate(per_stage_args): stage_name = 's{}'.format(i + 1) self.add_module(stage_name, RegStage(in_chs=prev_width, block_fn=block_fn, **stage_args, **common_args)) prev_width = stage_args['out_chs'] curr_stride *= stage_args['stride'] self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)] if cfg.num_features: self.final_conv = ConvNormAct(prev_width, cfg.num_features, kernel_size=1, **na_args) self.num_features = cfg.num_features else: final_act = cfg.linear_out or cfg.preact self.final_conv = get_act_layer(cfg.act_layer)() if final_act else nn.Identity() self.num_features = prev_width self.head_hidden_size = self.num_features self.head = ClassifierHead(in_features=self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) def _get_stage_args(self, cfg: RegNetCfg, default_stride=2, output_stride=32, drop_path_rate=0.0): (widths, num_stages, stage_gs) = generate_regnet(cfg.wa, cfg.w0, cfg.wm, cfg.depth, cfg.group_size) (stage_widths, stage_depths) = np.unique(widths, return_counts=True) stage_br = [cfg.bottle_ratio for _ in range(num_stages)] stage_strides = [] stage_dilations = [] net_stride = 2 dilation = 1 for _ in range(num_stages): if net_stride >= output_stride: dilation *= default_stride stride = 1 else: stride = default_stride net_stride *= stride stage_strides.append(stride) stage_dilations.append(dilation) stage_dpr = np.split(np.linspace(0, drop_path_rate, sum(stage_depths)), np.cumsum(stage_depths[:-1])) (stage_widths, stage_gs) = adjust_widths_groups_comp(stage_widths, stage_br, stage_gs, min_ratio=cfg.group_min_ratio) arg_names = 
['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_size', 'drop_path_rates'] per_stage_args = [dict(zip(arg_names, params)) for params in zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_br, stage_gs, stage_dpr)] common_args = dict(downsample=cfg.downsample, se_ratio=cfg.se_ratio, linear_out=cfg.linear_out, act_layer=cfg.act_layer, norm_layer=cfg.norm_layer) return (per_stage_args, common_args) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks='^s(\\d+)' if coarse else '^s(\\d+)\\.b(\\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in list(self.children())[1:-1]: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.head.reset(num_classes, pool_type=global_pool) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] (take_indices, max_index) = feature_take_indices(5, indices) feat_idx = 0 x = self.stem(x) if feat_idx in take_indices: intermediates.append(x) layer_names = ('s1', 's2', 's3', 's4') if stop_early: layer_names = layer_names[:max_index] for n in layer_names: feat_idx += 1 x = getattr(self, n)(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates if feat_idx == 4: x = self.final_conv(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(5, indices) layer_names = ('s1', 's2', 's3', 's4') layer_names = layer_names[max_index:] for n in layer_names: setattr(self, n, nn.Identity()) if max_index < 4: self.final_conv = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) x = self.s1(x) x = self.s2(x) x = self.s3(x) x = self.s4(x) x = self.final_conv(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name='', zero_init_last=False): if isinstance(module, nn.Conv2d): fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Linear): nn.init.normal_(module.weight, mean=0.0, std=0.01) if module.bias is not None: nn.init.zeros_(module.bias) elif zero_init_last and hasattr(module, 'zero_init_last'): module.zero_init_last() def _filter_fn(state_dict): state_dict = state_dict.get('model', state_dict) replaces = [('f.a.0', 'conv1.conv'), ('f.a.1', 'conv1.bn'), ('f.b.0', 'conv2.conv'), ('f.b.1', 'conv2.bn'), ('f.final_bn', 'conv3.bn'), ('f.se.excitation.0', 'se.fc1'), ('f.se.excitation.2', 'se.fc2'), ('f.se', 'se'), ('f.c.0', 'conv3.conv'), ('f.c.1', 'conv3.bn'), ('f.c', 'conv3.conv'), ('proj.0', 'downsample.conv'), ('proj.1', 'downsample.bn'), ('proj', 'downsample.conv')] if 'classy_state_dict' in state_dict: import re state_dict = 
state_dict['classy_state_dict']['base_model']['model'] out = {} for (k, v) in state_dict['trunk'].items(): k = k.replace('_feature_blocks.conv1.stem.0', 'stem.conv') k = k.replace('_feature_blocks.conv1.stem.1', 'stem.bn') k = re.sub('^_feature_blocks.res\\d.block(\\d)-(\\d+)', lambda x: f's{int(x.group(1))}.b{int(x.group(2)) + 1}', k) k = re.sub('s(\\d)\\.b(\\d+)\\.bn', 's\\1.b\\2.downsample.bn', k) for (s, r) in replaces: k = k.replace(s, r) out[k] = v for (k, v) in state_dict['heads'].items(): if 'projection_head' in k or 'prototypes' in k: continue k = k.replace('0.clf.0', 'head.fc') out[k] = v return out if 'stem.0.weight' in state_dict: import re out = {} for (k, v) in state_dict.items(): k = k.replace('stem.0', 'stem.conv') k = k.replace('stem.1', 'stem.bn') k = re.sub('trunk_output.block(\\d)\\.block(\\d+)\\-(\\d+)', lambda x: f's{int(x.group(1))}.b{int(x.group(3)) + 1}', k) for (s, r) in replaces: k = k.replace(s, r) k = k.replace('fc.', 'head.fc.') out[k] = v return out return state_dict model_cfgs = dict(regnetx_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13), regnetx_004=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22), regnetx_004_tv=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22, group_min_ratio=0.9), regnetx_006=RegNetCfg(w0=48, wa=36.97, wm=2.24, group_size=24, depth=16), regnetx_008=RegNetCfg(w0=56, wa=35.73, wm=2.28, group_size=16, depth=16), regnetx_016=RegNetCfg(w0=80, wa=34.01, wm=2.25, group_size=24, depth=18), regnetx_032=RegNetCfg(w0=88, wa=26.31, wm=2.25, group_size=48, depth=25), regnetx_040=RegNetCfg(w0=96, wa=38.65, wm=2.43, group_size=40, depth=23), regnetx_064=RegNetCfg(w0=184, wa=60.83, wm=2.07, group_size=56, depth=17), regnetx_080=RegNetCfg(w0=80, wa=49.56, wm=2.88, group_size=120, depth=23), regnetx_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19), regnetx_160=RegNetCfg(w0=216, wa=55.59, wm=2.1, group_size=128, depth=22), regnetx_320=RegNetCfg(w0=320, wa=69.86, wm=2.0, group_size=168, depth=23), regnety_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13, se_ratio=0.25), regnety_004=RegNetCfg(w0=48, wa=27.89, wm=2.09, group_size=8, depth=16, se_ratio=0.25), regnety_006=RegNetCfg(w0=48, wa=32.54, wm=2.32, group_size=16, depth=15, se_ratio=0.25), regnety_008=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25), regnety_008_tv=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25, group_min_ratio=0.9), regnety_016=RegNetCfg(w0=48, wa=20.71, wm=2.65, group_size=24, depth=27, se_ratio=0.25), regnety_032=RegNetCfg(w0=80, wa=42.63, wm=2.66, group_size=24, depth=21, se_ratio=0.25), regnety_040=RegNetCfg(w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25), regnety_064=RegNetCfg(w0=112, wa=33.22, wm=2.27, group_size=72, depth=25, se_ratio=0.25), regnety_080=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25), regnety_080_tv=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25, group_min_ratio=0.9), regnety_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19, se_ratio=0.25), regnety_160=RegNetCfg(w0=200, wa=106.23, wm=2.48, group_size=112, depth=18, se_ratio=0.25), regnety_320=RegNetCfg(w0=232, wa=115.89, wm=2.53, group_size=232, depth=20, se_ratio=0.25), regnety_640=RegNetCfg(w0=352, wa=147.48, wm=2.4, group_size=328, depth=20, se_ratio=0.25), regnety_1280=RegNetCfg(w0=456, wa=160.83, wm=2.52, group_size=264, depth=27, se_ratio=0.25), regnety_2560=RegNetCfg(w0=640, wa=230.83, 
wm=2.53, group_size=373, depth=27, se_ratio=0.25), regnety_040_sgn=RegNetCfg(w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25, act_layer='silu', norm_layer=partial(GroupNormAct, group_size=16)), regnetv_040=RegNetCfg(depth=22, w0=96, wa=31.41, wm=2.24, group_size=64, se_ratio=0.25, preact=True, act_layer='silu'), regnetv_064=RegNetCfg(depth=25, w0=112, wa=33.22, wm=2.27, group_size=72, se_ratio=0.25, preact=True, act_layer='silu', downsample='avg'), regnetz_005=RegNetCfg(depth=21, w0=16, wa=10.7, wm=2.51, group_size=4, bottle_ratio=4.0, se_ratio=0.25, downsample=None, linear_out=True, num_features=1024, act_layer='silu'), regnetz_040=RegNetCfg(depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25, downsample=None, linear_out=True, num_features=0, act_layer='silu'), regnetz_040_h=RegNetCfg(depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25, downsample=None, linear_out=True, num_features=1536, act_layer='silu')) def _create_regnet(variant, pretrained, **kwargs): return build_model_with_cfg(RegNet, variant, pretrained, model_cfg=model_cfgs[variant], pretrained_filter_fn=_filter_fn, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'test_input_size': (3, 288, 288), 'crop_pct': 0.95, 'test_crop_pct': 1.0, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', **kwargs} def _cfgpyc(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', 'license': 'mit', 'origin_url': 'https://github.com/facebookresearch/pycls', **kwargs} def _cfgtv2(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.965, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', 'license': 'bsd-3-clause', 'origin_url': 'https://github.com/pytorch/vision', **kwargs} default_cfgs = generate_default_cfgs({'regnety_032.ra_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth'), 'regnety_040.ra3_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_040_ra3-670e1166.pth'), 'regnety_064.ra3_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_064_ra3-aa26dc7d.pth'), 'regnety_080.ra3_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_080_ra3-1fdc4344.pth'), 'regnety_120.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), 'regnety_160.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), 'regnety_160.lion_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), 'regnety_120.sw_in12k': _cfg(hf_hub_id='timm/', num_classes=11821), 'regnety_160.sw_in12k': _cfg(hf_hub_id='timm/', num_classes=11821), 'regnety_040_sgn.untrained': _cfg(url=''), 'regnetv_040.ra3_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_040_ra3-c248f51f.pth', first_conv='stem'), 'regnetv_064.ra3_in1k': 
_cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_064_ra3-530616c2.pth', first_conv='stem'), 'regnetz_005.untrained': _cfg(url=''), 'regnetz_040.ra3_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040_ra3-9007edf5.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)), 'regnetz_040_h.ra3_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040h_ra3-f594343b.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)), 'regnety_160.deit_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/regnety_160-a5fe301d.pth'), 'regnetx_004_tv.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_400mf-62229a5f.pth'), 'regnetx_008.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_800mf-94a99ebd.pth'), 'regnetx_016.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_1_6gf-a12f2b72.pth'), 'regnetx_032.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_3_2gf-7071aa85.pth'), 'regnetx_080.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_8gf-2b70d774.pth'), 'regnetx_160.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_16gf-ba3796d7.pth'), 'regnetx_320.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_32gf-6eb8fdc6.pth'), 'regnety_004.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_400mf-e6988f5f.pth'), 'regnety_008_tv.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_800mf-58fc7688.pth'), 'regnety_016.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_1_6gf-0d7bc02a.pth'), 'regnety_032.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_3_2gf-9180c971.pth'), 'regnety_080_tv.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_8gf-dc2b1b54.pth'), 'regnety_160.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_16gf-3e4a00f9.pth'), 'regnety_320.tv2_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_32gf-8db6d4b5.pth'), 'regnety_160.swag_ft_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_16gf_swag-43afe44d.pth', license='cc-by-nc-4.0', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_320.swag_ft_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_32gf_swag-04fdfa75.pth', license='cc-by-nc-4.0', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_1280.swag_ft_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_128gf_swag-c8ce3e52.pth', license='cc-by-nc-4.0', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_160.swag_lc_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_16gf_lc_swag-f3ec0043.pth', license='cc-by-nc-4.0'), 'regnety_320.swag_lc_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_32gf_lc_swag-e1583746.pth', license='cc-by-nc-4.0'), 
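# SWAG weights ('swag_ft' = end-to-end fine-tuned at 384x384, 'swag_lc' = linear
# classifier on a frozen trunk) carry a CC-BY-NC-4.0 license, hence the override.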
'regnety_1280.swag_lc_in1k': _cfgtv2(hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_128gf_lc_swag-cbe8ce12.pth', license='cc-by-nc-4.0'), 'regnety_320.seer_ft_in1k': _cfgtv2(hf_hub_id='timm/', license='other', origin_url='https://github.com/facebookresearch/vissl', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_640.seer_ft_in1k': _cfgtv2(hf_hub_id='timm/', license='other', origin_url='https://github.com/facebookresearch/vissl', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_1280.seer_ft_in1k': _cfgtv2(hf_hub_id='timm/', license='other', origin_url='https://github.com/facebookresearch/vissl', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_2560.seer_ft_in1k': _cfgtv2(hf_hub_id='timm/', license='other', origin_url='https://github.com/facebookresearch/vissl', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet256_finetuned_in1k_model_final_checkpoint_phase38.torch', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_320.seer': _cfgtv2(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'), 'regnety_640.seer': _cfgtv2(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'), 'regnety_1280.seer': _cfgtv2(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'), 'regnetx_002.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_004.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_006.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_008.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_016.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_032.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_040.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_064.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_080.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_120.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_160.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_320.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_002.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_004.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_006.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_008.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_016.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_032.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_040.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_064.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_080.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_120.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_160.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_320.pycls_in1k': 
_cfgpyc(hf_hub_id='timm/')}) @register_model def regnetx_002(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_002', pretrained, **kwargs) @register_model def regnetx_004(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_004', pretrained, **kwargs) @register_model def regnetx_004_tv(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_004_tv', pretrained, **kwargs) @register_model def regnetx_006(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_006', pretrained, **kwargs) @register_model def regnetx_008(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_008', pretrained, **kwargs) @register_model def regnetx_016(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_016', pretrained, **kwargs) @register_model def regnetx_032(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_032', pretrained, **kwargs) @register_model def regnetx_040(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_040', pretrained, **kwargs) @register_model def regnetx_064(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_064', pretrained, **kwargs) @register_model def regnetx_080(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_080', pretrained, **kwargs) @register_model def regnetx_120(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_120', pretrained, **kwargs) @register_model def regnetx_160(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_160', pretrained, **kwargs) @register_model def regnetx_320(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetx_320', pretrained, **kwargs) @register_model def regnety_002(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_002', pretrained, **kwargs) @register_model def regnety_004(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_004', pretrained, **kwargs) @register_model def regnety_006(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_006', pretrained, **kwargs) @register_model def regnety_008(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_008', pretrained, **kwargs) @register_model def regnety_008_tv(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_008_tv', pretrained, **kwargs) @register_model def regnety_016(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_016', pretrained, **kwargs) @register_model def regnety_032(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_032', pretrained, **kwargs) @register_model def regnety_040(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_040', pretrained, **kwargs) @register_model def regnety_064(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_064', pretrained, **kwargs) @register_model def regnety_080(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_080', pretrained, **kwargs) @register_model def regnety_080_tv(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_080_tv', pretrained, **kwargs) @register_model def regnety_120(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_120', pretrained, **kwargs) @register_model def regnety_160(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_160', pretrained, **kwargs) @register_model def regnety_320(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_320', pretrained, 
**kwargs) @register_model def regnety_640(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_640', pretrained, **kwargs) @register_model def regnety_1280(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_1280', pretrained, **kwargs) @register_model def regnety_2560(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_2560', pretrained, **kwargs) @register_model def regnety_040_sgn(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnety_040_sgn', pretrained, **kwargs) @register_model def regnetv_040(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetv_040', pretrained, **kwargs) @register_model def regnetv_064(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetv_064', pretrained, **kwargs) @register_model def regnetz_005(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetz_005', pretrained, zero_init_last=False, **kwargs) @register_model def regnetz_040(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetz_040', pretrained, zero_init_last=False, **kwargs) @register_model def regnetz_040_h(pretrained=False, **kwargs) -> RegNet: return _create_regnet('regnetz_040_h', pretrained, zero_init_last=False, **kwargs) register_model_deprecations(__name__, {'regnetz_040h': 'regnetz_040_h'}) # File: pytorch-image-models-main/timm/models/repghost.py """""" import copy from functools import partial from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d, Linear, make_divisible from ._builder import build_model_with_cfg from ._efficientnet_blocks import SqueezeExcite, ConvBnAct from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['RepGhostNet'] _SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4)) class RepGhostModule(nn.Module): def __init__(self, in_chs, out_chs, kernel_size=1, dw_size=3, stride=1, relu=True, reparam=True): super(RepGhostModule, self).__init__() self.out_chs = out_chs init_chs = out_chs new_chs = out_chs self.primary_conv = nn.Sequential(nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False), nn.BatchNorm2d(init_chs), nn.ReLU(inplace=True) if relu else nn.Identity()) fusion_conv = [] fusion_bn = [] if reparam: fusion_conv.append(nn.Identity()) fusion_bn.append(nn.BatchNorm2d(init_chs)) self.fusion_conv = nn.Sequential(*fusion_conv) self.fusion_bn = nn.Sequential(*fusion_bn) self.cheap_operation = nn.Sequential(nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size // 2, groups=init_chs, bias=False), nn.BatchNorm2d(new_chs)) self.relu = nn.ReLU(inplace=False) if relu else nn.Identity() def forward(self, x): x1 = self.primary_conv(x) x2 = self.cheap_operation(x1) for (conv, bn) in zip(self.fusion_conv, self.fusion_bn): x2 = x2 + bn(conv(x1)) return self.relu(x2) def get_equivalent_kernel_bias(self): (kernel3x3, bias3x3) = self._fuse_bn_tensor(self.cheap_operation[0], self.cheap_operation[1]) for (conv, bn) in zip(self.fusion_conv, self.fusion_bn): (kernel, bias) = self._fuse_bn_tensor(conv, bn, kernel3x3.shape[0], kernel3x3.device) kernel3x3 += self._pad_1x1_to_3x3_tensor(kernel) bias3x3 += bias return (kernel3x3, bias3x3) @staticmethod def _pad_1x1_to_3x3_tensor(kernel1x1): if kernel1x1 is None: return 0 else: return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1]) 
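# BN folding used by _fuse_bn_tensor below: a BatchNorm with running stats (mu, var),
# affine parameters (gamma, beta) and epsilon eps folds into a preceding conv weight W as
# W' = W * gamma / sqrt(var + eps) with bias b' = beta - mu * gamma / sqrt(var + eps).
# An nn.Identity fusion branch is modeled as a depthwise 1x1 all-ones kernel so the same
# folding applies; _pad_1x1_to_3x3_tensor above zero-pads it to match the 3x3 cheap_operation.
# Minimal deploy-time sketch (assumes pretrained weights are available via timm):
#   m = timm.create_model('repghostnet_100', pretrained=True).eval()
#   m.convert_to_deploy()  # folds fusion branches; outputs match up to float error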
@staticmethod def _fuse_bn_tensor(conv, bn, in_channels=None, device=None): in_channels = in_channels if in_channels else bn.running_mean.shape[0] device = device if device else bn.weight.device if isinstance(conv, nn.Conv2d): kernel = conv.weight assert conv.bias is None else: assert isinstance(conv, nn.Identity) kernel = torch.ones(in_channels, 1, 1, 1, device=device) if isinstance(bn, nn.BatchNorm2d): running_mean = bn.running_mean running_var = bn.running_var gamma = bn.weight beta = bn.bias eps = bn.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return (kernel * t, beta - running_mean * gamma / std) assert isinstance(bn, nn.Identity) return (kernel, torch.zeros(in_channels).to(kernel.device)) def switch_to_deploy(self): if len(self.fusion_conv) == 0 and len(self.fusion_bn) == 0: return (kernel, bias) = self.get_equivalent_kernel_bias() self.cheap_operation = nn.Conv2d(in_channels=self.cheap_operation[0].in_channels, out_channels=self.cheap_operation[0].out_channels, kernel_size=self.cheap_operation[0].kernel_size, padding=self.cheap_operation[0].padding, dilation=self.cheap_operation[0].dilation, groups=self.cheap_operation[0].groups, bias=True) self.cheap_operation.weight.data = kernel self.cheap_operation.bias.data = bias self.__delattr__('fusion_conv') self.__delattr__('fusion_bn') self.fusion_conv = [] self.fusion_bn = [] def reparameterize(self): self.switch_to_deploy() class RepGhostBottleneck(nn.Module): def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3, stride=1, act_layer=nn.ReLU, se_ratio=0.0, reparam=True): super(RepGhostBottleneck, self).__init__() has_se = se_ratio is not None and se_ratio > 0.0 self.stride = stride self.ghost1 = RepGhostModule(in_chs, mid_chs, relu=True, reparam=reparam) if self.stride > 1: self.conv_dw = nn.Conv2d(mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size - 1) // 2, groups=mid_chs, bias=False) self.bn_dw = nn.BatchNorm2d(mid_chs) else: self.conv_dw = None self.bn_dw = None self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None self.ghost2 = RepGhostModule(mid_chs, out_chs, relu=False, reparam=reparam) if in_chs == out_chs and self.stride == 1: self.shortcut = nn.Sequential() else: self.shortcut = nn.Sequential(nn.Conv2d(in_chs, in_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size - 1) // 2, groups=in_chs, bias=False), nn.BatchNorm2d(in_chs), nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), nn.BatchNorm2d(out_chs)) def forward(self, x): shortcut = x x = self.ghost1(x) if self.conv_dw is not None: x = self.conv_dw(x) x = self.bn_dw(x) if self.se is not None: x = self.se(x) x = self.ghost2(x) x += self.shortcut(shortcut) return x class RepGhostNet(nn.Module): def __init__(self, cfgs, num_classes=1000, width=1.0, in_chans=3, output_stride=32, global_pool='avg', drop_rate=0.2, reparam=True): super(RepGhostNet, self).__init__() assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' self.cfgs = cfgs self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] stem_chs = make_divisible(16 * width, 4) self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem')) self.bn1 = nn.BatchNorm2d(stem_chs) self.act1 = nn.ReLU(inplace=True) prev_chs = stem_chs stages = nn.ModuleList([]) block = RepGhostBottleneck stage_idx = 0 net_stride = 2 for cfg in self.cfgs: layers = [] s = 1 for (k, 
exp_size, c, se_ratio, s) in cfg: out_chs = make_divisible(c * width, 4) mid_chs = make_divisible(exp_size * width, 4) layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio, reparam=reparam)) prev_chs = out_chs if s > 1: net_stride *= 2 self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) stages.append(nn.Sequential(*layers)) stage_idx += 1 out_chs = make_divisible(exp_size * width * 2, 4) stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) self.pool_dim = prev_chs = out_chs self.blocks = nn.Sequential(*stages) self.num_features = prev_chs self.head_hidden_size = out_chs = 1280 self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) self.act2 = nn.ReLU(inplace=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^conv_stem|bn1', blocks=[('^blocks\\.(\\d+)' if coarse else '^blocks\\.(\\d+)\\.(\\d+)', None), ('conv_head', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classifier def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.conv_stem(x) x = self.bn1(x) x = self.act1(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.conv_head(x) x = self.act2(x) x = self.flatten(x) if self.drop_rate > 0.0: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.classifier(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def convert_to_deploy(self): repghost_model_convert(self, do_copy=False) def repghost_model_convert(model: torch.nn.Module, save_path=None, do_copy=True): if do_copy: model = copy.deepcopy(model) for module in model.modules(): if hasattr(module, 'switch_to_deploy'): module.switch_to_deploy() if save_path is not None: torch.save(model.state_dict(), save_path) return model def _create_repghostnet(variant, width=1.0, pretrained=False, **kwargs): cfgs = [[[3, 8, 16, 0, 1]], [[3, 24, 24, 0, 2]], [[3, 36, 24, 0, 1]], [[5, 36, 40, 0.25, 2]], [[5, 60, 40, 0.25, 1]], [[3, 120, 80, 0, 2]], [[3, 100, 80, 0, 1], [3, 120, 80, 0, 1], [3, 120, 80, 0, 1], [3, 240, 112, 0.25, 1], [3, 336, 112, 0.25, 1]], [[5, 336, 160, 0.25, 2]], [[5, 480, 160, 0, 1], [5, 480, 160, 0.25, 1], [5, 480, 160, 0, 1], [5, 480, 160, 0.25, 1]]] model_kwargs = dict(cfgs=cfgs, width=width, **kwargs) return build_model_with_cfg(RepGhostNet, variant, pretrained, feature_cfg=dict(flatten_sequential=True), **model_kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs}
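# Usage sketch (illustrative; assumes the timm package is importable and the
# 'repghostnet_100' variant registered below): after training, the extra BN/identity
# branches in each RepGhostModule can be folded away with repghost_model_convert(),
# leaving a plain conv in each cheap_operation with identical eval-mode outputs.
#   import timm
#   model = timm.create_model('repghostnet_100', pretrained=False).eval()
#   x = torch.randn(1, 3, 224, 224)
#   y = model(x)
#   deploy = repghost_model_convert(model)  # deep-copies, then calls switch_to_deploy() per module
#   torch.testing.assert_close(y, deploy(x), rtol=1e-4, atol=1e-5)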
default_cfgs = generate_default_cfgs({'repghostnet_050.in1k': _cfg(hf_hub_id='timm/'), 'repghostnet_058.in1k': _cfg(hf_hub_id='timm/'), 'repghostnet_080.in1k': _cfg(hf_hub_id='timm/'), 'repghostnet_100.in1k': _cfg(hf_hub_id='timm/'), 'repghostnet_111.in1k': _cfg(hf_hub_id='timm/'), 'repghostnet_130.in1k': _cfg(hf_hub_id='timm/'), 'repghostnet_150.in1k': _cfg(hf_hub_id='timm/'), 'repghostnet_200.in1k': _cfg(hf_hub_id='timm/')}) @register_model def repghostnet_050(pretrained=False, **kwargs) -> RepGhostNet: model = _create_repghostnet('repghostnet_050', width=0.5, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_058(pretrained=False, **kwargs) -> RepGhostNet: model = _create_repghostnet('repghostnet_058', width=0.58, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_080(pretrained=False, **kwargs) -> RepGhostNet: model = _create_repghostnet('repghostnet_080', width=0.8, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_100(pretrained=False, **kwargs) -> RepGhostNet: model = _create_repghostnet('repghostnet_100', width=1.0, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_111(pretrained=False, **kwargs) -> RepGhostNet: model = _create_repghostnet('repghostnet_111', width=1.11, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_130(pretrained=False, **kwargs) -> RepGhostNet: model = _create_repghostnet('repghostnet_130', width=1.3, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_150(pretrained=False, **kwargs) -> RepGhostNet: model = _create_repghostnet('repghostnet_150', width=1.5, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_200(pretrained=False, **kwargs) -> RepGhostNet: model = _create_repghostnet('repghostnet_200', width=2.0, pretrained=pretrained, **kwargs) return model # File: pytorch-image-models-main/timm/models/repvit.py """""" __all__ = ['RepVit'] from typing import Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SqueezeExcite, trunc_normal_, to_ntuple, to_2tuple from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs class ConvNorm(nn.Sequential): def __init__(self, in_dim, out_dim, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): super().__init__() self.add_module('c', nn.Conv2d(in_dim, out_dim, ks, stride, pad, dilation, groups, bias=False)) self.add_module('bn', nn.BatchNorm2d(out_dim)) nn.init.constant_(self.bn.weight, bn_weight_init) nn.init.constant_(self.bn.bias, 0) @torch.no_grad() def fuse(self): (c, bn) = self._modules.values() w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = c.weight * w[:, None, None, None] b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 m = nn.Conv2d(w.size(1) * self.c.groups, w.size(0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups, device=c.weight.device) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class NormLinear(nn.Sequential): def __init__(self, in_dim, out_dim, bias=True, std=0.02): super().__init__() self.add_module('bn', nn.BatchNorm1d(in_dim)) self.add_module('l', nn.Linear(in_dim, out_dim, bias=bias)) trunc_normal_(self.l.weight, std=std) if bias: nn.init.constant_(self.l.bias, 0) @torch.no_grad() def fuse(self): (bn, l) = self._modules.values() w = bn.weight / (bn.running_var + bn.eps) ** 0.5 b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 w = l.weight * w[None, :] if l.bias is None: b = b @ self.l.weight.T else: b = (l.weight @ b[:, None]).view(-1) + self.l.bias m = nn.Linear(w.size(1), w.size(0), device=l.weight.device) m.weight.data.copy_(w) m.bias.data.copy_(b) return m
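# Note: NormLinear.fuse folds the preceding BatchNorm1d into the Linear layer. With
# s = gamma / sqrt(var + eps), the fused layer uses W' = W * s (scaling each input
# column) and b' = W @ (beta - mu * s) + b, so fused(x) == l(bn(x)) in eval mode.
# Quick equivalence check (illustrative only):
#   head = NormLinear(16, 4).eval()
#   x = torch.randn(2, 16)
#   torch.testing.assert_close(head(x), head.fuse()(x), rtol=1e-4, atol=1e-5)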
class RepVggDw(nn.Module): def __init__(self, ed, kernel_size, legacy=False): super().__init__() self.conv = ConvNorm(ed, ed, kernel_size, 1, (kernel_size - 1) // 2, groups=ed) if legacy: self.conv1 = ConvNorm(ed, ed, 1, 1, 0, groups=ed) self.bn = nn.Identity() else: self.conv1 = nn.Conv2d(ed, ed, 1, 1, 0, groups=ed) self.bn = nn.BatchNorm2d(ed) self.dim = ed self.legacy = legacy def forward(self, x): return self.bn(self.conv(x) + self.conv1(x) + x) @torch.no_grad() def fuse(self): conv = self.conv.fuse() if self.legacy: conv1 = self.conv1.fuse() else: conv1 = self.conv1 conv_w = conv.weight conv_b = conv.bias conv1_w = conv1.weight conv1_b = conv1.bias conv1_w = nn.functional.pad(conv1_w, [1, 1, 1, 1]) identity = nn.functional.pad(torch.ones(conv1_w.shape[0], conv1_w.shape[1], 1, 1, device=conv1_w.device), [1, 1, 1, 1]) final_conv_w = conv_w + conv1_w + identity final_conv_b = conv_b + conv1_b conv.weight.data.copy_(final_conv_w) conv.bias.data.copy_(final_conv_b) if not self.legacy: bn = self.bn w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = conv.weight * w[:, None, None, None] b = bn.bias + (conv.bias - bn.running_mean) * bn.weight / (bn.running_var + bn.eps) ** 0.5 conv.weight.data.copy_(w) conv.bias.data.copy_(b) return conv class RepVitMlp(nn.Module): def __init__(self, in_dim, hidden_dim, act_layer): super().__init__() self.conv1 = ConvNorm(in_dim, hidden_dim, 1, 1, 0) self.act = act_layer() self.conv2 = ConvNorm(hidden_dim, in_dim, 1, 1, 0, bn_weight_init=0) def forward(self, x): return self.conv2(self.act(self.conv1(x))) class RepViTBlock(nn.Module): def __init__(self, in_dim, mlp_ratio, kernel_size, use_se, act_layer, legacy=False): super(RepViTBlock, self).__init__() self.token_mixer = RepVggDw(in_dim, kernel_size, legacy) self.se = SqueezeExcite(in_dim, 0.25) if use_se else nn.Identity() self.channel_mixer = RepVitMlp(in_dim, in_dim * mlp_ratio, act_layer) def forward(self, x): x = self.token_mixer(x) x = self.se(x) identity = x x = self.channel_mixer(x) return identity + x class RepVitStem(nn.Module): def __init__(self, in_chs, out_chs, act_layer): super().__init__() self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1) self.act1 = act_layer() self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1) self.stride = 4 def forward(self, x): return self.conv2(self.act1(self.conv1(x))) class RepVitDownsample(nn.Module): def __init__(self, in_dim, mlp_ratio, out_dim, kernel_size, act_layer, legacy=False): super().__init__() self.pre_block = RepViTBlock(in_dim, mlp_ratio, kernel_size, use_se=False, act_layer=act_layer, legacy=legacy) self.spatial_downsample = ConvNorm(in_dim, in_dim, kernel_size, 2, (kernel_size - 1) // 2, groups=in_dim) self.channel_downsample = ConvNorm(in_dim, out_dim, 1, 1) self.ffn = RepVitMlp(out_dim, out_dim * mlp_ratio, act_layer) def forward(self, x): x = self.pre_block(x) x = self.spatial_downsample(x) x = self.channel_downsample(x) identity = x x = self.ffn(x) return x + identity class RepVitClassifier(nn.Module): def __init__(self, dim, num_classes, distillation=False, drop=0.0): super().__init__() self.head_drop = nn.Dropout(drop) self.head = NormLinear(dim, 
num_classes) if num_classes > 0 else nn.Identity() self.distillation = distillation self.distilled_training = False self.num_classes = num_classes if distillation: self.head_dist = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity() def forward(self, x): x = self.head_drop(x) if self.distillation: (x1, x2) = (self.head(x), self.head_dist(x)) if self.training and self.distilled_training and (not torch.jit.is_scripting()): return (x1, x2) else: return (x1 + x2) / 2 else: x = self.head(x) return x @torch.no_grad() def fuse(self): if not self.num_classes > 0: return nn.Identity() head = self.head.fuse() if self.distillation: head_dist = self.head_dist.fuse() head.weight += head_dist.weight head.bias += head_dist.bias head.weight /= 2 head.bias /= 2 return head else: return head class RepVitStage(nn.Module): def __init__(self, in_dim, out_dim, depth, mlp_ratio, act_layer, kernel_size=3, downsample=True, legacy=False): super().__init__() if downsample: self.downsample = RepVitDownsample(in_dim, mlp_ratio, out_dim, kernel_size, act_layer, legacy) else: assert in_dim == out_dim self.downsample = nn.Identity() blocks = [] use_se = True for _ in range(depth): blocks.append(RepViTBlock(out_dim, mlp_ratio, kernel_size, use_se, act_layer, legacy)) use_se = not use_se self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class RepVit(nn.Module): def __init__(self, in_chans=3, img_size=224, embed_dim=(48,), depth=(2,), mlp_ratio=2, global_pool='avg', kernel_size=3, num_classes=1000, act_layer=nn.GELU, distillation=True, drop_rate=0.0, legacy=False): super(RepVit, self).__init__() self.grad_checkpointing = False self.global_pool = global_pool self.embed_dim = embed_dim self.num_classes = num_classes in_dim = embed_dim[0] self.stem = RepVitStem(in_chans, in_dim, act_layer) stride = self.stem.stride resolution = tuple([i // p for (i, p) in zip(to_2tuple(img_size), to_2tuple(stride))]) num_stages = len(embed_dim) mlp_ratios = to_ntuple(num_stages)(mlp_ratio) self.feature_info = [] stages = [] for i in range(num_stages): downsample = True if i != 0 else False stages.append(RepVitStage(in_dim, embed_dim[i], depth[i], mlp_ratio=mlp_ratios[i], act_layer=act_layer, kernel_size=kernel_size, downsample=downsample, legacy=legacy)) stage_stride = 2 if downsample else 1 stride *= stage_stride resolution = tuple([(r - 1) // stage_stride + 1 for r in resolution]) self.feature_info += [dict(num_chs=embed_dim[i], reduction=stride, module=f'stages.{i}')] in_dim = embed_dim[i] self.stages = nn.Sequential(*stages) self.num_features = self.head_hidden_size = embed_dim[-1] self.head_drop = nn.Dropout(drop_rate) self.head = RepVitClassifier(embed_dim[-1], num_classes, distillation) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^stem', blocks=[('^blocks\\.(\\d+)', None), ('^norm', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None, distillation: bool=False): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = RepVitClassifier(self.embed_dim[-1], num_classes, distillation) @torch.jit.ignore def set_distilled_training(self, enable=True): self.head.distilled_training = enable def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and (not 
torch.jit.is_scripting()): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool == 'avg': x = x.mean((2, 3), keepdim=False) x = self.head_drop(x) if pre_logits: return x return self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x @torch.no_grad() def fuse(self): def fuse_children(net): for (child_name, child) in net.named_children(): if hasattr(child, 'fuse'): fused = child.fuse() setattr(net, child_name, fused) fuse_children(fused) else: fuse_children(child) fuse_children(self) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.c', 'classifier': ('head.head.l', 'head.head_dist.l'), **kwargs} default_cfgs = generate_default_cfgs({'repvit_m1.dist_in1k': _cfg(hf_hub_id='timm/'), 'repvit_m2.dist_in1k': _cfg(hf_hub_id='timm/'), 'repvit_m3.dist_in1k': _cfg(hf_hub_id='timm/'), 'repvit_m0_9.dist_300e_in1k': _cfg(hf_hub_id='timm/'), 'repvit_m0_9.dist_450e_in1k': _cfg(hf_hub_id='timm/'), 'repvit_m1_0.dist_300e_in1k': _cfg(hf_hub_id='timm/'), 'repvit_m1_0.dist_450e_in1k': _cfg(hf_hub_id='timm/'), 'repvit_m1_1.dist_300e_in1k': _cfg(hf_hub_id='timm/'), 'repvit_m1_1.dist_450e_in1k': _cfg(hf_hub_id='timm/'), 'repvit_m1_5.dist_300e_in1k': _cfg(hf_hub_id='timm/'), 'repvit_m1_5.dist_450e_in1k': _cfg(hf_hub_id='timm/'), 'repvit_m2_3.dist_300e_in1k': _cfg(hf_hub_id='timm/'), 'repvit_m2_3.dist_450e_in1k': _cfg(hf_hub_id='timm/')}) def _create_repvit(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg(RepVit, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model @register_model def repvit_m1(pretrained=False, **kwargs): model_args = dict(embed_dim=(48, 96, 192, 384), depth=(2, 2, 14, 2), legacy=True) return _create_repvit('repvit_m1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m2(pretrained=False, **kwargs): model_args = dict(embed_dim=(64, 128, 256, 512), depth=(2, 2, 12, 2), legacy=True) return _create_repvit('repvit_m2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m3(pretrained=False, **kwargs): model_args = dict(embed_dim=(64, 128, 256, 512), depth=(4, 4, 18, 2), legacy=True) return _create_repvit('repvit_m3', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m0_9(pretrained=False, **kwargs): model_args = dict(embed_dim=(48, 96, 192, 384), depth=(2, 2, 14, 2)) return _create_repvit('repvit_m0_9', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m1_0(pretrained=False, **kwargs): model_args = dict(embed_dim=(56, 112, 224, 448), depth=(2, 2, 14, 2)) return _create_repvit('repvit_m1_0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m1_1(pretrained=False, **kwargs): model_args = dict(embed_dim=(64, 128, 256, 512), depth=(2, 2, 12, 2)) return _create_repvit('repvit_m1_1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m1_5(pretrained=False, **kwargs): model_args = dict(embed_dim=(64, 128, 256, 512), depth=(4, 4, 24, 4)) return _create_repvit('repvit_m1_5', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m2_3(pretrained=False, 
**kwargs): model_args = dict(embed_dim=(80, 160, 320, 640), depth=(6, 6, 34, 2)) return _create_repvit('repvit_m2_3', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/res2net.py """""" import math import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .resnet import ResNet __all__ = [] class Bottle2neck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=26, scale=4, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=None, attn_layer=None, **_): super(Bottle2neck, self).__init__() self.scale = scale self.is_first = stride > 1 or downsample is not None self.num_scales = max(1, scale - 1) width = int(math.floor(planes * (base_width / 64.0))) * cardinality self.width = width outplanes = planes * self.expansion first_dilation = first_dilation or dilation self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False) self.bn1 = norm_layer(width * scale) convs = [] bns = [] for i in range(self.num_scales): convs.append(nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False)) bns.append(norm_layer(width)) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) if self.is_first: self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) else: self.pool = None self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False) self.bn3 = norm_layer(outplanes) self.se = attn_layer(outplanes) if attn_layer is not None else None self.relu = act_layer(inplace=True) self.downsample = downsample def zero_init_last(self): if getattr(self.bn3, 'weight', None) is not None: nn.init.zeros_(self.bn3.weight) def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) spx = torch.split(out, self.width, 1) spo = [] sp = spx[0] for (i, (conv, bn)) in enumerate(zip(self.convs, self.bns)): if i == 0 or self.is_first: sp = spx[i] else: sp = sp + spx[i] sp = conv(sp) sp = bn(sp) sp = self.relu(sp) spo.append(sp) if self.scale > 1: if self.pool is not None: spo.append(self.pool(spx[-1])) else: spo.append(spx[-1]) out = torch.cat(spo, 1) out = self.conv3(out) out = self.bn3(out) if self.se is not None: out = self.se(out) if self.downsample is not None: shortcut = self.downsample(x) out += shortcut out = self.relu(out) return out def _create_res2net(variant, pretrained=False, **kwargs): return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'fc', **kwargs} default_cfgs = generate_default_cfgs({'res2net50_26w_4s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_48w_2s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_14w_8s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_26w_6s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_26w_8s.in1k': _cfg(hf_hub_id='timm/'), 'res2net101_26w_4s.in1k': _cfg(hf_hub_id='timm/'), 'res2next50.in1k': _cfg(hf_hub_id='timm/'), 'res2net50d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'), 'res2net101d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0')}) @register_model def res2net50_26w_4s(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4)) return _create_res2net('res2net50_26w_4s', pretrained, **dict(model_args, **kwargs))
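# Note: Bottle2neck above implements the Res2Net hierarchical residual. The 1x1 conv
# output of width * scale channels is split into `scale` groups of `width` channels;
# each group after the first is summed with the previous group's 3x3 output before its
# own 3x3 conv, so later groups see progressively larger receptive fields within one
# block. Shape sketch (illustrative only):
#   width, scale = 26, 4
#   out = torch.randn(1, width * scale, 56, 56)
#   spx = torch.split(out, width, 1)  # `scale` chunks of `width` channels each
#   assert len(spx) == scale and all(t.shape[1] == width for t in spx)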
@register_model def res2net101_26w_4s(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4)) return _create_res2net('res2net101_26w_4s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50_26w_6s(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6)) return _create_res2net('res2net50_26w_6s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50_26w_8s(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8)) return _create_res2net('res2net50_26w_8s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50_48w_2s(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2)) return _create_res2net('res2net50_48w_2s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50_14w_8s(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8)) return _create_res2net('res2net50_14w_8s', pretrained, **dict(model_args, **kwargs)) @register_model def res2next50(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4)) return _create_res2net('res2next50', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50d(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, stem_type='deep', avg_down=True, stem_width=32, block_args=dict(scale=4)) return _create_res2net('res2net50d', pretrained, **dict(model_args, **kwargs)) @register_model def res2net101d(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, stem_type='deep', avg_down=True, stem_width=32, block_args=dict(scale=4)) return _create_res2net('res2net101d', pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/resnest.py """""" from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SplitAttn from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .resnet import ResNet class ResNestBottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, radix=1, cardinality=1, base_width=64, avd=False, avd_first=False, is_first=False, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): super(ResNestBottleneck, self).__init__() assert reduce_first == 1 assert attn_layer is None assert aa_layer is None assert drop_path is None group_width = int(planes * (base_width / 64.0)) * cardinality first_dilation = first_dilation or dilation if avd and (stride > 1 or is_first): avd_stride = stride stride = 1 else: avd_stride = 0 self.radix = radix self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False) self.bn1 = norm_layer(group_width) self.act1 = act_layer(inplace=True) self.avd_first = 
nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None if self.radix >= 1: self.conv2 = SplitAttn(group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_layer=drop_block) self.bn2 = nn.Identity() self.drop_block = nn.Identity() self.act2 = nn.Identity() else: self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) self.bn2 = norm_layer(group_width) self.drop_block = drop_block() if drop_block is not None else nn.Identity() self.act2 = act_layer(inplace=True) self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and (not avd_first) else None self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False) self.bn3 = norm_layer(planes * 4) self.act3 = act_layer(inplace=True) self.downsample = downsample def zero_init_last(self): if getattr(self.bn3, 'weight', None) is not None: nn.init.zeros_(self.bn3.weight) def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.act1(out) if self.avd_first is not None: out = self.avd_first(out) out = self.conv2(out) out = self.bn2(out) out = self.drop_block(out) out = self.act2(out) if self.avd_last is not None: out = self.avd_last(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: shortcut = self.downsample(x) out += shortcut out = self.act3(out) return out def _create_resnest(variant, pretrained=False, **kwargs): return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1.0', 'classifier': 'fc', **kwargs} default_cfgs = generate_default_cfgs({'resnest14d.gluon_in1k': _cfg(hf_hub_id='timm/'), 'resnest26d.gluon_in1k': _cfg(hf_hub_id='timm/'), 'resnest50d.in1k': _cfg(hf_hub_id='timm/'), 'resnest101e.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)), 'resnest200e.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'), 'resnest269e.in1k': _cfg(hf_hub_id='timm/', input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'), 'resnest50d_4s2x40d.in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'), 'resnest50d_1s4x24d.in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic')}) @register_model def resnest14d(pretrained=False, **kwargs) -> ResNet: model_kwargs = dict(block=ResNestBottleneck, layers=[1, 1, 1, 1], stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest14d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest26d(pretrained=False, **kwargs) -> ResNet: model_kwargs = dict(block=ResNestBottleneck, layers=[2, 2, 2, 2], stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest26d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest50d(pretrained=False, **kwargs) -> ResNet: model_kwargs = dict(block=ResNestBottleneck, layers=[3, 4, 6, 3], stem_type='deep', stem_width=32, avg_down=True, base_width=64, 
cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest50d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest101e(pretrained=False, **kwargs) -> ResNet: model_kwargs = dict(block=ResNestBottleneck, layers=[3, 4, 23, 3], stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest101e', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest200e(pretrained=False, **kwargs) -> ResNet: model_kwargs = dict(block=ResNestBottleneck, layers=[3, 24, 36, 3], stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest200e', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest269e(pretrained=False, **kwargs) -> ResNet: model_kwargs = dict(block=ResNestBottleneck, layers=[3, 30, 48, 8], stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest269e', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest50d_4s2x40d(pretrained=False, **kwargs) -> ResNet: model_kwargs = dict(block=ResNestBottleneck, layers=[3, 4, 6, 3], stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2, block_args=dict(radix=4, avd=True, avd_first=True)) return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest50d_1s4x24d(pretrained=False, **kwargs) -> ResNet: model_kwargs = dict(block=ResNestBottleneck, layers=[3, 4, 6, 3], stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4, block_args=dict(radix=1, avd=True, avd_first=True)) return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) # File: pytorch-image-models-main/timm/models/resnet.py """""" import math from functools import partial from typing import Any, Dict, List, Optional, Tuple, Type, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, GroupNorm, LayerType, create_attn, get_attn, get_act_layer, get_norm_layer, create_classifier, create_aa from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] def get_padding(kernel_size: int, stride: int, dilation: int=1) -> int: padding = (stride - 1 + dilation * (kernel_size - 1)) // 2 return padding class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes: int, planes: int, stride: int=1, downsample: Optional[nn.Module]=None, cardinality: int=1, base_width: int=64, reduce_first: int=1, dilation: int=1, first_dilation: Optional[int]=None, act_layer: Type[nn.Module]=nn.ReLU, norm_layer: Type[nn.Module]=nn.BatchNorm2d, attn_layer: Optional[Type[nn.Module]]=None, aa_layer: Optional[Type[nn.Module]]=None, drop_block: Optional[Type[nn.Module]]=None, drop_path: Optional[nn.Module]=None): super(BasicBlock, self).__init__() assert cardinality == 1, 'BasicBlock only supports cardinality of 1' assert base_width == 64, 'BasicBlock does not support 
changing base width' first_planes = planes // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation, dilation=first_dilation, bias=False) self.bn1 = norm_layer(first_planes) self.drop_block = drop_block() if drop_block is not None else nn.Identity() self.act1 = act_layer(inplace=True) self.aa = create_aa(aa_layer, channels=first_planes, stride=stride, enable=use_aa) self.conv2 = nn.Conv2d(first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False) self.bn2 = norm_layer(outplanes) self.se = create_attn(attn_layer, outplanes) self.act2 = act_layer(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.drop_path = drop_path def zero_init_last(self): if getattr(self.bn2, 'weight', None) is not None: nn.init.zeros_(self.bn2.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x x = self.conv1(x) x = self.bn1(x) x = self.drop_block(x) x = self.act1(x) x = self.aa(x) x = self.conv2(x) x = self.bn2(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act2(x) return x class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes: int, planes: int, stride: int=1, downsample: Optional[nn.Module]=None, cardinality: int=1, base_width: int=64, reduce_first: int=1, dilation: int=1, first_dilation: Optional[int]=None, act_layer: Type[nn.Module]=nn.ReLU, norm_layer: Type[nn.Module]=nn.BatchNorm2d, attn_layer: Optional[Type[nn.Module]]=None, aa_layer: Optional[Type[nn.Module]]=None, drop_block: Optional[Type[nn.Module]]=None, drop_path: Optional[nn.Module]=None): super(Bottleneck, self).__init__() width = int(math.floor(planes * (base_width / 64)) * cardinality) first_planes = width // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False) self.bn1 = norm_layer(first_planes) self.act1 = act_layer(inplace=True) self.conv2 = nn.Conv2d(first_planes, width, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) self.bn2 = norm_layer(width) self.drop_block = drop_block() if drop_block is not None else nn.Identity() self.act2 = act_layer(inplace=True) self.aa = create_aa(aa_layer, channels=width, stride=stride, enable=use_aa) self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False) self.bn3 = norm_layer(outplanes) self.se = create_attn(attn_layer, outplanes) self.act3 = act_layer(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.drop_path = drop_path def zero_init_last(self): if getattr(self.bn3, 'weight', None) is not None: nn.init.zeros_(self.bn3.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x x = self.conv1(x) x = self.bn1(x) x = self.act1(x) x = self.conv2(x) x = self.bn2(x) x = self.drop_block(x) x = self.act2(x) x = self.aa(x) x = self.conv3(x) x = self.bn3(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = 
self.downsample(shortcut) x += shortcut x = self.act3(x) return x def downsample_conv(in_channels: int, out_channels: int, kernel_size: int, stride: int=1, dilation: int=1, first_dilation: Optional[int]=None, norm_layer: Optional[Type[nn.Module]]=None) -> nn.Module: norm_layer = norm_layer or nn.BatchNorm2d kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size first_dilation = first_dilation or dilation if kernel_size > 1 else 1 p = get_padding(kernel_size, stride, first_dilation) return nn.Sequential(*[nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False), norm_layer(out_channels)]) def downsample_avg(in_channels: int, out_channels: int, kernel_size: int, stride: int=1, dilation: int=1, first_dilation: Optional[int]=None, norm_layer: Optional[Type[nn.Module]]=None) -> nn.Module: norm_layer = norm_layer or nn.BatchNorm2d avg_stride = stride if dilation == 1 else 1 if stride == 1 and dilation == 1: pool = nn.Identity() else: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) return nn.Sequential(*[pool, nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False), norm_layer(out_channels)]) def drop_blocks(drop_prob: float=0.0): return [None, None, partial(DropBlock2d, drop_prob=drop_prob, block_size=5, gamma_scale=0.25) if drop_prob else None, partial(DropBlock2d, drop_prob=drop_prob, block_size=3, gamma_scale=1.0) if drop_prob else None] def make_blocks(block_fn: Union[BasicBlock, Bottleneck], channels: Tuple[int, ...], block_repeats: Tuple[int, ...], inplanes: int, reduce_first: int=1, output_stride: int=32, down_kernel_size: int=1, avg_down: bool=False, drop_block_rate: float=0.0, drop_path_rate: float=0.0, **kwargs) -> Tuple[List[Tuple[str, nn.Module]], List[Dict[str, Any]]]: stages = [] feature_info = [] net_num_blocks = sum(block_repeats) net_block_idx = 0 net_stride = 4 dilation = prev_dilation = 1 for (stage_idx, (planes, num_blocks, db)) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))): stage_name = f'layer{stage_idx + 1}' stride = 1 if stage_idx == 0 else 2 if net_stride >= output_stride: dilation *= stride stride = 1 else: net_stride *= stride downsample = None if stride != 1 or inplanes != planes * block_fn.expansion: down_kwargs = dict(in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size, stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer')) downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs) block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs) blocks = [] for block_idx in range(num_blocks): downsample = downsample if block_idx == 0 else None stride = stride if block_idx == 0 else 1 block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) blocks.append(block_fn(inplanes, planes, stride, downsample, first_dilation=prev_dilation, drop_path=DropPath(block_dpr) if block_dpr > 0.0 else None, **block_kwargs)) prev_dilation = dilation inplanes = planes * block_fn.expansion net_block_idx += 1 stages.append((stage_name, nn.Sequential(*blocks))) feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name)) return (stages, feature_info) class ResNet(nn.Module): def __init__(self, block: Union[BasicBlock, Bottleneck], layers: Tuple[int, ...], num_classes: int=1000, in_chans: int=3, 
output_stride: int=32, global_pool: str='avg', cardinality: int=1, base_width: int=64, stem_width: int=64, stem_type: str='', replace_stem_pool: bool=False, block_reduce_first: int=1, down_kernel_size: int=1, avg_down: bool=False, channels: Optional[Tuple[int, ...]]=(64, 128, 256, 512), act_layer: LayerType=nn.ReLU, norm_layer: LayerType=nn.BatchNorm2d, aa_layer: Optional[Type[nn.Module]]=None, drop_rate: float=0.0, drop_path_rate: float=0.0, drop_block_rate: float=0.0, zero_init_last: bool=True, block_args: Optional[Dict[str, Any]]=None): super(ResNet, self).__init__() block_args = block_args or dict() assert output_stride in (8, 16, 32) self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False act_layer = get_act_layer(act_layer) norm_layer = get_norm_layer(norm_layer) deep_stem = 'deep' in stem_type inplanes = stem_width * 2 if deep_stem else 64 if deep_stem: stem_chs = (stem_width, stem_width) if 'tiered' in stem_type: stem_chs = (3 * (stem_width // 4), stem_width) self.conv1 = nn.Sequential(*[nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False), norm_layer(stem_chs[0]), act_layer(inplace=True), nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False), norm_layer(stem_chs[1]), act_layer(inplace=True), nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)]) else: self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = norm_layer(inplanes) self.act1 = act_layer(inplace=True) self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')] if replace_stem_pool: self.maxpool = nn.Sequential(*filter(None, [nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False), create_aa(aa_layer, channels=inplanes, stride=2) if aa_layer is not None else None, norm_layer(inplanes), act_layer(inplace=True)])) elif aa_layer is not None: if issubclass(aa_layer, nn.AvgPool2d): self.maxpool = aa_layer(2) else: self.maxpool = nn.Sequential(*[nn.MaxPool2d(kernel_size=3, stride=1, padding=1), aa_layer(channels=inplanes, stride=2)]) else: self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) (stage_modules, stage_feature_info) = make_blocks(block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width, output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down, down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args) for stage in stage_modules: self.add_module(*stage) self.feature_info.extend(stage_feature_info) self.num_features = self.head_hidden_size = channels[-1] * block.expansion (self.global_pool, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) self.init_weights(zero_init_last=zero_init_last) @torch.jit.ignore def init_weights(self, zero_init_last: bool=True): for (n, m) in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if zero_init_last: for m in self.modules(): if hasattr(m, 'zero_init_last'): m.zero_init_last() @torch.jit.ignore def group_matcher(self, coarse: bool=False): matcher = dict(stem='^conv1|bn1|maxpool', blocks='^layer(\\d+)' if coarse else '^layer(\\d+)\\.(\\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable: bool=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self, name_only: bool=False): return 
'fc' if name_only else self.fc def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] (take_indices, max_index) = feature_take_indices(5, indices) feat_idx = 0 x = self.conv1(x) x = self.bn1(x) x = self.act1(x) if feat_idx in take_indices: intermediates.append(x) x = self.maxpool(x) layer_names = ('layer1', 'layer2', 'layer3', 'layer4') if stop_early: layer_names = layer_names[:max_index] for n in layer_names: feat_idx += 1 x = getattr(self, n)(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(5, indices) layer_names = ('layer1', 'layer2', 'layer3', 'layer4') layer_names = layer_names[max_index:] for n in layer_names: setattr(self, n, nn.Identity()) if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.conv1(x) x = self.bn1(x) x = self.act1(x) x = self.maxpool(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq([self.layer1, self.layer2, self.layer3, self.layer4], x, flatten=True) else: x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool=False) -> torch.Tensor: x = self.global_pool(x) if self.drop_rate: x = F.dropout(x, p=float(self.drop_rate), training=self.training) return x if pre_logits else self.fc(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x def _create_resnet(variant, pretrained: bool=False, **kwargs) -> ResNet: return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'fc', **kwargs} def _tcfg(url='', **kwargs): return _cfg(url=url, **dict({'interpolation': 'bicubic'}, **kwargs)) def _ttcfg(url='', **kwargs): return _cfg(url=url, **dict({'interpolation': 'bicubic', 'test_input_size': (3, 288, 288), 'test_crop_pct': 0.95, 'origin_url': 'https://github.com/huggingface/pytorch-image-models'}, **kwargs)) def _rcfg(url='', **kwargs): return _cfg(url=url, **dict({'interpolation': 'bicubic', 'crop_pct': 0.95, 'test_input_size': (3, 288, 288), 'test_crop_pct': 1.0, 'origin_url': 'https://github.com/huggingface/pytorch-image-models', 'paper_ids': 'arXiv:2110.00476'}, **kwargs)) def _r3cfg(url='', **kwargs): return _cfg(url=url, **dict({'interpolation': 'bicubic', 'input_size': (3, 160, 160), 'pool_size': (5, 5), 'crop_pct': 0.95, 'test_input_size': (3, 224, 224), 'test_crop_pct': 0.95, 'origin_url': 'https://github.com/huggingface/pytorch-image-models', 'paper_ids': 'arXiv:2110.00476'}, **kwargs)) def _gcfg(url='', **kwargs): return _cfg(url=url, **dict({'interpolation': 'bicubic', 'origin_url': 'https://cv.gluon.ai/model_zoo/classification.html'}, **kwargs))
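# Usage sketch (illustrative; assumes the timm package is importable and the plain
# 'resnet50' variant registered below): forward_intermediates() above returns stage
# outputs; with indices=4 it yields the four residual stages at strides 4/8/16/32 for
# the default output_stride=32.
#   import timm
#   model = timm.create_model('resnet50', pretrained=False).eval()
#   feats = model.forward_intermediates(torch.randn(1, 3, 224, 224), indices=4, intermediates_only=True)
#   print([tuple(f.shape) for f in feats])  # channel counts 256/512/1024/2048 for resnet50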
default_cfgs = generate_default_cfgs({'resnet10t.c3_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet10t_176_c3-f3215ab1.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_crop_pct=0.95, test_input_size=(3, 224, 224), first_conv='conv1.0'), 'resnet14t.c3_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet14t_176_c3-c4ed2c37.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_crop_pct=0.95, test_input_size=(3, 224, 224), first_conv='conv1.0'), 'resnet18.a1_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a1_0-d63eafa0.pth'), 'resnet18.a2_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a2_0-b61bd467.pth'), 'resnet18.a3_in1k': _r3cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a3_0-40c531c8.pth'), 'resnet18d.ra2_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth', first_conv='conv1.0'), 'resnet34.a1_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a1_0-46f8f793.pth'), 'resnet34.a2_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a2_0-82d47d71.pth'), 'resnet34.a3_in1k': _r3cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a3_0-a20cabb6.pth', crop_pct=0.95), 'resnet34.bt_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'), 'resnet34d.ra2_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth', first_conv='conv1.0'), 'resnet26.bt_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth'), 'resnet26d.bt_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth', first_conv='conv1.0'), 'resnet26t.ra2_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet26t_256_ra2-6f6fa748.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'resnet50.a1_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1_0-14fe96d1.pth'), 'resnet50.a1h_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1h2_176-001a1197.pth', input_size=(3, 176, 176), pool_size=(6, 6), crop_pct=0.9, test_input_size=(3, 224, 224), test_crop_pct=1.0), 'resnet50.a2_in1k': _rcfg(hf_hub_id='timm/', 
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a2_0-a2746f79.pth'), 'resnet50.a3_in1k': _r3cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a3_0-59cae1ef.pth'), 'resnet50.b1k_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_b1k-532a802a.pth'), 'resnet50.b2k_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_b2k-1ba180c1.pth'), 'resnet50.c1_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_c1-5ba5e060.pth'), 'resnet50.c2_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_c2-d01e05b2.pth'), 'resnet50.d_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_d-f39db8af.pth'), 'resnet50.ram_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth'), 'resnet50.am_in1k': _tcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_am-6c502b37.pth'), 'resnet50.ra_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_ra-85ebb6e5.pth'), 'resnet50.bt_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/rw_resnet50-86acaeed.pth'), 'resnet50d.ra2_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth', first_conv='conv1.0'), 'resnet50d.ra4_e3600_r224_in1k': _rcfg(hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0, first_conv='conv1.0'), 'resnet50d.a1_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a1_0-e20cff14.pth', first_conv='conv1.0'), 'resnet50d.a2_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a2_0-a3adc64d.pth', first_conv='conv1.0'), 'resnet50d.a3_in1k': _r3cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a3_0-403fdfad.pth', first_conv='conv1.0'), 'resnet50t.untrained': _ttcfg(first_conv='conv1.0'), 'resnet101.a1h_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1h-36d3f2aa.pth'), 'resnet101.a1_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1_0-cdcb52a9.pth'), 'resnet101.a2_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a2_0-6edb36c7.pth'), 'resnet101.a3_in1k': _r3cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a3_0-1db14157.pth'), 'resnet101d.ra2_in1k': _ttcfg(hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 320, 320)), 'resnet152.a1h_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1h-dc400468.pth'), 'resnet152.a1_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1_0-2eee8a7a.pth'), 'resnet152.a2_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a2_0-b4c6978f.pth'), 'resnet152.a3_in1k': _r3cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a3_0-134d4688.pth'), 'resnet152d.ra2_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 320, 320)), 'resnet200.untrained': _ttcfg(), 'resnet200d.ra2_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 320, 320)), 'wide_resnet50_2.racm_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth'), 'resnet18.tv_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet18-5c106cde.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet34.tv_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet34-333f7ec4.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet50.tv_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet50-19c8e357.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet50.tv2_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet50-11ad3fa6.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet101.tv_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet101.tv2_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet101-cd907fc2.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet152.tv_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet152-b121ed2d.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet152.tv2_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet152-f82ba261.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'wide_resnet50_2.tv_in1k': _cfg(hf_hub_id='timm/', 
url='https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'wide_resnet50_2.tv2_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/wide_resnet50_2-9ba9bcbe.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'wide_resnet101_2.tv_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'wide_resnet101_2.tv2_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/wide_resnet101_2-d733dc28.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet50_gn.a1h_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_gn_a1h2-8fe6c4d0.pth', crop_pct=0.94), 'resnext50_32x4d.a1h_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1h-0146ab0a.pth'), 'resnext50_32x4d.a1_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1_0-b5a91a1d.pth'), 'resnext50_32x4d.a2_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a2_0-efc76add.pth'), 'resnext50_32x4d.a3_in1k': _r3cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a3_0-3e450271.pth'), 'resnext50_32x4d.ra_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth'), 'resnext50d_32x4d.bt_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth', first_conv='conv1.0'), 'resnext101_32x4d.untrained': _ttcfg(), 'resnext101_64x4d.c1_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnext101_64x4d_c-0d0e0cc0.pth'), 'resnext50_32x4d.tv_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnext101_32x8d.tv_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnext101_64x4d.tv_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext101_64x4d-173b62eb.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnext50_32x4d.tv2_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext50_32x4d-1a0047aa.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnext101_32x8d.tv2_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext101_32x8d-110c445d.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, 
    'resnext101_32x8d.fb_wsl_ig1b_ft_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'),
    'resnext101_32x16d.fb_wsl_ig1b_ft_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'),
    'resnext101_32x32d.fb_wsl_ig1b_ft_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'),
    'resnext101_32x48d.fb_wsl_ig1b_ft_in1k': _cfg(hf_hub_id='timm/', url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'),
    'resnet18.fb_ssl_yfcc100m_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
    'resnet50.fb_ssl_yfcc100m_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
    'resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
    'resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
    'resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
    'resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
    'resnet18.fb_swsl_ig1b_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
    'resnet50.fb_swsl_ig1b_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
    'resnext50_32x4d.fb_swsl_ig1b_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
    'resnext101_32x4d.fb_swsl_ig1b_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
    'resnext101_32x8d.fb_swsl_ig1b_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
    'resnext101_32x16d.fb_swsl_ig1b_ft_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
    'ecaresnet26t.ra2_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), test_crop_pct=0.95, test_input_size=(3, 320, 320)),
    'ecaresnetlight.miil_in1k': _tcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnetlight-75a9c627.pth', test_crop_pct=0.95, test_input_size=(3, 288, 288)),
    'ecaresnet50d.miil_in1k': _tcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet50d-93c81e3b.pth', first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)),
    'ecaresnet50d_pruned.miil_in1k': _tcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet50d_p-e4fa23c2.pth', first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)),
    'ecaresnet50t.ra2_in1k': _tcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), test_crop_pct=0.95, test_input_size=(3, 320, 320)),
    'ecaresnet50t.a1_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a1_0-99bd76a8.pth', first_conv='conv1.0'),
    'ecaresnet50t.a2_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a2_0-b1c7b745.pth', first_conv='conv1.0'),
    'ecaresnet50t.a3_in1k': _r3cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a3_0-8cc311f1.pth', first_conv='conv1.0'),
    'ecaresnet101d.miil_in1k': _tcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet101d-153dad65.pth', first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)),
    'ecaresnet101d_pruned.miil_in1k': _tcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet101d_p-9e74cb91.pth', first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)),
    'ecaresnet200d.untrained': _ttcfg(first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.95, pool_size=(8, 8)),
    'ecaresnet269d.ra2_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth', first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 352, 352)),
    'ecaresnext26t_32x4d.untrained': _tcfg(first_conv='conv1.0'),
    'ecaresnext50t_32x4d.untrained': _tcfg(first_conv='conv1.0'),
    'seresnet18.untrained': _ttcfg(),
    'seresnet34.untrained': _ttcfg(),
    'seresnet50.a1_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a1_0-ffa00869.pth', crop_pct=0.95),
    'seresnet50.a2_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a2_0-850de0d9.pth', crop_pct=0.95),
    'seresnet50.a3_in1k': _r3cfg(hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a3_0-317ecd56.pth', crop_pct=0.95),
    'seresnet50.ra2_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth'),
    'seresnet50t.untrained': _ttcfg(first_conv='conv1.0'),
    'seresnet101.untrained': _ttcfg(),
    'seresnet152.untrained': _ttcfg(),
    'seresnet152d.ra2_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 320, 320)),
    'seresnet200d.untrained': _ttcfg(first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)),
    'seresnet269d.untrained': _ttcfg(first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)),
    'seresnext26d_32x4d.bt_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth', first_conv='conv1.0'),
    'seresnext26t_32x4d.bt_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth', first_conv='conv1.0'),
    'seresnext50_32x4d.racm_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth'),
    'seresnext101_32x4d.untrained': _ttcfg(),
    'seresnext101_32x8d.ah_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101_32x8d_ah-e6bc4c0a.pth'),
    'seresnext101d_32x8d.ah_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101d_32x8d_ah-191d7b94.pth', first_conv='conv1.0'),
    'resnetaa50d.sw_in12k_ft_in1k': _ttcfg(hf_hub_id='timm/', first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0),
    'resnetaa101d.sw_in12k_ft_in1k': _ttcfg(hf_hub_id='timm/', first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0),
    'seresnextaa101d_32x8d.sw_in12k_ft_in1k_288': _ttcfg(hf_hub_id='timm/', crop_pct=0.95, input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), test_crop_pct=1.0, first_conv='conv1.0'),
    'seresnextaa101d_32x8d.sw_in12k_ft_in1k': _ttcfg(hf_hub_id='timm/', first_conv='conv1.0', test_crop_pct=1.0),
    'seresnextaa201d_32x8d.sw_in12k_ft_in1k_384': _cfg(hf_hub_id='timm/', interpolation='bicubic', first_conv='conv1.0', pool_size=(12, 12), input_size=(3, 384, 384), crop_pct=1.0),
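    # Editor's note (not in the original source): in these pretrained configs,
    # input_size/crop_pct give the default eval resolution and center-crop
    # fraction, while test_input_size/test_crop_pct advertise a larger
    # "test-time" resolution at which the same weights were also validated.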
    'seresnextaa201d_32x8d.sw_in12k': _cfg(hf_hub_id='timm/', num_classes=11821, interpolation='bicubic', first_conv='conv1.0', crop_pct=0.95, input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), test_crop_pct=1.0),
    'resnetaa50d.sw_in12k': _ttcfg(hf_hub_id='timm/', num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0),
    'resnetaa50d.d_in12k': _ttcfg(hf_hub_id='timm/', num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0),
    'resnetaa101d.sw_in12k': _ttcfg(hf_hub_id='timm/', num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0),
    'seresnextaa101d_32x8d.sw_in12k': _ttcfg(hf_hub_id='timm/', num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0),
    'resnetblur18.untrained': _ttcfg(),
    'resnetblur50.bt_in1k': _ttcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth'),
    'resnetblur50d.untrained': _ttcfg(first_conv='conv1.0'),
    'resnetblur101d.untrained': _ttcfg(first_conv='conv1.0'),
    'resnetaa34d.untrained': _ttcfg(first_conv='conv1.0'),
    'resnetaa50.a1h_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetaa50_a1h-4cf422b3.pth'),
    'seresnetaa50d.untrained': _ttcfg(first_conv='conv1.0'),
    'seresnextaa101d_32x8d.ah_in1k': _rcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnextaa101d_32x8d_ah-83c8ae12.pth', first_conv='conv1.0'),
    'resnetrs50.tf_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth', input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224), interpolation='bicubic', first_conv='conv1.0'),
    'resnetrs101.tf_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth', input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288), interpolation='bicubic', first_conv='conv1.0'),
    'resnetrs152.tf_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), interpolation='bicubic', first_conv='conv1.0'),
    'resnetrs200.tf_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnetrs200_c-6b698b88.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), interpolation='bicubic', first_conv='conv1.0'),
    'resnetrs270.tf_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352), interpolation='bicubic', first_conv='conv1.0'),
    'resnetrs350.tf_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth', input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384), interpolation='bicubic', first_conv='conv1.0'),
    'resnetrs420.tf_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416), interpolation='bicubic', first_conv='conv1.0'),
    'resnet18.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth'),
    'resnet34.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth'),
    'resnet50.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth'),
    'resnet101.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth'),
    'resnet152.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth'),
    'resnet50c.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth', first_conv='conv1.0'),
    'resnet101c.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth', first_conv='conv1.0'),
    'resnet152c.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth', first_conv='conv1.0'),
    'resnet50d.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth', first_conv='conv1.0'),
    'resnet101d.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth', first_conv='conv1.0'),
    'resnet152d.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth', first_conv='conv1.0'),
    'resnet50s.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth', first_conv='conv1.0'),
    'resnet101s.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth', first_conv='conv1.0'),
    'resnet152s.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth', first_conv='conv1.0'),
    'resnext50_32x4d.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth'),
    'resnext101_32x4d.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth'),
    'resnext101_64x4d.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth'),
    'seresnext50_32x4d.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth'),
    'seresnext101_32x4d.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth'),
    'seresnext101_64x4d.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth'),
    'senet154.gluon_in1k': _gcfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_senet154-70a1a3c0.pth', first_conv='conv1.0'),
})

@register_model
def resnet10t(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=BasicBlock, layers=(1, 1, 1, 1), stem_width=32, stem_type='deep_tiered', avg_down=True)
    return _create_resnet('resnet10t', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet14t(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(1, 1, 1, 1), stem_width=32, stem_type='deep_tiered', avg_down=True)
    return _create_resnet('resnet14t', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet18(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=BasicBlock, layers=(2, 2, 2, 2))
    return _create_resnet('resnet18', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet18d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=BasicBlock, layers=(2, 2, 2, 2), stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnet18d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet34(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=BasicBlock, layers=(3, 4, 6, 3))
    return _create_resnet('resnet34', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet34d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=BasicBlock, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnet34d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet26(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2))
    return _create_resnet('resnet26', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet26t(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2), stem_width=32, stem_type='deep_tiered', avg_down=True)
    return _create_resnet('resnet26t', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet26d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2), stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnet26d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet50(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3))
    return _create_resnet('resnet50', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet50c(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep')
    return _create_resnet('resnet50c', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet50d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnet50d', pretrained, **dict(model_args, **kwargs))
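# Editor's sketch (not part of the original file): each @register_model
# entrypoint above and below is added to timm's registry under its function
# name, so models are normally built via the public factory rather than by
# calling the function directly. Assuming timm is installed:
#
#     import timm
#     model = timm.create_model('resnet50d', pretrained=False, num_classes=10)
#     # a specific weight tag from default_cfgs can be requested explicitly:
#     # model = timm.create_model('resnet50d.gluon_in1k', pretrained=True)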
@register_model
def resnet50s(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=64, stem_type='deep')
    return _create_resnet('resnet50s', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet50t(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep_tiered', avg_down=True)
    return _create_resnet('resnet50t', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet101(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3))
    return _create_resnet('resnet101', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet101c(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep')
    return _create_resnet('resnet101c', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet101d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnet101d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet101s(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), stem_width=64, stem_type='deep')
    return _create_resnet('resnet101s', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet152(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3))
    return _create_resnet('resnet152', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet152c(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), stem_width=32, stem_type='deep')
    return _create_resnet('resnet152c', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet152d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnet152d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet152s(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), stem_width=64, stem_type='deep')
    return _create_resnet('resnet152s', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet200(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 24, 36, 3))
    return _create_resnet('resnet200', pretrained, **dict(model_args, **kwargs))

@register_model
def resnet200d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 24, 36, 3), stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnet200d', pretrained, **dict(model_args, **kwargs))

@register_model
def wide_resnet50_2(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), base_width=128)
    return _create_resnet('wide_resnet50_2', pretrained, **dict(model_args, **kwargs))

@register_model
def wide_resnet101_2(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), base_width=128)
    return _create_resnet('wide_resnet101_2', pretrained, **dict(model_args, **kwargs))
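# Editor's note (not in the original source): the `**dict(model_args, **kwargs)`
# idiom used by every entrypoint merges per-variant defaults with caller
# overrides; keyword arguments win on conflict, e.g.
# dict(dict(layers=(3, 4, 6, 3)), layers=(2, 2, 2, 2)) -> {'layers': (2, 2, 2, 2)}.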
@register_model
def resnet50_gn(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), norm_layer='groupnorm')
    return _create_resnet('resnet50_gn', pretrained, **dict(model_args, **kwargs))

@register_model
def resnext50_32x4d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), cardinality=32, base_width=4)
    return _create_resnet('resnext50_32x4d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnext50d_32x4d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), cardinality=32, base_width=4, stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnext50d_32x4d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnext101_32x4d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=4)
    return _create_resnet('resnext101_32x4d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnext101_32x8d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=8)
    return _create_resnet('resnext101_32x8d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnext101_32x16d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=16)
    return _create_resnet('resnext101_32x16d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnext101_32x32d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=32)
    return _create_resnet('resnext101_32x32d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnext101_64x4d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=64, base_width=4)
    return _create_resnet('resnext101_64x4d', pretrained, **dict(model_args, **kwargs))

@register_model
def ecaresnet26t(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2), stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'))
    return _create_resnet('ecaresnet26t', pretrained, **dict(model_args, **kwargs))

@register_model
def ecaresnet50d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca'))
    return _create_resnet('ecaresnet50d', pretrained, **dict(model_args, **kwargs))

@register_model
def ecaresnet50d_pruned(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca'))
    return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **dict(model_args, **kwargs))

@register_model
def ecaresnet50t(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'))
    return _create_resnet('ecaresnet50t', pretrained, **dict(model_args, **kwargs))

@register_model
def ecaresnetlight(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(1, 1, 11, 3), stem_width=32, avg_down=True, block_args=dict(attn_layer='eca'))
    return _create_resnet('ecaresnetlight', pretrained, **dict(model_args, **kwargs))
@register_model
def ecaresnet101d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca'))
    return _create_resnet('ecaresnet101d', pretrained, **dict(model_args, **kwargs))

@register_model
def ecaresnet101d_pruned(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca'))
    return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **dict(model_args, **kwargs))

@register_model
def ecaresnet200d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 24, 36, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca'))
    return _create_resnet('ecaresnet200d', pretrained, **dict(model_args, **kwargs))

@register_model
def ecaresnet269d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 30, 48, 8), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca'))
    return _create_resnet('ecaresnet269d', pretrained, **dict(model_args, **kwargs))

@register_model
def ecaresnext26t_32x4d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2), cardinality=32, base_width=4, stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'))
    return _create_resnet('ecaresnext26t_32x4d', pretrained, **dict(model_args, **kwargs))

@register_model
def ecaresnext50t_32x4d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2), cardinality=32, base_width=4, stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'))
    return _create_resnet('ecaresnext50t_32x4d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnet18(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=BasicBlock, layers=(2, 2, 2, 2), block_args=dict(attn_layer='se'))
    return _create_resnet('seresnet18', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnet34(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=BasicBlock, layers=(3, 4, 6, 3), block_args=dict(attn_layer='se'))
    return _create_resnet('seresnet34', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnet50(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), block_args=dict(attn_layer='se'))
    return _create_resnet('seresnet50', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnet50t(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnet50t', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnet101(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), block_args=dict(attn_layer='se'))
    return _create_resnet('seresnet101', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnet152(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), block_args=dict(attn_layer='se'))
    return _create_resnet('seresnet152', pretrained, **dict(model_args, **kwargs))
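# Editor's note (not in the original source): block_args=dict(attn_layer=...)
# is forwarded into every residual block; 'se' selects Squeeze-and-Excitation
# and 'eca' Efficient Channel Attention. A callable can be passed instead of a
# string, as the ResNet-RS entrypoints further below do via
# partial(get_attn('se'), rd_ratio=0.25).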
@register_model
def seresnet152d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnet152d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnet200d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 24, 36, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnet200d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnet269d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 30, 48, 8), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnet269d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnext26d_32x4d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2), cardinality=32, base_width=4, stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnext26d_32x4d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnext26t_32x4d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2), cardinality=32, base_width=4, stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnext26t_32x4d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnext50_32x4d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), cardinality=32, base_width=4, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnext50_32x4d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnext101_32x4d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=4, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnext101_32x4d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnext101_32x8d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=8, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnext101_32x8d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnext101d_32x8d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=8, stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnext101d_32x8d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnext101_64x4d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=64, base_width=4, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnext101_64x4d', pretrained, **dict(model_args, **kwargs))

@register_model
def senet154(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), cardinality=64, base_width=4, stem_type='deep', down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer='se'))
    return _create_resnet('senet154', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetblur18(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=BasicBlock, layers=(2, 2, 2, 2), aa_layer=BlurPool2d)
    return _create_resnet('resnetblur18', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetblur50(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=BlurPool2d)
    return _create_resnet('resnetblur50', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetblur50d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=BlurPool2d, stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnetblur50d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetblur101d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), aa_layer=BlurPool2d, stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnetblur101d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetaa34d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=BasicBlock, layers=(3, 4, 6, 3), aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnetaa34d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetaa50(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=nn.AvgPool2d)
    return _create_resnet('resnetaa50', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetaa50d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnetaa50d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetaa101d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True)
    return _create_resnet('resnetaa101d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnetaa50d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnetaa50d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnextaa101d_32x8d(pretrained: bool=False, **kwargs) -> ResNet:
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=8, stem_width=32, stem_type='deep', avg_down=True, aa_layer=nn.AvgPool2d, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnextaa101d_32x8d', pretrained, **dict(model_args, **kwargs))

@register_model
def seresnextaa201d_32x8d(pretrained: bool=False, **kwargs):
    model_args = dict(block=Bottleneck, layers=(3, 24, 36, 4), cardinality=32, base_width=8, stem_width=64, stem_type='deep', avg_down=True, aa_layer=nn.AvgPool2d, block_args=dict(attn_layer='se'))
    return _create_resnet('seresnextaa201d_32x8d', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetrs50(pretrained: bool=False, **kwargs) -> ResNet:
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer))
    return _create_resnet('resnetrs50', pretrained, **dict(model_args, **kwargs))
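# Editor's note (not in the original source): the surrounding ResNet-RS
# entrypoints follow the "Revisiting ResNets" (Bello et al., 2021) recipe:
# deep 3x3 stem, replace_stem_pool=True (a strided conv in place of the stem
# max-pool), average-pool downsampling in shortcuts, and SE attention with
# rd_ratio=0.25.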
@register_model
def resnetrs101(pretrained: bool=False, **kwargs) -> ResNet:
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer))
    return _create_resnet('resnetrs101', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetrs152(pretrained: bool=False, **kwargs) -> ResNet:
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer))
    return _create_resnet('resnetrs152', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetrs200(pretrained: bool=False, **kwargs) -> ResNet:
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(block=Bottleneck, layers=(3, 24, 36, 3), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer))
    return _create_resnet('resnetrs200', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetrs270(pretrained: bool=False, **kwargs) -> ResNet:
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(block=Bottleneck, layers=(4, 29, 53, 4), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer))
    return _create_resnet('resnetrs270', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetrs350(pretrained: bool=False, **kwargs) -> ResNet:
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(block=Bottleneck, layers=(4, 36, 72, 4), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer))
    return _create_resnet('resnetrs350', pretrained, **dict(model_args, **kwargs))

@register_model
def resnetrs420(pretrained: bool=False, **kwargs) -> ResNet:
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(block=Bottleneck, layers=(4, 44, 87, 4), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer))
    return _create_resnet('resnetrs420', pretrained, **dict(model_args, **kwargs))

register_model_deprecations(__name__, {
    'tv_resnet34': 'resnet34.tv_in1k',
    'tv_resnet50': 'resnet50.tv_in1k',
    'tv_resnet101': 'resnet101.tv_in1k',
    'tv_resnet152': 'resnet152.tv_in1k',
    'tv_resnext50_32x4d': 'resnext50_32x4d.tv_in1k',
    'ig_resnext101_32x8d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k',
    'ig_resnext101_32x16d': 'resnext101_32x16d.fb_wsl_ig1b_ft_in1k',
    'ig_resnext101_32x32d': 'resnext101_32x32d.fb_wsl_ig1b_ft_in1k',
    'ig_resnext101_32x48d': 'resnext101_32x48d.fb_wsl_ig1b_ft_in1k',
    'ssl_resnet18': 'resnet18.fb_ssl_yfcc100m_ft_in1k',
    'ssl_resnet50': 'resnet50.fb_ssl_yfcc100m_ft_in1k',
    'ssl_resnext50_32x4d': 'resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k',
    'ssl_resnext101_32x4d': 'resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k',
    'ssl_resnext101_32x8d': 'resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k',
    'ssl_resnext101_32x16d': 'resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k',
    'swsl_resnet18': 'resnet18.fb_swsl_ig1b_ft_in1k',
    'swsl_resnet50': 'resnet50.fb_swsl_ig1b_ft_in1k',
    'swsl_resnext50_32x4d': 'resnext50_32x4d.fb_swsl_ig1b_ft_in1k',
    'swsl_resnext101_32x4d': 'resnext101_32x4d.fb_swsl_ig1b_ft_in1k',
    'swsl_resnext101_32x8d': 'resnext101_32x8d.fb_swsl_ig1b_ft_in1k',
    'swsl_resnext101_32x16d': 'resnext101_32x16d.fb_swsl_ig1b_ft_in1k',
    'gluon_resnet18_v1b': 'resnet18.gluon_in1k',
    'gluon_resnet34_v1b': 'resnet34.gluon_in1k',
    'gluon_resnet50_v1b': 'resnet50.gluon_in1k',
    'gluon_resnet101_v1b': 'resnet101.gluon_in1k',
    'gluon_resnet152_v1b': 'resnet152.gluon_in1k',
    'gluon_resnet50_v1c': 'resnet50c.gluon_in1k',
    'gluon_resnet101_v1c': 'resnet101c.gluon_in1k',
    'gluon_resnet152_v1c': 'resnet152c.gluon_in1k',
    'gluon_resnet50_v1d': 'resnet50d.gluon_in1k',
    'gluon_resnet101_v1d': 'resnet101d.gluon_in1k',
    'gluon_resnet152_v1d': 'resnet152d.gluon_in1k',
    'gluon_resnet50_v1s': 'resnet50s.gluon_in1k',
    'gluon_resnet101_v1s': 'resnet101s.gluon_in1k',
    'gluon_resnet152_v1s': 'resnet152s.gluon_in1k',
    'gluon_resnext50_32x4d': 'resnext50_32x4d.gluon_in1k',
    'gluon_resnext101_32x4d': 'resnext101_32x4d.gluon_in1k',
    'gluon_resnext101_64x4d': 'resnext101_64x4d.gluon_in1k',
    'gluon_seresnext50_32x4d': 'seresnext50_32x4d.gluon_in1k',
    'gluon_seresnext101_32x4d': 'seresnext101_32x4d.gluon_in1k',
    'gluon_seresnext101_64x4d': 'seresnext101_64x4d.gluon_in1k',
    'gluon_senet154': 'senet154.gluon_in1k',
    'seresnext26tn_32x4d': 'seresnext26t_32x4d',
})
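# Editor's sketch (not part of the original file): register_model_deprecations
# keeps the legacy names usable; requesting an old name builds the new tagged
# model and emits a deprecation warning, e.g.:
#
#     import timm
#     model = timm.create_model('gluon_resnet50_v1b', pretrained=False)
#     # resolves to 'resnet50.gluon_in1k'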
# File: pytorch-image-models-main/timm/models/resnetv2.py
""""""
from collections import OrderedDict
from functools import partial
from typing import Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import (
    GroupNormAct, BatchNormAct2d, EvoNorm2dS0, FilterResponseNormTlu2d, ClassifierHead,
    DropPath, AvgPool2dSame, create_pool2d, StdConv2d, create_conv2d, get_act_layer,
    get_norm_act_layer, make_divisible)
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq, named_apply, adapt_input_conv
from ._registry import generate_default_cfgs, register_model, register_model_deprecations

__all__ = ['ResNetV2']

class PreActBottleneck(nn.Module):
    def __init__(
            self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None,
            groups=1, act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.0):
        super().__init__()
        first_dilation = first_dilation or dilation
        conv_layer = conv_layer or StdConv2d
        norm_layer = norm_layer or partial(GroupNormAct, num_groups=32)
        out_chs = out_chs or in_chs
        mid_chs = make_divisible(out_chs * bottle_ratio)
        if proj_layer is not None:
            self.downsample = proj_layer(
                in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation,
                preact=True, conv_layer=conv_layer, norm_layer=norm_layer)
        else:
            self.downsample = None
        self.norm1 = norm_layer(in_chs)
        self.conv1 = conv_layer(in_chs, mid_chs, 1)
        self.norm2 = norm_layer(mid_chs)
        self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
        self.norm3 = norm_layer(mid_chs)
        self.conv3 = conv_layer(mid_chs, out_chs, 1)
        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()

    def zero_init_last(self):
        nn.init.zeros_(self.conv3.weight)

    def forward(self, x):
        x_preact = self.norm1(x)
        shortcut = x
        if self.downsample is not None:
            shortcut = self.downsample(x_preact)
        x = self.conv1(x_preact)
        x = self.conv2(self.norm2(x))
        x = self.conv3(self.norm3(x))
        x = self.drop_path(x)
        return x + shortcut
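# Editor's note (not in the original source): PreActBottleneck applies norm+act
# *before* each conv (norm1 -> conv1 -> norm2 -> conv2 -> ...), and the
# projection shortcut taps the pre-activated input (x_preact) rather than x,
# following He et al., "Identity Mappings in Deep Residual Networks".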
class Bottleneck(nn.Module):
    def __init__(
            self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None,
            groups=1, act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.0):
        super().__init__()
        first_dilation = first_dilation or dilation
        act_layer = act_layer or nn.ReLU
        conv_layer = conv_layer or StdConv2d
        norm_layer = norm_layer or partial(GroupNormAct, num_groups=32)
        out_chs = out_chs or in_chs
        mid_chs = make_divisible(out_chs * bottle_ratio)
        if proj_layer is not None:
            self.downsample = proj_layer(
                in_chs, out_chs, stride=stride, dilation=dilation, preact=False,
                conv_layer=conv_layer, norm_layer=norm_layer)
        else:
            self.downsample = None
        self.conv1 = conv_layer(in_chs, mid_chs, 1)
        self.norm1 = norm_layer(mid_chs)
        self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
        self.norm2 = norm_layer(mid_chs)
        self.conv3 = conv_layer(mid_chs, out_chs, 1)
        self.norm3 = norm_layer(out_chs, apply_act=False)
        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
        self.act3 = act_layer(inplace=True)

    def zero_init_last(self):
        if getattr(self.norm3, 'weight', None) is not None:
            nn.init.zeros_(self.norm3.weight)

    def forward(self, x):
        shortcut = x
        if self.downsample is not None:
            shortcut = self.downsample(x)
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.conv2(x)
        x = self.norm2(x)
        x = self.conv3(x)
        x = self.norm3(x)
        x = self.drop_path(x)
        x = self.act3(x + shortcut)
        return x

class DownsampleConv(nn.Module):
    def __init__(self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, preact=True, conv_layer=None, norm_layer=None):
        super(DownsampleConv, self).__init__()
        self.conv = conv_layer(in_chs, out_chs, 1, stride=stride)
        self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False)

    def forward(self, x):
        return self.norm(self.conv(x))

class DownsampleAvg(nn.Module):
    def __init__(self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, preact=True, conv_layer=None, norm_layer=None):
        super(DownsampleAvg, self).__init__()
        avg_stride = stride if dilation == 1 else 1
        if stride > 1 or dilation > 1:
            avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
            self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
        else:
            self.pool = nn.Identity()
        self.conv = conv_layer(in_chs, out_chs, 1, stride=1)
        self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False)

    def forward(self, x):
        return self.norm(self.conv(self.pool(x)))

class ResNetStage(nn.Module):
    def __init__(
            self, in_chs, out_chs, stride, dilation, depth, bottle_ratio=0.25, groups=1,
            avg_down=False, block_dpr=None, block_fn=PreActBottleneck,
            act_layer=None, conv_layer=None, norm_layer=None, **block_kwargs):
        super(ResNetStage, self).__init__()
        first_dilation = 1 if dilation in (1, 2) else 2
        layer_kwargs = dict(act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer)
        proj_layer = DownsampleAvg if avg_down else DownsampleConv
        prev_chs = in_chs
        self.blocks = nn.Sequential()
        for block_idx in range(depth):
            drop_path_rate = block_dpr[block_idx] if block_dpr else 0.0
            stride = stride if block_idx == 0 else 1
            self.blocks.add_module(str(block_idx), block_fn(
                prev_chs, out_chs, stride=stride, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups,
                first_dilation=first_dilation, proj_layer=proj_layer, drop_path_rate=drop_path_rate,
                **layer_kwargs, **block_kwargs))
            prev_chs = out_chs
            first_dilation = dilation
            proj_layer = None

    def forward(self, x):
        x = self.blocks(x)
        return x

def is_stem_deep(stem_type):
    return any([s in stem_type for s in ('deep', 'tiered')])
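# Editor's note (not in the original source): create_resnetv2_stem below builds
# either a single 7x7/stride-2 conv ('', 'fixed', 'same') or a deep stem of
# three 3x3 convs ('deep', 'tiered'); 'tiered' narrows the first conv to 3/8 of
# the stem width. The 'fixed'/'same' variants only change the max-pool padding.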
def create_resnetv2_stem(
        in_chs, out_chs=64, stem_type='', preact=True,
        conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32)):
    stem = OrderedDict()
    assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same', 'tiered')
    if is_stem_deep(stem_type):
        if 'tiered' in stem_type:
            stem_chs = (3 * out_chs // 8, out_chs // 2)
        else:
            stem_chs = (out_chs // 2, out_chs // 2)
        stem['conv1'] = conv_layer(in_chs, stem_chs[0], kernel_size=3, stride=2)
        stem['norm1'] = norm_layer(stem_chs[0])
        stem['conv2'] = conv_layer(stem_chs[0], stem_chs[1], kernel_size=3, stride=1)
        stem['norm2'] = norm_layer(stem_chs[1])
        stem['conv3'] = conv_layer(stem_chs[1], out_chs, kernel_size=3, stride=1)
        if not preact:
            stem['norm3'] = norm_layer(out_chs)
    else:
        stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2)
        if not preact:
            stem['norm'] = norm_layer(out_chs)
    if 'fixed' in stem_type:
        stem['pad'] = nn.ConstantPad2d(1, 0.0)
        stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
    elif 'same' in stem_type:
        stem['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same')
    else:
        stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    return nn.Sequential(stem)

class ResNetV2(nn.Module):
    def __init__(
            self, layers, channels=(256, 512, 1024, 2048), num_classes=1000, in_chans=3,
            global_pool='avg', output_stride=32, width_factor=1, stem_chs=64, stem_type='',
            avg_down=False, preact=True, act_layer=nn.ReLU,
            norm_layer=partial(GroupNormAct, num_groups=32), conv_layer=StdConv2d,
            drop_rate=0.0, drop_path_rate=0.0, zero_init_last=False):
        super().__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        wf = width_factor
        norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer)
        act_layer = get_act_layer(act_layer)
        self.feature_info = []
        stem_chs = make_divisible(stem_chs * wf)
        self.stem = create_resnetv2_stem(in_chans, stem_chs, stem_type, preact, conv_layer=conv_layer, norm_layer=norm_layer)
        stem_feat = ('stem.conv3' if is_stem_deep(stem_type) else 'stem.conv') if preact else 'stem.norm'
        self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=stem_feat))
        prev_chs = stem_chs
        curr_stride = 4
        dilation = 1
        block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
        block_fn = PreActBottleneck if preact else Bottleneck
        self.stages = nn.Sequential()
        for stage_idx, (d, c, bdpr) in enumerate(zip(layers, channels, block_dprs)):
            out_chs = make_divisible(c * wf)
            stride = 1 if stage_idx == 0 else 2
            if curr_stride >= output_stride:
                dilation *= stride
                stride = 1
            stage = ResNetStage(
                prev_chs, out_chs, stride=stride, dilation=dilation, depth=d, avg_down=avg_down,
                act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer,
                block_dpr=bdpr, block_fn=block_fn)
            prev_chs = out_chs
            curr_stride *= stride
            self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{stage_idx}')]
            self.stages.add_module(str(stage_idx), stage)
        self.num_features = self.head_hidden_size = prev_chs
        self.norm = norm_layer(self.num_features) if preact else nn.Identity()
        self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True)
        self.init_weights(zero_init_last=zero_init_last)
        self.grad_checkpointing = False

    @torch.jit.ignore
    def init_weights(self, zero_init_last=True):
        named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)

    @torch.jit.ignore()
    def load_pretrained(self, checkpoint_path, prefix='resnet/'):
        _load_weights(self, checkpoint_path, prefix)

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        matcher = dict(
            stem='^stem',
            blocks='^stages\\.(\\d+)' if coarse else [('^stages\\.(\\d+)\\.blocks\\.(\\d+)', None), ('^norm', (99999,))])
        return matcher

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def get_classifier(self) -> nn.Module:
        return self.head.fc

    def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None):
        self.num_classes = num_classes
        self.head.reset(num_classes, global_pool)
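    # Editor's note (not in the original source): forward_features below returns
    # the final pre-pooling feature map; forward_head applies the pooled
    # classifier. After set_grad_checkpointing(True), the stages are run through
    # timm's checkpoint_seq to trade recompute for activation memory.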
    def forward_features(self, x):
        x = self.stem(x)
        if self.grad_checkpointing and not torch.jit.is_scripting():
            x = checkpoint_seq(self.stages, x, flatten=True)
        else:
            x = self.stages(x)
        x = self.norm(x)
        return x

    def forward_head(self, x, pre_logits: bool=False):
        return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x

def _init_weights(module: nn.Module, name: str='', zero_init_last=True):
    if isinstance(module, nn.Linear) or ('head.fc' in name and isinstance(module, nn.Conv2d)):
        nn.init.normal_(module.weight, mean=0.0, std=0.01)
        nn.init.zeros_(module.bias)
    elif isinstance(module, nn.Conv2d):
        nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm)):
        nn.init.ones_(module.weight)
        nn.init.zeros_(module.bias)
    elif zero_init_last and hasattr(module, 'zero_init_last'):
        module.zero_init_last()

@torch.no_grad()
def _load_weights(model: nn.Module, checkpoint_path: str, prefix: str='resnet/'):
    import numpy as np

    def t2p(conv_weights):
        if conv_weights.ndim == 4:
            conv_weights = conv_weights.transpose([3, 2, 0, 1])
        return torch.from_numpy(conv_weights)

    weights = np.load(checkpoint_path)
    stem_conv_w = adapt_input_conv(model.stem.conv.weight.shape[1], t2p(weights[f'{prefix}root_block/standardized_conv2d/kernel']))
    model.stem.conv.weight.copy_(stem_conv_w)
    model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma']))
    model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta']))
    if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]:
        model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel']))
        model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias']))
    for i, (sname, stage) in enumerate(model.stages.named_children()):
        for j, (bname, block) in enumerate(stage.blocks.named_children()):
            cname = 'standardized_conv2d'
            block_prefix = f'{prefix}block{i + 1}/unit{j + 1:02d}/'
            block.conv1.weight.copy_(t2p(weights[f'{block_prefix}a/{cname}/kernel']))
            block.conv2.weight.copy_(t2p(weights[f'{block_prefix}b/{cname}/kernel']))
            block.conv3.weight.copy_(t2p(weights[f'{block_prefix}c/{cname}/kernel']))
            block.norm1.weight.copy_(t2p(weights[f'{block_prefix}a/group_norm/gamma']))
            block.norm2.weight.copy_(t2p(weights[f'{block_prefix}b/group_norm/gamma']))
            block.norm3.weight.copy_(t2p(weights[f'{block_prefix}c/group_norm/gamma']))
            block.norm1.bias.copy_(t2p(weights[f'{block_prefix}a/group_norm/beta']))
            block.norm2.bias.copy_(t2p(weights[f'{block_prefix}b/group_norm/beta']))
            block.norm3.bias.copy_(t2p(weights[f'{block_prefix}c/group_norm/beta']))
            if block.downsample is not None:
                w = weights[f'{block_prefix}a/proj/{cname}/kernel']
                block.downsample.conv.weight.copy_(t2p(w))

def _create_resnetv2(variant, pretrained=False, **kwargs):
    feature_cfg = dict(flatten_sequential=True)
    return build_model_with_cfg(ResNetV2, variant, pretrained, feature_cfg=feature_cfg, **kwargs)

def _create_resnetv2_bit(variant, pretrained=False, **kwargs):
    return _create_resnetv2(variant, pretrained=pretrained, stem_type='fixed', conv_layer=partial(StdConv2d, eps=1e-08), **kwargs)
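# Editor's note (not in the original source): _load_weights above maps the
# original Big Transfer (BiT) .npz checkpoints onto this module; t2p transposes
# conv kernels from TF/JAX HWIO layout to PyTorch OIHW, and the classifier is
# only copied when the class count matches. The custom_load=True flags in the
# BiT configs below appear to route pretrained loading through this path.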
def _cfg(url='', **kwargs):
    return {
        'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
        'crop_pct': 0.875, 'interpolation': 'bilinear',
        'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
        'first_conv': 'stem.conv', 'classifier': 'head.fc',
        **kwargs,
    }

default_cfgs = generate_default_cfgs({
    'resnetv2_50x1_bit.goog_distilled_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic', custom_load=True),
    'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic', custom_load=True),
    'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic', custom_load=True),
    'resnetv2_50x1_bit.goog_in21k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
    'resnetv2_50x3_bit.goog_in21k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
    'resnetv2_101x1_bit.goog_in21k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
    'resnetv2_101x3_bit.goog_in21k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
    'resnetv2_152x2_bit.goog_in21k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
    'resnetv2_152x4_bit.goog_in21k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 480, 480), pool_size=(15, 15), crop_pct=1.0, custom_load=True),
    'resnetv2_50x1_bit.goog_in21k': _cfg(hf_hub_id='timm/', num_classes=21843, custom_load=True),
    'resnetv2_50x3_bit.goog_in21k': _cfg(hf_hub_id='timm/', num_classes=21843, custom_load=True),
    'resnetv2_101x1_bit.goog_in21k': _cfg(hf_hub_id='timm/', num_classes=21843, custom_load=True),
    'resnetv2_101x3_bit.goog_in21k': _cfg(hf_hub_id='timm/', num_classes=21843, custom_load=True),
    'resnetv2_152x2_bit.goog_in21k': _cfg(hf_hub_id='timm/', num_classes=21843, custom_load=True),
    'resnetv2_152x4_bit.goog_in21k': _cfg(hf_hub_id='timm/', num_classes=21843, custom_load=True),
    'resnetv2_50.a1h_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
    'resnetv2_50d.untrained': _cfg(interpolation='bicubic', first_conv='stem.conv1'),
    'resnetv2_50t.untrained': _cfg(interpolation='bicubic', first_conv='stem.conv1'),
    'resnetv2_101.a1h_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
    'resnetv2_101d.untrained': _cfg(interpolation='bicubic', first_conv='stem.conv1'),
    'resnetv2_152.untrained': _cfg(interpolation='bicubic'),
    'resnetv2_152d.untrained': _cfg(interpolation='bicubic', first_conv='stem.conv1'),
    'resnetv2_50d_gn.ah_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic', first_conv='stem.conv1', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
    'resnetv2_50d_evos.ah_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic', first_conv='stem.conv1', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
    'resnetv2_50d_frn.untrained': _cfg(interpolation='bicubic', first_conv='stem.conv1'),
})

@register_model
def resnetv2_50x1_bit(pretrained=False, **kwargs) -> ResNetV2:
    return _create_resnetv2_bit('resnetv2_50x1_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs)

@register_model
def resnetv2_50x3_bit(pretrained=False, **kwargs) -> ResNetV2:
    return _create_resnetv2_bit('resnetv2_50x3_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, **kwargs)
_create_resnetv2_bit('resnetv2_101x1_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=1, **kwargs) @register_model def resnetv2_101x3_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit('resnetv2_101x3_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=3, **kwargs) @register_model def resnetv2_152x2_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit('resnetv2_152x2_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) @register_model def resnetv2_152x4_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit('resnetv2_152x4_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=4, **kwargs) @register_model def resnetv2_50(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d) return _create_resnetv2('resnetv2_50', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50d(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_50d', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50t(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='tiered', avg_down=True) return _create_resnetv2('resnetv2_50t', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_101(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d) return _create_resnetv2('resnetv2_101', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_101d(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_101d', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_152(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d) return _create_resnetv2('resnetv2_152', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_152d(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_152d', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50d_gn(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=GroupNormAct, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_50d_gn', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50d_evos(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNorm2dS0, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_50d_evos', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50d_frn(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=FilterResponseNormTlu2d, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_50d_frn', pretrained=pretrained, 
**dict(model_args, **kwargs)) register_model_deprecations(__name__, {'resnetv2_50x1_bitm': 'resnetv2_50x1_bit.goog_in21k_ft_in1k', 'resnetv2_50x3_bitm': 'resnetv2_50x3_bit.goog_in21k_ft_in1k', 'resnetv2_101x1_bitm': 'resnetv2_101x1_bit.goog_in21k_ft_in1k', 'resnetv2_101x3_bitm': 'resnetv2_101x3_bit.goog_in21k_ft_in1k', 'resnetv2_152x2_bitm': 'resnetv2_152x2_bit.goog_in21k_ft_in1k', 'resnetv2_152x4_bitm': 'resnetv2_152x4_bit.goog_in21k_ft_in1k', 'resnetv2_50x1_bitm_in21k': 'resnetv2_50x1_bit.goog_in21k', 'resnetv2_50x3_bitm_in21k': 'resnetv2_50x3_bit.goog_in21k', 'resnetv2_101x1_bitm_in21k': 'resnetv2_101x1_bit.goog_in21k', 'resnetv2_101x3_bitm_in21k': 'resnetv2_101x3_bit.goog_in21k', 'resnetv2_152x2_bitm_in21k': 'resnetv2_152x2_bit.goog_in21k', 'resnetv2_152x4_bitm_in21k': 'resnetv2_152x4_bit.goog_in21k', 'resnetv2_50x1_bit_distilled': 'resnetv2_50x1_bit.goog_distilled_in1k', 'resnetv2_152x2_bit_teacher': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k', 'resnetv2_152x2_bit_teacher_384': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384'}) # File: pytorch-image-models-main/timm/models/rexnet.py """""" from functools import partial from math import ceil from typing import Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead, create_act_layer, ConvNormAct, DropPath, make_divisible, SEModule from ._builder import build_model_with_cfg from ._efficientnet_builder import efficientnet_init_weights from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['RexNet'] SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d) class LinearBottleneck(nn.Module): def __init__(self, in_chs, out_chs, stride, dilation=(1, 1), exp_ratio=1.0, se_ratio=0.0, ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_path=None): super(LinearBottleneck, self).__init__() self.use_shortcut = stride == 1 and dilation[0] == dilation[1] and (in_chs <= out_chs) self.in_channels = in_chs self.out_channels = out_chs if exp_ratio != 1.0: dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div) self.conv_exp = ConvNormAct(in_chs, dw_chs, act_layer=act_layer) else: dw_chs = in_chs self.conv_exp = None self.conv_dw = ConvNormAct(dw_chs, dw_chs, kernel_size=3, stride=stride, dilation=dilation[0], groups=dw_chs, apply_act=False) if se_ratio > 0: self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div)) else: self.se = None self.act_dw = create_act_layer(dw_act_layer) self.conv_pwl = ConvNormAct(dw_chs, out_chs, 1, apply_act=False) self.drop_path = drop_path def feat_channels(self, exp=False): return self.conv_dw.out_channels if exp else self.out_channels def forward(self, x): shortcut = x if self.conv_exp is not None: x = self.conv_exp(x) x = self.conv_dw(x) if self.se is not None: x = self.se(x) x = self.act_dw(x) x = self.conv_pwl(x) if self.use_shortcut: if self.drop_path is not None: x = self.drop_path(x) x = torch.cat([x[:, 0:self.in_channels] + shortcut, x[:, self.in_channels:]], dim=1) return x def _block_cfg(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, se_ratio=0.0, ch_div=1): layers = [1, 2, 2, 3, 3, 5] strides = [1, 2, 2, 2, 1, 2] layers = [ceil(element * depth_mult) for element in layers] strides = sum([[element] + [1] * (layers[idx] - 1) for (idx, element) in enumerate(strides)], []) exp_ratios = [1] * layers[0] + [6] * sum(layers[1:]) depth = sum(layers[:]) * 3 base_chs = initial_chs / width_mult if width_mult < 
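# Illustrative sketch of the partial residual in LinearBottleneck.forward above:
# only the first in_channels channels receive the shortcut; the remaining
# (expanded) channels pass through untouched.
import torch
in_chs, out_chs = 16, 24
x = torch.randn(2, out_chs, 8, 8)        # bottleneck output
shortcut = torch.randn(2, in_chs, 8, 8)  # bottleneck input
y = torch.cat([x[:, :in_chs] + shortcut, x[:, in_chs:]], dim=1)
assert y.shape == x.shape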
1.0 else initial_chs out_chs_list = [] for i in range(depth // 3): out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div)) base_chs += final_chs / (depth // 3 * 1.0) se_ratios = [0.0] * (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:]) return list(zip(out_chs_list, exp_ratios, strides, se_ratios)) def _build_blocks(block_cfg, prev_chs, width_mult, ch_div=1, output_stride=32, act_layer='swish', dw_act_layer='relu6', drop_path_rate=0.0): feat_chs = [prev_chs] feature_info = [] curr_stride = 2 dilation = 1 features = [] num_blocks = len(block_cfg) for (block_idx, (chs, exp_ratio, stride, se_ratio)) in enumerate(block_cfg): next_dilation = dilation if stride > 1: fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}' feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)] if curr_stride >= output_stride: next_dilation = dilation * stride stride = 1 block_dpr = drop_path_rate * block_idx / (num_blocks - 1) drop_path = DropPath(block_dpr) if block_dpr > 0.0 else None features.append(LinearBottleneck(in_chs=prev_chs, out_chs=chs, exp_ratio=exp_ratio, stride=stride, dilation=(dilation, next_dilation), se_ratio=se_ratio, ch_div=ch_div, act_layer=act_layer, dw_act_layer=dw_act_layer, drop_path=drop_path)) curr_stride *= stride dilation = next_dilation prev_chs = chs feat_chs += [features[-1].feat_channels()] pen_chs = make_divisible(1280 * width_mult, divisor=ch_div) feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')] features.append(ConvNormAct(prev_chs, pen_chs, act_layer=act_layer)) return (features, feature_info) class RexNet(nn.Module): def __init__(self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, initial_chs=16, final_chs=180, width_mult=1.0, depth_mult=1.0, se_ratio=1 / 12.0, ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_rate=0.2, drop_path_rate=0.0): super(RexNet, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False assert output_stride in (32, 16, 8) stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32 stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div) self.stem = ConvNormAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer) block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div) (features, self.feature_info) = _build_blocks(block_cfg, stem_chs, width_mult, ch_div, output_stride, act_layer, dw_act_layer, drop_path_rate) self.num_features = self.head_hidden_size = features[-1].out_channels self.features = nn.Sequential(*features) self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate) efficientnet_init_weights(self) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^stem', blocks='^features\\.(\\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.features, x, flatten=True) else: x = self.features(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def 
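# Illustrative sketch: _block_cfg() above emits one (out_chs, exp_ratio, stride,
# se_ratio) tuple per block, with channels growing linearly from initial_chs
# toward final_chs (the ReXNet channel schedule). For the rexnet_100 defaults:
cfg = _block_cfg(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, se_ratio=1 / 12.0, ch_div=1)
assert len(cfg) == 16 and cfg[0] == (16, 1, 1, 0.0)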
forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_rexnet(variant, pretrained, **kwargs): feature_cfg = dict(flatten_sequential=True) return build_model_with_cfg(RexNet, variant, pretrained, feature_cfg=feature_cfg, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', 'license': 'mit', **kwargs} default_cfgs = generate_default_cfgs({'rexnet_100.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnet_130.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnet_150.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnet_200.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnet_300.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnetr_100.untrained': _cfg(), 'rexnetr_130.untrained': _cfg(), 'rexnetr_150.untrained': _cfg(), 'rexnetr_200.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), 'rexnetr_300.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), 'rexnetr_200.sw_in12k': _cfg(hf_hub_id='timm/', num_classes=11821, crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), 'rexnetr_300.sw_in12k': _cfg(hf_hub_id='timm/', num_classes=11821, crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0')}) @register_model def rexnet_100(pretrained=False, **kwargs) -> RexNet: return _create_rexnet('rexnet_100', pretrained, **kwargs) @register_model def rexnet_130(pretrained=False, **kwargs) -> RexNet: return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs) @register_model def rexnet_150(pretrained=False, **kwargs) -> RexNet: return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs) @register_model def rexnet_200(pretrained=False, **kwargs) -> RexNet: return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs) @register_model def rexnet_300(pretrained=False, **kwargs) -> RexNet: return _create_rexnet('rexnet_300', pretrained, width_mult=3.0, **kwargs) @register_model def rexnetr_100(pretrained=False, **kwargs) -> RexNet: return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs) @register_model def rexnetr_130(pretrained=False, **kwargs) -> RexNet: return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs) @register_model def rexnetr_150(pretrained=False, **kwargs) -> RexNet: return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs) @register_model def rexnetr_200(pretrained=False, **kwargs) -> RexNet: return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs) @register_model def rexnetr_300(pretrained=False, **kwargs) -> RexNet: return _create_rexnet('rexnetr_300', pretrained, width_mult=3.0, ch_div=16, **kwargs) # File: pytorch-image-models-main/timm/models/selecsls.py """""" from typing import List import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['SelecSls'] class SequentialList(nn.Sequential): def __init__(self, *args): super(SequentialList, self).__init__(*args) @torch.jit._overload_method def 
forward(self, x): pass @torch.jit._overload_method def forward(self, x): pass def forward(self, x) -> List[torch.Tensor]: for module in self: x = module(x) return x class SelectSeq(nn.Module): def __init__(self, mode='index', index=0): super(SelectSeq, self).__init__() self.mode = mode self.index = index @torch.jit._overload_method def forward(self, x): pass @torch.jit._overload_method def forward(self, x): pass def forward(self, x) -> torch.Tensor: if self.mode == 'index': return x[self.index] else: return torch.cat(x, dim=1) def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1): if padding is None: padding = (stride - 1 + dilation * (k - 1)) // 2 return nn.Sequential(nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False), nn.BatchNorm2d(out_chs), nn.ReLU(inplace=True)) class SelecSlsBlock(nn.Module): def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1): super(SelecSlsBlock, self).__init__() self.stride = stride self.is_first = is_first assert stride in [1, 2] self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation) self.conv2 = conv_bn(mid_chs, mid_chs, 1) self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3) self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1) self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3) self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1) def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: if not isinstance(x, list): x = [x] assert len(x) in [1, 2] d1 = self.conv1(x[0]) d2 = self.conv3(self.conv2(d1)) d3 = self.conv5(self.conv4(d2)) if self.is_first: out = self.conv6(torch.cat([d1, d2, d3], 1)) return [out, out] else: return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]] class SelecSls(nn.Module): def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'): self.num_classes = num_classes super(SelecSls, self).__init__() self.stem = conv_bn(in_chans, 32, stride=2) self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']]) self.from_seq = SelectSeq() self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']]) self.num_features = self.head_hidden_size = cfg['num_features'] self.feature_info = cfg['feature_info'] (self.global_pool, self.head_drop, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) for (n, m) in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks='^features\\.(\\d+)', blocks_head='^head') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.fc def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.features(x) x = self.head(self.from_seq(x)) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.fc(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_selecsls(variant, pretrained, **kwargs): cfg = {} feature_info = [dict(num_chs=32, reduction=2, module='stem.2')] if 
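# Illustrative sketch: SelecSlsBlock above passes a List[Tensor] through a
# stage; the first block (is_first=True) seeds the cross-block skip tensor that
# later blocks concatenate back in (conv6 sees 2 * mid_chs + skip_chs inputs).
import torch
blk = SelecSlsBlock(32, 0, 64, 64, is_first=True, stride=2)
out = blk(torch.randn(1, 32, 56, 56))
assert isinstance(out, list) and out[0].shape == (1, 64, 28, 28) and out[1].shape == (1, 64, 28, 28)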
variant.startswith('selecsls42'): cfg['block'] = SelecSlsBlock cfg['features'] = [(32, 0, 64, 64, True, 2), (64, 64, 64, 128, False, 1), (128, 0, 144, 144, True, 2), (144, 144, 144, 288, False, 1), (288, 0, 304, 304, True, 2), (304, 304, 304, 480, False, 1)] feature_info.extend([dict(num_chs=128, reduction=4, module='features.1'), dict(num_chs=288, reduction=8, module='features.3'), dict(num_chs=480, reduction=16, module='features.5')]) feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) if variant == 'selecsls42b': cfg['head'] = [(480, 960, 3, 2), (960, 1024, 3, 1), (1024, 1280, 3, 2), (1280, 1024, 1, 1)] feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) cfg['num_features'] = 1024 else: cfg['head'] = [(480, 960, 3, 2), (960, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 1, 1)] feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) cfg['num_features'] = 1280 elif variant.startswith('selecsls60'): cfg['block'] = SelecSlsBlock cfg['features'] = [(32, 0, 64, 64, True, 2), (64, 64, 64, 128, False, 1), (128, 0, 128, 128, True, 2), (128, 128, 128, 128, False, 1), (128, 128, 128, 288, False, 1), (288, 0, 288, 288, True, 2), (288, 288, 288, 288, False, 1), (288, 288, 288, 288, False, 1), (288, 288, 288, 416, False, 1)] feature_info.extend([dict(num_chs=128, reduction=4, module='features.1'), dict(num_chs=288, reduction=8, module='features.4'), dict(num_chs=416, reduction=16, module='features.8')]) feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) if variant == 'selecsls60b': cfg['head'] = [(416, 756, 3, 2), (756, 1024, 3, 1), (1024, 1280, 3, 2), (1280, 1024, 1, 1)] feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) cfg['num_features'] = 1024 else: cfg['head'] = [(416, 756, 3, 2), (756, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 1, 1)] feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) cfg['num_features'] = 1280 elif variant == 'selecsls84': cfg['block'] = SelecSlsBlock cfg['features'] = [(32, 0, 64, 64, True, 2), (64, 64, 64, 144, False, 1), (144, 0, 144, 144, True, 2), (144, 144, 144, 144, False, 1), (144, 144, 144, 144, False, 1), (144, 144, 144, 144, False, 1), (144, 144, 144, 304, False, 1), (304, 0, 304, 304, True, 2), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 512, False, 1)] feature_info.extend([dict(num_chs=144, reduction=4, module='features.1'), dict(num_chs=304, reduction=8, module='features.6'), dict(num_chs=512, reduction=16, module='features.12')]) cfg['head'] = [(512, 960, 3, 2), (960, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 3, 1)] cfg['num_features'] = 1280 feature_info.extend([dict(num_chs=1024, reduction=32, module='head.1'), dict(num_chs=1280, reduction=64, module='head.3')]) else: raise ValueError('Invalid net configuration ' + variant + ' !!!') cfg['feature_info'] = feature_info return build_model_with_cfg(SelecSls, variant, pretrained, model_cfg=cfg, feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'fc', **kwargs} default_cfgs = generate_default_cfgs({'selecsls42.untrained': _cfg(interpolation='bicubic'), 'selecsls42b.in1k': _cfg(hf_hub_id='timm/', 
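# Illustrative sketch (assumes timm is installed): the feature_info entries
# built above let build_model_with_cfg expose intermediate maps via
# features_only; out_indices=(0, 1, 2, 3, 4) keeps the first five entries.
import timm
feats = timm.create_model('selecsls60b', features_only=True, pretrained=False)
assert tuple(feats.feature_info.reduction()) == (2, 4, 8, 16, 32)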
interpolation='bicubic'), 'selecsls60.in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'), 'selecsls60b.in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'), 'selecsls84.untrained': _cfg(interpolation='bicubic')}) @register_model def selecsls42(pretrained=False, **kwargs) -> SelecSls: return _create_selecsls('selecsls42', pretrained, **kwargs) @register_model def selecsls42b(pretrained=False, **kwargs) -> SelecSls: return _create_selecsls('selecsls42b', pretrained, **kwargs) @register_model def selecsls60(pretrained=False, **kwargs) -> SelecSls: return _create_selecsls('selecsls60', pretrained, **kwargs) @register_model def selecsls60b(pretrained=False, **kwargs) -> SelecSls: return _create_selecsls('selecsls60b', pretrained, **kwargs) @register_model def selecsls84(pretrained=False, **kwargs) -> SelecSls: return _create_selecsls('selecsls84', pretrained, **kwargs) # File: pytorch-image-models-main/timm/models/senet.py """""" import math from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['SENet'] def _weight_init(m): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1.0) nn.init.constant_(m.bias, 0.0) class SEModule(nn.Module): def __init__(self, channels, reduction): super(SEModule, self).__init__() self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) self.sigmoid = nn.Sigmoid() def forward(self, x): module_input = x x = x.mean((2, 3), keepdim=True) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x class Bottleneck(nn.Module): def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: shortcut = self.downsample(x) out = self.se_module(out) + shortcut out = self.relu(out) return out class SEBottleneck(Bottleneck): expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes * 2) self.conv2 = nn.Conv2d(planes * 2, planes * 4, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes * 4) self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBottleneck(Bottleneck): expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEResNetBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 
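# Illustrative sketch of the squeeze-and-excitation gating in SEModule above:
# global-average pool -> 1x1 bottleneck -> 1x1 expand -> sigmoid, then a
# channel-wise rescale of the input.
import torch
se = SEModule(channels=64, reduction=16)
y = se(torch.randn(2, 64, 14, 14))
assert y.shape == (2, 64, 14, 14)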
4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNeXtBottleneck(Bottleneck): expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4): super(SEResNeXtBottleneck, self).__init__() width = math.floor(planes * (base_width / 64)) * groups self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1) self.bn1 = nn.BatchNorm2d(width) self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(width) self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEResNetBlock, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes, reduction=reduction) self.downsample = downsample self.stride = stride def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) if self.downsample is not None: shortcut = self.downsample(x) out = self.se_module(out) + shortcut out = self.relu(out) return out class SENet(nn.Module): def __init__(self, block, layers, groups, reduction, drop_rate=0.2, in_chans=3, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=1000, global_pool='avg'): super(SENet, self).__init__() self.inplanes = inplanes self.num_classes = num_classes self.drop_rate = drop_rate if input_3x3: layer0_modules = [('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)), ('bn1', nn.BatchNorm2d(64)), ('relu1', nn.ReLU(inplace=True)), ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), ('bn2', nn.BatchNorm2d(64)), ('relu2', nn.ReLU(inplace=True)), ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), ('bn3', nn.BatchNorm2d(inplanes)), ('relu3', nn.ReLU(inplace=True))] else: layer0_modules = [('conv1', nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), ('bn1', nn.BatchNorm2d(inplanes)), ('relu1', nn.ReLU(inplace=True))] self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] self.layer1 = self._make_layer(block, planes=64, blocks=layers[0], groups=groups, reduction=reduction, downsample_kernel_size=1, downsample_padding=0) self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] self.layer2 = self._make_layer(block, planes=128, blocks=layers[1], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding) self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] self.layer3 = self._make_layer(block, planes=256, blocks=layers[2], stride=2, groups=groups, reduction=reduction, 
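# Illustrative sketch: the grouped bottleneck width in SEResNeXtBottleneck
# above is floor(planes * base_width / 64) * groups; for the 32x4d variants
# (groups=32, base_width=4) the first stage's planes=64 gives width 128.
import math
assert math.floor(64 * (4 / 64)) * 32 == 128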
downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding) self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] self.layer4 = self._make_layer(block, planes=512, blocks=layers[3], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding) self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] self.num_features = self.head_hidden_size = 512 * block.expansion (self.global_pool, self.last_linear) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) for m in self.modules(): _weight_init(m) def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, downsample_kernel_size=1, downsample_padding=0): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, stride=stride, padding=downsample_padding, bias=False), nn.BatchNorm2d(planes * block.expansion)) layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, groups, reduction)) return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^layer0', blocks='^layer(\\d+)' if coarse else '^layer(\\d+)\\.(\\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.last_linear def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.last_linear) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.layer0(x) x = self.pool0(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) if self.drop_rate > 0.0: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.last_linear(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_senet(variant, pretrained=False, **kwargs): return build_model_with_cfg(SENet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', **kwargs} default_cfgs = generate_default_cfgs({'legacy_senet154.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_senet154-e9eb9fe6.pth'), 'legacy_seresnet18.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', interpolation='bicubic'), 'legacy_seresnet34.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), 'legacy_seresnet50.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), 'legacy_seresnet101.in1k': 
_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), 'legacy_seresnet152.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), 'legacy_seresnext26_32x4d.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', interpolation='bicubic'), 'legacy_seresnext50_32x4d.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext50_32x4d-f3651bad.pth'), 'legacy_seresnext101_32x4d.in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext101_32x4d-37725eac.pth')}) @register_model def legacy_seresnet18(pretrained=False, **kwargs) -> SENet: model_args = dict(block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet18', pretrained, **model_args) @register_model def legacy_seresnet34(pretrained=False, **kwargs) -> SENet: model_args = dict(block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet34', pretrained, **model_args) @register_model def legacy_seresnet50(pretrained=False, **kwargs) -> SENet: model_args = dict(block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet50', pretrained, **model_args) @register_model def legacy_seresnet101(pretrained=False, **kwargs) -> SENet: model_args = dict(block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet101', pretrained, **model_args) @register_model def legacy_seresnet152(pretrained=False, **kwargs) -> SENet: model_args = dict(block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet152', pretrained, **model_args) @register_model def legacy_senet154(pretrained=False, **kwargs) -> SENet: model_args = dict(block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs) return _create_senet('legacy_senet154', pretrained, **model_args) @register_model def legacy_seresnext26_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict(block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs) return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args) @register_model def legacy_seresnext50_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict(block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs) return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args) @register_model def legacy_seresnext101_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict(block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs) return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args) # File: pytorch-image-models-main/timm/models/sequencer.py """""" import math from functools import partial from itertools import accumulate from typing import Optional, Tuple import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT from timm.layers import lecun_normal_, DropPath, Mlp, PatchEmbed, ClassifierHead from ._builder import build_model_with_cfg from ._manipulate import named_apply from 
._registry import register_model, generate_default_cfgs __all__ = ['Sequencer2d'] def _init_weights(module: nn.Module, name: str, head_bias: float=0.0, flax=False): if isinstance(module, nn.Linear): if name.startswith('head'): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) elif flax: lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-06) else: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif isinstance(module, (nn.RNN, nn.GRU, nn.LSTM)): stdv = 1.0 / math.sqrt(module.hidden_size) for weight in module.parameters(): nn.init.uniform_(weight, -stdv, stdv) elif hasattr(module, 'init_weights'): module.init_weights() class RNNIdentity(nn.Module): def __init__(self, *args, **kwargs): super(RNNIdentity, self).__init__() def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, None]: return (x, None) class RNN2dBase(nn.Module): def __init__(self, input_size: int, hidden_size: int, num_layers: int=1, bias: bool=True, bidirectional: bool=True, union='cat', with_fc=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.output_size = 2 * hidden_size if bidirectional else hidden_size self.union = union self.with_vertical = True self.with_horizontal = True self.with_fc = with_fc self.fc = None if with_fc: if union == 'cat': self.fc = nn.Linear(2 * self.output_size, input_size) elif union == 'add': self.fc = nn.Linear(self.output_size, input_size) elif union == 'vertical': self.fc = nn.Linear(self.output_size, input_size) self.with_horizontal = False elif union == 'horizontal': self.fc = nn.Linear(self.output_size, input_size) self.with_vertical = False else: raise ValueError('Unrecognized union: ' + union) elif union == 'cat': if 2 * self.output_size != input_size: raise ValueError(f'The output channel {2 * self.output_size} is different from the input channel {input_size}.') elif union == 'add': if self.output_size != input_size: raise ValueError(f'The output channel {self.output_size} is different from the input channel {input_size}.') elif union == 'vertical': if self.output_size != input_size: raise ValueError(f'The output channel {self.output_size} is different from the input channel {input_size}.') self.with_horizontal = False elif union == 'horizontal': if self.output_size != input_size: raise ValueError(f'The output channel {self.output_size} is different from the input channel {input_size}.') self.with_vertical = False else: raise ValueError('Unrecognized union: ' + union) self.rnn_v = RNNIdentity() self.rnn_h = RNNIdentity() def forward(self, x): (B, H, W, C) = x.shape if self.with_vertical: v = x.permute(0, 2, 1, 3) v = v.reshape(-1, H, C) (v, _) = self.rnn_v(v) v = v.reshape(B, W, H, -1) v = v.permute(0, 2, 1, 3) else: v = None if self.with_horizontal: h = x.reshape(-1, W, C) (h, _) = self.rnn_h(h) h = h.reshape(B, H, W, -1) else: h = None if v is not None and h is not None: if self.union == 'cat': x = torch.cat([v, h], dim=-1) else: x = v + h elif v is not None: x = v elif h is not None: x = h if self.fc is not None: x = self.fc(x) return x class LSTM2d(RNN2dBase): def __init__(self, input_size: int, hidden_size: int, num_layers:
int=1, bias: bool=True, bidirectional: bool=True, union='cat', with_fc=True): super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc) if self.with_vertical: self.rnn_v = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional) if self.with_horizontal: self.rnn_h = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional) class Sequencer2dBlock(nn.Module): def __init__(self, dim, hidden_size, mlp_ratio=3.0, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-06), act_layer=nn.GELU, num_layers=1, bidirectional=True, union='cat', with_fc=True, drop=0.0, drop_path=0.0): super().__init__() channels_dim = int(mlp_ratio * dim) self.norm1 = norm_layer(dim) self.rnn_tokens = rnn_layer(dim, hidden_size, num_layers=num_layers, bidirectional=bidirectional, union=union, with_fc=with_fc) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) def forward(self, x): x = x + self.drop_path(self.rnn_tokens(self.norm1(x))) x = x + self.drop_path(self.mlp_channels(self.norm2(x))) return x class Shuffle(nn.Module): def __init__(self): super().__init__() def forward(self, x): if self.training: (B, H, W, C) = x.shape r = torch.randperm(H * W) x = x.reshape(B, -1, C) x = x[:, r, :].reshape(B, H, W, -1) return x class Downsample2d(nn.Module): def __init__(self, input_dim, output_dim, patch_size): super().__init__() self.down = nn.Conv2d(input_dim, output_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): x = x.permute(0, 3, 1, 2) x = self.down(x) x = x.permute(0, 2, 3, 1) return x class Sequencer2dStage(nn.Module): def __init__(self, dim, dim_out, depth, patch_size, hidden_size, mlp_ratio, downsample=False, block_layer=Sequencer2dBlock, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-06), act_layer=nn.GELU, num_layers=1, bidirectional=True, union='cat', with_fc=True, drop=0.0, drop_path=0.0): super().__init__() if downsample: self.downsample = Downsample2d(dim, dim_out, patch_size) else: assert dim == dim_out self.downsample = nn.Identity() blocks = [] for block_idx in range(depth): blocks.append(block_layer(dim_out, hidden_size, mlp_ratio=mlp_ratio, rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, num_layers=num_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, drop=drop, drop_path=drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path)) self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class Sequencer2d(nn.Module): def __init__(self, num_classes=1000, img_size=224, in_chans=3, global_pool='avg', layers=(4, 3, 8, 3), patch_sizes=(7, 2, 2, 1), embed_dims=(192, 384, 384, 384), hidden_sizes=(48, 96, 96, 96), mlp_ratios=(3.0, 3.0, 3.0, 3.0), block_layer=Sequencer2dBlock, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-06), act_layer=nn.GELU, num_rnn_layers=1, bidirectional=True, union='cat', with_fc=True, drop_rate=0.0, drop_path_rate=0.0, nlhb=False, stem_norm=False): super().__init__() assert global_pool in ('', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = embed_dims[-1] self.feature_dim = -1 self.output_fmt = 'NHWC' self.feature_info = [] self.stem = PatchEmbed(img_size=None, 
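# Illustrative sketch: LSTM2d above mixes tokens with one LSTM over columns
# (vertical) and one over rows (horizontal) of an NHWC map, fusing the two
# directions according to the union/with_fc settings inherited from RNN2dBase.
import torch
rnn = LSTM2d(input_size=192, hidden_size=48, bidirectional=True, union='cat', with_fc=True)
y = rnn(torch.randn(2, 14, 14, 192))
assert y.shape == (2, 14, 14, 192)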
patch_size=patch_sizes[0], in_chans=in_chans, embed_dim=embed_dims[0], norm_layer=norm_layer if stem_norm else None, flatten=False, output_fmt='NHWC') assert len(layers) == len(patch_sizes) == len(embed_dims) == len(hidden_sizes) == len(mlp_ratios) reductions = list(accumulate(patch_sizes, lambda x, y: x * y)) stages = [] prev_dim = embed_dims[0] for (i, _) in enumerate(embed_dims): stages += [Sequencer2dStage(prev_dim, embed_dims[i], depth=layers[i], downsample=i > 0, patch_size=patch_sizes[i], hidden_size=hidden_sizes[i], mlp_ratio=mlp_ratios[i], block_layer=block_layer, rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, num_layers=num_rnn_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, drop=drop_rate, drop_path=drop_path_rate)] prev_dim = embed_dims[i] self.feature_info += [dict(num_chs=prev_dim, reduction=reductions[i], module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.norm = norm_layer(embed_dims[-1]) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, input_fmt=self.output_fmt) self.init_weights(nlhb=nlhb) def init_weights(self, nlhb=False): head_bias = -math.log(self.num_classes) if nlhb else 0.0 named_apply(partial(_init_weights, head_bias=head_bias), module=self) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks=[('^stages\\.(\\d+)', None), ('^norm', (99999,))] if coarse else [('^stages\\.(\\d+)\\.blocks\\.(\\d+)', None), ('^stages\\.(\\d+)\\.downsample', (0,)), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'stages.0.blocks.0.norm1.weight' in state_dict: return state_dict if 'model' in state_dict: state_dict = state_dict['model'] import re out_dict = {} for (k, v) in state_dict.items(): k = re.sub('blocks.([0-9]+).([0-9]+).down', lambda x: f'stages.{int(x.group(1)) + 1}.downsample.down', k) k = re.sub('blocks.([0-9]+).([0-9]+)', 'stages.\\1.blocks.\\2', k) k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def _create_sequencer2d(variant, pretrained=False, **kwargs): default_out_indices = tuple(range(3)) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg(Sequencer2d, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': DEFAULT_CROP_PCT, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.proj', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'sequencer2d_s.in1k': _cfg(hf_hub_id='timm/'), 'sequencer2d_m.in1k': _cfg(hf_hub_id='timm/'), 'sequencer2d_l.in1k': 
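# Illustrative sketch: checkpoint_filter_fn above remaps original-repo keys to
# this module's layout, e.g. 'blocks.0.1.norm1.weight' becomes
# 'stages.0.blocks.1.norm1.weight' via the second substitution.
import re
k = 'blocks.0.1.norm1.weight'
k = re.sub('blocks.([0-9]+).([0-9]+).down', lambda x: f'stages.{int(x.group(1)) + 1}.downsample.down', k)
k = re.sub('blocks.([0-9]+).([0-9]+)', 'stages.\\1.blocks.\\2', k)
assert k == 'stages.0.blocks.1.norm1.weight'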
_cfg(hf_hub_id='timm/')}) @register_model def sequencer2d_s(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict(layers=[4, 3, 8, 3], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union='cat', with_fc=True) model = _create_sequencer2d('sequencer2d_s', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def sequencer2d_m(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict(layers=[4, 3, 14, 3], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union='cat', with_fc=True, **kwargs) model = _create_sequencer2d('sequencer2d_m', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def sequencer2d_l(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict(layers=[8, 8, 16, 4], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union='cat', with_fc=True, **kwargs) model = _create_sequencer2d('sequencer2d_l', pretrained=pretrained, **dict(model_args, **kwargs)) return model # File: pytorch-image-models-main/timm/models/sknet.py """""" import math from torch import nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectiveKernel, ConvNormAct, create_attn from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .resnet import ResNet class SelectiveKernelBasic(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): super(SelectiveKernelBasic, self).__init__() sk_kwargs = sk_kwargs or {} conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer) assert cardinality == 1, 'BasicBlock only supports cardinality of 1' assert base_width == 64, 'BasicBlock does not support changing base width' first_planes = planes // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation self.conv1 = SelectiveKernel(inplanes, first_planes, stride=stride, dilation=first_dilation, aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs) self.conv2 = ConvNormAct(first_planes, outplanes, kernel_size=3, dilation=dilation, apply_act=False, **conv_kwargs) self.se = create_attn(attn_layer, outplanes) self.act = act_layer(inplace=True) self.downsample = downsample self.drop_path = drop_path def zero_init_last(self): if getattr(self.conv2.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.conv2(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act(x) return x class SelectiveKernelBottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): super(SelectiveKernelBottleneck,
self).__init__() sk_kwargs = sk_kwargs or {} conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer) width = int(math.floor(planes * (base_width / 64)) * cardinality) first_planes = width // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation self.conv1 = ConvNormAct(inplanes, first_planes, kernel_size=1, **conv_kwargs) self.conv2 = SelectiveKernel(first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality, aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs) self.conv3 = ConvNormAct(width, outplanes, kernel_size=1, apply_act=False, **conv_kwargs) self.se = create_attn(attn_layer, outplanes) self.act = act_layer(inplace=True) self.downsample = downsample self.drop_path = drop_path def zero_init_last(self): if getattr(self.conv3.bn, 'weight', None) is not None: nn.init.zeros_(self.conv3.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act(x) return x def _create_skresnet(variant, pretrained=False, **kwargs): return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'fc', **kwargs} default_cfgs = generate_default_cfgs({'skresnet18.ra_in1k': _cfg(hf_hub_id='timm/'), 'skresnet34.ra_in1k': _cfg(hf_hub_id='timm/'), 'skresnet50.untrained': _cfg(), 'skresnet50d.untrained': _cfg(first_conv='conv1.0'), 'skresnext50_32x4d.ra_in1k': _cfg(hf_hub_id='timm/')}) @register_model def skresnet18(pretrained=False, **kwargs) -> ResNet: sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) model_args = dict(block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet18', pretrained, **model_args) @register_model def skresnet34(pretrained=False, **kwargs) -> ResNet: sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) model_args = dict(block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet34', pretrained, **model_args) @register_model def skresnet50(pretrained=False, **kwargs) -> ResNet: sk_kwargs = dict(split_input=True) model_args = dict(block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet50', pretrained, **model_args) @register_model def skresnet50d(pretrained=False, **kwargs) -> ResNet: sk_kwargs = dict(split_input=True) model_args = dict(block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet50d', pretrained, **model_args) @register_model def skresnext50_32x4d(pretrained=False, **kwargs) -> ResNet: sk_kwargs = dict(rd_ratio=1 / 16, rd_divisor=32, split_input=False) model_args = dict(block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return 
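# Illustrative sketch: the zero_init_last() hooks on the blocks above zero the
# final norm gamma so each residual branch starts near zero and a fresh block
# behaves roughly as identity (note the factory functions above pass
# zero_init_last=False, so these variants leave it disabled).
import torch.nn as nn
bn = nn.BatchNorm2d(64)
nn.init.zeros_(bn.weight)  # branch output ~ 0 at init, so x + f(x) ~ x
assert float(bn.weight.abs().sum()) == 0.0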
_create_skresnet('skresnext50_32x4d', pretrained, **model_args) # File: pytorch-image-models-main/timm/models/swin_transformer.py """""" import logging import math from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, ClassifierHead, to_2tuple, to_ntuple, trunc_normal_, _assert, use_fused_attn, resize_rel_pos_bias_table, resample_patch_embed, ndgrid from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq, named_apply from ._registry import generate_default_cfgs, register_model, register_model_deprecations from .vision_transformer import get_init_weights_vit __all__ = ['SwinTransformer'] _logger = logging.getLogger(__name__) _int_or_tuple_2_t = Union[int, Tuple[int, int]] def window_partition(x: torch.Tensor, window_size: Tuple[int, int]) -> torch.Tensor: (B, H, W, C) = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function def window_reverse(windows, window_size: Tuple[int, int], H: int, W: int): C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x def get_relative_position_index(win_h: int, win_w: int): coords = torch.stack(ndgrid(torch.arange(win_h), torch.arange(win_w))) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += win_h - 1 relative_coords[:, :, 1] += win_w - 1 relative_coords[:, :, 0] *= 2 * win_w - 1 return relative_coords.sum(-1) class WindowAttention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim: int, num_heads: int, head_dim: Optional[int]=None, window_size: _int_or_tuple_2_t=7, qkv_bias: bool=True, attn_drop: float=0.0, proj_drop: float=0.0): super().__init__() self.dim = dim self.window_size = to_2tuple(window_size) (win_h, win_w) = self.window_size self.window_area = win_h * win_w self.num_heads = num_heads head_dim = head_dim or dim // num_heads attn_dim = head_dim * num_heads self.scale = head_dim ** (-0.5) self.fused_attn = use_fused_attn(experimental=True) self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * win_h - 1) * (2 * win_w - 1), num_heads)) self.register_buffer('relative_position_index', get_relative_position_index(win_h, win_w), persistent=False) self.qkv = nn.Linear(dim, attn_dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(attn_dim, dim) self.proj_drop = nn.Dropout(proj_drop) trunc_normal_(self.relative_position_bias_table, std=0.02) self.softmax = nn.Softmax(dim=-1) def set_window_size(self, window_size: Tuple[int, int]) -> None: window_size = to_2tuple(window_size) if window_size == self.window_size: return self.window_size = window_size (win_h, win_w) = self.window_size self.window_area = win_h * win_w with torch.no_grad(): new_bias_shape = ((2 * win_h - 1) * (2 * win_w - 1), self.num_heads) self.relative_position_bias_table = nn.Parameter(resize_rel_pos_bias_table(self.relative_position_bias_table, new_window_size=self.window_size, 
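# Illustrative sketch: window_partition/window_reverse above are exact inverses
# whenever H and W are multiples of the window size.
import torch
x = torch.randn(2, 14, 14, 96)
w = window_partition(x, (7, 7))
assert w.shape == (8, 7, 7, 96)  # 2 images * 2x2 windows each
assert torch.equal(window_reverse(w, (7, 7), 14, 14), x)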
new_bias_shape=new_bias_shape)) self.register_buffer('relative_position_index', get_relative_position_index(win_h, win_w), persistent=False) def _get_rel_pos_bias(self) -> torch.Tensor: relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(self.window_area, self.window_area, -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() return relative_position_bias.unsqueeze(0) def forward(self, x, mask: Optional[torch.Tensor]=None): (B_, N, C) = x.shape qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) if self.fused_attn: attn_mask = self._get_rel_pos_bias() if mask is not None: num_win = mask.shape[0] mask = mask.view(1, num_win, 1, N, N).expand(B_ // num_win, -1, self.num_heads, -1, -1) attn_mask = attn_mask + mask.reshape(-1, self.num_heads, N, N) x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn + self._get_rel_pos_bias() if mask is not None: num_win = mask.shape[0] attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B_, N, -1) x = self.proj(x) x = self.proj_drop(x) return x class SwinTransformerBlock(nn.Module): def __init__(self, dim: int, input_resolution: _int_or_tuple_2_t, num_heads: int=4, head_dim: Optional[int]=None, window_size: _int_or_tuple_2_t=7, shift_size: int=0, always_partition: bool=False, dynamic_mask: bool=False, mlp_ratio: float=4.0, qkv_bias: bool=True, proj_drop: float=0.0, attn_drop: float=0.0, drop_path: float=0.0, act_layer: Callable=nn.GELU, norm_layer: Callable=nn.LayerNorm): super().__init__() self.dim = dim self.input_resolution = input_resolution self.target_shift_size = to_2tuple(shift_size) self.always_partition = always_partition self.dynamic_mask = dynamic_mask (self.window_size, self.shift_size) = self._calc_window_shift(window_size, shift_size) self.window_area = self.window_size[0] * self.window_size[1] self.mlp_ratio = mlp_ratio self.norm1 = norm_layer(dim) self.attn = WindowAttention(dim, num_heads=num_heads, head_dim=head_dim, window_size=self.window_size, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.register_buffer('attn_mask', None if self.dynamic_mask else self.get_attn_mask(), persistent=False) def get_attn_mask(self, x: Optional[torch.Tensor]=None) -> Optional[torch.Tensor]: if any(self.shift_size): if x is not None: (H, W) = (x.shape[1], x.shape[2]) device = x.device dtype = x.dtype else: (H, W) = self.input_resolution device = None dtype = None H = math.ceil(H / self.window_size[0]) * self.window_size[0] W = math.ceil(W / self.window_size[1]) * self.window_size[1] img_mask = torch.zeros((1, H, W, 1), dtype=dtype, device=device) cnt = 0 for h in ((0, -self.window_size[0]), (-self.window_size[0], -self.shift_size[0]), (-self.shift_size[0], None)): for w in ((0, -self.window_size[1]), (-self.window_size[1], -self.shift_size[1]), (-self.shift_size[1], None)): img_mask[:, h[0]:h[1], w[0]:w[1], :] 
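# Illustrative sketch: get_relative_position_index above assigns each
# (query, key) pair inside a window one of (2*win_h - 1) * (2*win_w - 1)
# relative-offset slots in the learned bias table.
import torch
idx = get_relative_position_index(7, 7)
assert idx.shape == (49, 49) and int(idx.max()) == 13 * 13 - 1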
= cnt cnt += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_area) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None return attn_mask def _calc_window_shift(self, target_window_size: Union[int, Tuple[int, int]], target_shift_size: Optional[Union[int, Tuple[int, int]]]=None) -> Tuple[Tuple[int, int], Tuple[int, int]]: target_window_size = to_2tuple(target_window_size) if target_shift_size is None: target_shift_size = self.target_shift_size if any(target_shift_size): target_shift_size = (target_window_size[0] // 2, target_window_size[1] // 2) else: target_shift_size = to_2tuple(target_shift_size) if self.always_partition: return (target_window_size, target_shift_size) window_size = [r if r <= w else w for (r, w) in zip(self.input_resolution, target_window_size)] shift_size = [0 if r <= w else s for (r, w, s) in zip(self.input_resolution, window_size, target_shift_size)] return (tuple(window_size), tuple(shift_size)) def set_input_size(self, feat_size: Tuple[int, int], window_size: Tuple[int, int], always_partition: Optional[bool]=None): self.input_resolution = feat_size if always_partition is not None: self.always_partition = always_partition (self.window_size, self.shift_size) = self._calc_window_shift(window_size) self.window_area = self.window_size[0] * self.window_size[1] self.attn.set_window_size(self.window_size) self.register_buffer('attn_mask', None if self.dynamic_mask else self.get_attn_mask(), persistent=False) def _attn(self, x): (B, H, W, C) = x.shape has_shift = any(self.shift_size) if has_shift: shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2)) else: shifted_x = x pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0] pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1] shifted_x = torch.nn.functional.pad(shifted_x, (0, 0, 0, pad_w, 0, pad_h)) (_, Hp, Wp, _) = shifted_x.shape x_windows = window_partition(shifted_x, self.window_size) x_windows = x_windows.view(-1, self.window_area, C) if getattr(self, 'dynamic_mask', False): attn_mask = self.get_attn_mask(shifted_x) else: attn_mask = self.attn_mask attn_windows = self.attn(x_windows, mask=attn_mask) attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) shifted_x = shifted_x[:, :H, :W, :].contiguous() if has_shift: x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2)) else: x = shifted_x return x def forward(self, x): (B, H, W, C) = x.shape x = x + self.drop_path1(self._attn(self.norm1(x))) x = x.reshape(B, -1, C) x = x + self.drop_path2(self.mlp(self.norm2(x))) x = x.reshape(B, H, W, C) return x class PatchMerging(nn.Module): def __init__(self, dim: int, out_dim: Optional[int]=None, norm_layer: Callable=nn.LayerNorm): super().__init__() self.dim = dim self.out_dim = out_dim or 2 * dim self.norm = norm_layer(4 * dim) self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False) def forward(self, x): (B, H, W, C) = x.shape pad_values = (0, 0, 0, H % 2, 0, W % 2) x = nn.functional.pad(x, pad_values) (_, H, W, _) = x.shape x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3) x = self.norm(x) x = self.reduction(x) return x class SwinTransformerStage(nn.Module): def __init__(self, dim: int, out_dim: int, 
input_resolution: Tuple[int, int], depth: int, downsample: bool=True, num_heads: int=4, head_dim: Optional[int]=None, window_size: _int_or_tuple_2_t=7, always_partition: bool=False, dynamic_mask: bool=False, mlp_ratio: float=4.0, qkv_bias: bool=True, proj_drop: float=0.0, attn_drop: float=0.0, drop_path: Union[List[float], float]=0.0, norm_layer: Callable=nn.LayerNorm): super().__init__() self.dim = dim self.input_resolution = input_resolution self.output_resolution = tuple((i // 2 for i in input_resolution)) if downsample else input_resolution self.depth = depth self.grad_checkpointing = False window_size = to_2tuple(window_size) shift_size = tuple([w // 2 for w in window_size]) if downsample: self.downsample = PatchMerging(dim=dim, out_dim=out_dim, norm_layer=norm_layer) else: assert dim == out_dim self.downsample = nn.Identity() self.blocks = nn.Sequential(*[SwinTransformerBlock(dim=out_dim, input_resolution=self.output_resolution, num_heads=num_heads, head_dim=head_dim, window_size=window_size, shift_size=0 if i % 2 == 0 else shift_size, always_partition=always_partition, dynamic_mask=dynamic_mask, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for i in range(depth)]) def set_input_size(self, feat_size: Tuple[int, int], window_size: int, always_partition: Optional[bool]=None): self.input_resolution = feat_size if isinstance(self.downsample, nn.Identity): self.output_resolution = feat_size else: self.output_resolution = tuple((i // 2 for i in feat_size)) for block in self.blocks: block.set_input_size(feat_size=self.output_resolution, window_size=window_size, always_partition=always_partition) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class SwinTransformer(nn.Module): def __init__(self, img_size: _int_or_tuple_2_t=224, patch_size: int=4, in_chans: int=3, num_classes: int=1000, global_pool: str='avg', embed_dim: int=96, depths: Tuple[int, ...]=(2, 2, 6, 2), num_heads: Tuple[int, ...]=(3, 6, 12, 24), head_dim: Optional[int]=None, window_size: _int_or_tuple_2_t=7, always_partition: bool=False, strict_img_size: bool=True, mlp_ratio: float=4.0, qkv_bias: bool=True, drop_rate: float=0.0, proj_drop_rate: float=0.0, attn_drop_rate: float=0.0, drop_path_rate: float=0.1, embed_layer: Callable=PatchEmbed, norm_layer: Union[str, Callable]=nn.LayerNorm, weight_init: str='', **kwargs): super().__init__() assert global_pool in ('', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.output_fmt = 'NHWC' self.num_layers = len(depths) self.embed_dim = embed_dim self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (self.num_layers - 1)) self.feature_info = [] if not isinstance(embed_dim, (tuple, list)): embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim[0], norm_layer=norm_layer, strict_img_size=strict_img_size, output_fmt='NHWC') patch_grid = self.patch_embed.grid_size head_dim = to_ntuple(self.num_layers)(head_dim) if not isinstance(window_size, (list, tuple)): window_size = to_ntuple(self.num_layers)(window_size) elif len(window_size) == 2: window_size = (window_size,) * self.num_layers assert len(window_size) == self.num_layers mlp_ratio = to_ntuple(self.num_layers)(mlp_ratio) dpr = 
[x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] layers = [] in_dim = embed_dim[0] scale = 1 for i in range(self.num_layers): out_dim = embed_dim[i] layers += [SwinTransformerStage(dim=in_dim, out_dim=out_dim, input_resolution=(patch_grid[0] // scale, patch_grid[1] // scale), depth=depths[i], downsample=i > 0, num_heads=num_heads[i], head_dim=head_dim[i], window_size=window_size[i], always_partition=always_partition, dynamic_mask=not strict_img_size, mlp_ratio=mlp_ratio[i], qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)] in_dim = out_dim if i > 0: scale *= 2 self.feature_info += [dict(num_chs=out_dim, reduction=patch_size * scale, module=f'layers.{i}')] self.layers = nn.Sequential(*layers) self.norm = norm_layer(self.num_features) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, input_fmt=self.output_fmt) if weight_init != 'skip': self.init_weights(weight_init) @torch.jit.ignore def init_weights(self, mode=''): assert mode in ('jax', 'jax_nlhb', 'moco', '') head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.0 named_apply(get_init_weights_vit(mode, head_bias=head_bias), self) @torch.jit.ignore def no_weight_decay(self): nwd = set() for (n, _) in self.named_parameters(): if 'relative_position_bias_table' in n: nwd.add(n) return nwd def set_input_size(self, img_size: Optional[Tuple[int, int]]=None, patch_size: Optional[Tuple[int, int]]=None, window_size: Optional[Tuple[int, int]]=None, window_ratio: int=8, always_partition: Optional[bool]=None) -> None: if img_size is not None or patch_size is not None: self.patch_embed.set_input_size(img_size=img_size, patch_size=patch_size) patch_grid = self.patch_embed.grid_size if window_size is None: window_size = tuple([pg // window_ratio for pg in patch_grid]) for (index, stage) in enumerate(self.layers): stage_scale = 2 ** max(index - 1, 0) stage.set_input_size(feat_size=(patch_grid[0] // stage_scale, patch_grid[1] // stage_scale), window_size=window_size, always_partition=always_partition) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^patch_embed', blocks='^layers\\.(\\d+)' if coarse else [('^layers\\.(\\d+).downsample', (0,)), ('^layers\\.(\\d+)\\.\\w+\\.(\\d+)', None), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for l in self.layers: l.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
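# --- Added note (illustrative, not part of the original timm source) ---
# forward_intermediates() walks the stages in order and collects NCHW feature
# maps for the requested stage indices, optionally applying the final norm to
# the last one. A minimal usage sketch, assuming timm is installed and this
# variant is registered (shapes shown are for a 224x224 input to swin_tiny):
#
#   import torch, timm
#   model = timm.create_model('swin_tiny_patch4_window7_224', pretrained=False)
#   feats = model.forward_intermediates(
#       torch.randn(1, 3, 224, 224), indices=[0, 1, 2, 3], intermediates_only=True)
#   for f in feats:
#       print(f.shape)  # (1, 96, 56, 56), (1, 192, 28, 28), (1, 384, 14, 14), (1, 768, 7, 7)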
intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.layers), indices) x = self.patch_embed(x) num_stages = len(self.layers) if torch.jit.is_scripting() or not stop_early: stages = self.layers else: stages = self.layers[:max_index + 1] for (i, stage) in enumerate(stages): x = stage(x) if i in take_indices: if norm and i == num_stages - 1: x_inter = self.norm(x) else: x_inter = x x_inter = x_inter.permute(0, 3, 1, 2).contiguous() intermediates.append(x_inter) if intermediates_only: return intermediates x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.layers), indices) self.layers = self.layers[:max_index + 1] if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x) x = self.layers(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): old_weights = True if 'head.fc.weight' in state_dict: old_weights = False import re out_dict = {} state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) for (k, v) in state_dict.items(): if any([n in k for n in ('relative_position_index', 'attn_mask')]): continue if 'patch_embed.proj.weight' in k: (_, _, H, W) = model.patch_embed.proj.weight.shape if v.shape[-2] != H or v.shape[-1] != W: v = resample_patch_embed(v, (H, W), interpolation='bicubic', antialias=True, verbose=True) if k.endswith('relative_position_bias_table'): m = model.get_submodule(k[:-29]) if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]: v = resize_rel_pos_bias_table(v, new_window_size=m.window_size, new_bias_shape=m.relative_position_bias_table.shape) if old_weights: k = re.sub('layers.(\\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k) k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def _create_swin_transformer(variant, pretrained=False, **kwargs): default_out_indices = tuple((i for (i, _) in enumerate(kwargs.get('depths', (1, 1, 3, 1))))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg(SwinTransformer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', 'license': 'mit', **kwargs} default_cfgs = generate_default_cfgs({'swin_small_patch4_window7_224.ms_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_small_patch4_window7_224_22kto1k_finetune.pth'), 'swin_base_patch4_window7_224.ms_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth'), 'swin_base_patch4_window12_384.ms_in22k_ft_in1k': _cfg(hf_hub_id='timm/', 
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'swin_large_patch4_window7_224.ms_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth'), 'swin_large_patch4_window12_384.ms_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'swin_tiny_patch4_window7_224.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth'), 'swin_small_patch4_window7_224.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth'), 'swin_base_patch4_window7_224.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth'), 'swin_base_patch4_window12_384.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'swin_tiny_patch4_window7_224.ms_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_tiny_patch4_window7_224_22kto1k_finetune.pth'), 'swin_tiny_patch4_window7_224.ms_in22k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_tiny_patch4_window7_224_22k.pth', num_classes=21841), 'swin_small_patch4_window7_224.ms_in22k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_small_patch4_window7_224_22k.pth', num_classes=21841), 'swin_base_patch4_window7_224.ms_in22k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth', num_classes=21841), 'swin_base_patch4_window12_384.ms_in22k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21841), 'swin_large_patch4_window7_224.ms_in22k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth', num_classes=21841), 'swin_large_patch4_window12_384.ms_in22k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21841), 'swin_s3_tiny_224.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_t-1d53f6a8.pth'), 'swin_s3_small_224.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_s-3bb4c69d.pth'), 'swin_s3_base_224.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_b-a1e95db4.pth')}) @register_model def swin_tiny_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer: model_args = dict(patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), 
num_heads=(3, 6, 12, 24)) return _create_swin_transformer('swin_tiny_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_small_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer: model_args = dict(patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer('swin_small_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_base_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer: model_args = dict(patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer('swin_base_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_base_patch4_window12_384(pretrained=False, **kwargs) -> SwinTransformer: model_args = dict(patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer('swin_base_patch4_window12_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_large_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer: model_args = dict(patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)) return _create_swin_transformer('swin_large_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_large_patch4_window12_384(pretrained=False, **kwargs) -> SwinTransformer: model_args = dict(patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)) return _create_swin_transformer('swin_large_patch4_window12_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_s3_tiny_224(pretrained=False, **kwargs) -> SwinTransformer: model_args = dict(patch_size=4, window_size=(7, 7, 14, 7), embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer('swin_s3_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_s3_small_224(pretrained=False, **kwargs) -> SwinTransformer: model_args = dict(patch_size=4, window_size=(14, 14, 14, 7), embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer('swin_s3_small_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_s3_base_224(pretrained=False, **kwargs) -> SwinTransformer: model_args = dict(patch_size=4, window_size=(7, 7, 14, 7), embed_dim=96, depths=(2, 2, 30, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer('swin_s3_base_224', pretrained=pretrained, **dict(model_args, **kwargs)) register_model_deprecations(__name__, {'swin_base_patch4_window7_224_in22k': 'swin_base_patch4_window7_224.ms_in22k', 'swin_base_patch4_window12_384_in22k': 'swin_base_patch4_window12_384.ms_in22k', 'swin_large_patch4_window7_224_in22k': 'swin_large_patch4_window7_224.ms_in22k', 'swin_large_patch4_window12_384_in22k': 'swin_large_patch4_window12_384.ms_in22k'}) # File: pytorch-image-models-main/timm/models/swin_transformer_v2.py """""" import math from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, _assert, ClassifierHead, resample_patch_embed, ndgrid, get_act_layer, 
LayerType from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_function from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['SwinTransformerV2'] _int_or_tuple_2_t = Union[int, Tuple[int, int]] def window_partition(x: torch.Tensor, window_size: Tuple[int, int]) -> torch.Tensor: (B, H, W, C) = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function def window_reverse(windows: torch.Tensor, window_size: Tuple[int, int], img_size: Tuple[int, int]) -> torch.Tensor: (H, W) = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x class WindowAttention(nn.Module): def __init__(self, dim: int, window_size: Tuple[int, int], num_heads: int, qkv_bias: bool=True, qkv_bias_separate: bool=False, attn_drop: float=0.0, proj_drop: float=0.0, pretrained_window_size: Tuple[int, int]=(0, 0)) -> None: super().__init__() self.dim = dim self.window_size = window_size self.pretrained_window_size = to_2tuple(pretrained_window_size) self.num_heads = num_heads self.qkv_bias_separate = qkv_bias_separate self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1)))) self.cpb_mlp = nn.Sequential(nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False)) self.qkv = nn.Linear(dim, dim * 3, bias=False) if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(dim)) self.register_buffer('k_bias', torch.zeros(dim), persistent=False) self.v_bias = nn.Parameter(torch.zeros(dim)) else: self.q_bias = None self.k_bias = None self.v_bias = None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.softmax = nn.Softmax(dim=-1) self._make_pair_wise_relative_positions() def _make_pair_wise_relative_positions(self): relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0]).to(torch.float32) relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1]).to(torch.float32) relative_coords_table = torch.stack(ndgrid(relative_coords_h, relative_coords_w)) relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous().unsqueeze(0) if self.pretrained_window_size[0] > 0: relative_coords_table[:, :, :, 0] /= self.pretrained_window_size[0] - 1 relative_coords_table[:, :, :, 1] /= self.pretrained_window_size[1] - 1 else: relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1 relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1 relative_coords_table *= 8 relative_coords_table = torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / math.log2(8) self.register_buffer('relative_coords_table', relative_coords_table, persistent=False) coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(ndgrid(coords_h, coords_w)) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 
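# Added commentary (not in the original source): the offsets above move the
# per-axis relative coordinates from [-(Wh-1), Wh-1] and [-(Ww-1), Ww-1] into
# non-negative ranges, and the row axis is scaled by (2*Ww - 1) so that the
# sum taken below yields a unique flat index in [0, (2*Wh-1)*(2*Ww-1)) for
# every 2D relative offset. E.g. for a 2x2 window (Wh = Ww = 2), the offset
# (dy, dx) = (1, -1) maps to (1 + 1) * 3 + (-1 + 1) = 6.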
relative_position_index = relative_coords.sum(-1) self.register_buffer('relative_position_index', relative_position_index, persistent=False) def set_window_size(self, window_size: Tuple[int, int]) -> None: window_size = to_2tuple(window_size) if window_size != self.window_size: self.window_size = window_size self._make_pair_wise_relative_positions() def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor]=None) -> torch.Tensor: (B_, N, C) = x.shape if self.q_bias is None: qkv = self.qkv(x) else: qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.qkv_bias_separate: qkv = self.qkv(x) qkv += qkv_bias else: qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1) logit_scale = torch.clamp(self.logit_scale, max=math.log(1.0 / 0.01)).exp() attn = attn * logit_scale relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads) relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() relative_position_bias = 16 * torch.sigmoid(relative_position_bias) attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: num_win = mask.shape[0] attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x) x = self.proj_drop(x) return x class SwinTransformerV2Block(nn.Module): def __init__(self, dim: int, input_resolution: _int_or_tuple_2_t, num_heads: int, window_size: _int_or_tuple_2_t=7, shift_size: _int_or_tuple_2_t=0, always_partition: bool=False, dynamic_mask: bool=False, mlp_ratio: float=4.0, qkv_bias: bool=True, proj_drop: float=0.0, attn_drop: float=0.0, drop_path: float=0.0, act_layer: LayerType='gelu', norm_layer: nn.Module=nn.LayerNorm, pretrained_window_size: _int_or_tuple_2_t=0): super().__init__() self.dim = dim self.input_resolution = to_2tuple(input_resolution) self.num_heads = num_heads self.target_shift_size = to_2tuple(shift_size) self.always_partition = always_partition self.dynamic_mask = dynamic_mask (self.window_size, self.shift_size) = self._calc_window_shift(window_size, shift_size) self.window_area = self.window_size[0] * self.window_size[1] self.mlp_ratio = mlp_ratio act_layer = get_act_layer(act_layer) self.attn = WindowAttention(dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, pretrained_window_size=to_2tuple(pretrained_window_size)) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.register_buffer('attn_mask', None if self.dynamic_mask else self.get_attn_mask(), persistent=False) def get_attn_mask(self, x: Optional[torch.Tensor]=None) -> Optional[torch.Tensor]: if any(self.shift_size): if x is None: img_mask = torch.zeros((1, *self.input_resolution, 1)) else: img_mask = 
torch.zeros((1, x.shape[1], x.shape[2], 1), dtype=x.dtype, device=x.device) cnt = 0 for h in ((0, -self.window_size[0]), (-self.window_size[0], -self.shift_size[0]), (-self.shift_size[0], None)): for w in ((0, -self.window_size[1]), (-self.window_size[1], -self.shift_size[1]), (-self.shift_size[1], None)): img_mask[:, h[0]:h[1], w[0]:w[1], :] = cnt cnt += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_area) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None return attn_mask def _calc_window_shift(self, target_window_size: _int_or_tuple_2_t, target_shift_size: Optional[_int_or_tuple_2_t]=None) -> Tuple[Tuple[int, int], Tuple[int, int]]: target_window_size = to_2tuple(target_window_size) if target_shift_size is None: target_shift_size = self.target_shift_size if any(target_shift_size): target_shift_size = (target_window_size[0] // 2, target_window_size[1] // 2) else: target_shift_size = to_2tuple(target_shift_size) if self.always_partition: return (target_window_size, target_shift_size) target_window_size = to_2tuple(target_window_size) target_shift_size = to_2tuple(target_shift_size) window_size = [r if r <= w else w for (r, w) in zip(self.input_resolution, target_window_size)] shift_size = [0 if r <= w else s for (r, w, s) in zip(self.input_resolution, window_size, target_shift_size)] return (tuple(window_size), tuple(shift_size)) def set_input_size(self, feat_size: Tuple[int, int], window_size: Tuple[int, int], always_partition: Optional[bool]=None): self.input_resolution = feat_size if always_partition is not None: self.always_partition = always_partition (self.window_size, self.shift_size) = self._calc_window_shift(to_2tuple(window_size)) self.window_area = self.window_size[0] * self.window_size[1] self.attn.set_window_size(self.window_size) self.register_buffer('attn_mask', None if self.dynamic_mask else self.get_attn_mask(), persistent=False) def _attn(self, x: torch.Tensor) -> torch.Tensor: (B, H, W, C) = x.shape has_shift = any(self.shift_size) if has_shift: shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2)) else: shifted_x = x pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0] pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1] shifted_x = torch.nn.functional.pad(shifted_x, (0, 0, 0, pad_w, 0, pad_h)) (_, Hp, Wp, _) = shifted_x.shape x_windows = window_partition(shifted_x, self.window_size) x_windows = x_windows.view(-1, self.window_area, C) if getattr(self, 'dynamic_mask', False): attn_mask = self.get_attn_mask(shifted_x) else: attn_mask = self.attn_mask attn_windows = self.attn(x_windows, mask=attn_mask) attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) shifted_x = window_reverse(attn_windows, self.window_size, (Hp, Wp)) shifted_x = shifted_x[:, :H, :W, :].contiguous() if has_shift: x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2)) else: x = shifted_x return x def forward(self, x: torch.Tensor) -> torch.Tensor: (B, H, W, C) = x.shape x = x + self.drop_path1(self.norm1(self._attn(x))) x = x.reshape(B, -1, C) x = x + self.drop_path2(self.norm2(self.mlp(x))) x = x.reshape(B, H, W, C) return x class PatchMerging(nn.Module): def __init__(self, dim: int, out_dim: Optional[int]=None, norm_layer: nn.Module=nn.LayerNorm): super().__init__() self.dim = 
dim self.out_dim = out_dim or 2 * dim self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False) self.norm = norm_layer(self.out_dim) def forward(self, x: torch.Tensor) -> torch.Tensor: (B, H, W, C) = x.shape pad_values = (0, 0, 0, H % 2, 0, W % 2) x = nn.functional.pad(x, pad_values) (_, H, W, _) = x.shape x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3) x = self.reduction(x) x = self.norm(x) return x class SwinTransformerV2Stage(nn.Module): def __init__(self, dim: int, out_dim: int, input_resolution: _int_or_tuple_2_t, depth: int, num_heads: int, window_size: _int_or_tuple_2_t, always_partition: bool=False, dynamic_mask: bool=False, downsample: bool=False, mlp_ratio: float=4.0, qkv_bias: bool=True, proj_drop: float=0.0, attn_drop: float=0.0, drop_path: float=0.0, act_layer: Union[str, Callable]='gelu', norm_layer: nn.Module=nn.LayerNorm, pretrained_window_size: _int_or_tuple_2_t=0, output_nchw: bool=False) -> None: super().__init__() self.dim = dim self.input_resolution = input_resolution self.output_resolution = tuple((i // 2 for i in input_resolution)) if downsample else input_resolution self.depth = depth self.output_nchw = output_nchw self.grad_checkpointing = False window_size = to_2tuple(window_size) shift_size = tuple([w // 2 for w in window_size]) if downsample: self.downsample = PatchMerging(dim=dim, out_dim=out_dim, norm_layer=norm_layer) else: assert dim == out_dim self.downsample = nn.Identity() self.blocks = nn.ModuleList([SwinTransformerV2Block(dim=out_dim, input_resolution=self.output_resolution, num_heads=num_heads, window_size=window_size, shift_size=0 if i % 2 == 0 else shift_size, always_partition=always_partition, dynamic_mask=dynamic_mask, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, act_layer=act_layer, norm_layer=norm_layer, pretrained_window_size=pretrained_window_size) for i in range(depth)]) def set_input_size(self, feat_size: Tuple[int, int], window_size: int, always_partition: Optional[bool]=None): self.input_resolution = feat_size if isinstance(self.downsample, nn.Identity): self.output_resolution = feat_size else: assert isinstance(self.downsample, PatchMerging) self.output_resolution = tuple((i // 2 for i in feat_size)) for block in self.blocks: block.set_input_size(feat_size=self.output_resolution, window_size=window_size, always_partition=always_partition) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.downsample(x) for blk in self.blocks: if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint.checkpoint(blk, x) else: x = blk(x) return x def _init_respostnorm(self) -> None: for blk in self.blocks: nn.init.constant_(blk.norm1.bias, 0) nn.init.constant_(blk.norm1.weight, 0) nn.init.constant_(blk.norm2.bias, 0) nn.init.constant_(blk.norm2.weight, 0) class SwinTransformerV2(nn.Module): def __init__(self, img_size: _int_or_tuple_2_t=224, patch_size: int=4, in_chans: int=3, num_classes: int=1000, global_pool: str='avg', embed_dim: int=96, depths: Tuple[int, ...]=(2, 2, 6, 2), num_heads: Tuple[int, ...]=(3, 6, 12, 24), window_size: _int_or_tuple_2_t=7, always_partition: bool=False, strict_img_size: bool=True, mlp_ratio: float=4.0, qkv_bias: bool=True, drop_rate: float=0.0, proj_drop_rate: float=0.0, attn_drop_rate: float=0.0, drop_path_rate: float=0.1, act_layer: Union[str, Callable]='gelu', norm_layer: Callable=nn.LayerNorm, pretrained_window_sizes: Tuple[int, ...]=(0, 0, 0, 0), 
**kwargs): super().__init__() self.num_classes = num_classes assert global_pool in ('', 'avg') self.global_pool = global_pool self.output_fmt = 'NHWC' self.num_layers = len(depths) self.embed_dim = embed_dim self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (self.num_layers - 1)) self.feature_info = [] if not isinstance(embed_dim, (tuple, list)): embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim[0], norm_layer=norm_layer, strict_img_size=strict_img_size, output_fmt='NHWC') grid_size = self.patch_embed.grid_size dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] layers = [] in_dim = embed_dim[0] scale = 1 for i in range(self.num_layers): out_dim = embed_dim[i] layers += [SwinTransformerV2Stage(dim=in_dim, out_dim=out_dim, input_resolution=(grid_size[0] // scale, grid_size[1] // scale), depth=depths[i], downsample=i > 0, num_heads=num_heads[i], window_size=window_size, always_partition=always_partition, dynamic_mask=not strict_img_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], act_layer=act_layer, norm_layer=norm_layer, pretrained_window_size=pretrained_window_sizes[i])] in_dim = out_dim if i > 0: scale *= 2 self.feature_info += [dict(num_chs=out_dim, reduction=4 * scale, module=f'layers.{i}')] self.layers = nn.Sequential(*layers) self.norm = norm_layer(self.num_features) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, input_fmt=self.output_fmt) self.apply(self._init_weights) for bly in self.layers: bly._init_respostnorm() def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) def set_input_size(self, img_size: Optional[Tuple[int, int]]=None, patch_size: Optional[Tuple[int, int]]=None, window_size: Optional[Tuple[int, int]]=None, window_ratio: Optional[int]=8, always_partition: Optional[bool]=None): if img_size is not None or patch_size is not None: self.patch_embed.set_input_size(img_size=img_size, patch_size=patch_size) grid_size = self.patch_embed.grid_size if window_size is None and window_ratio is not None: window_size = tuple([s // window_ratio for s in grid_size]) for (index, stage) in enumerate(self.layers): stage_scale = 2 ** max(index - 1, 0) stage.set_input_size(feat_size=(grid_size[0] // stage_scale, grid_size[1] // stage_scale), window_size=window_size, always_partition=always_partition) @torch.jit.ignore def no_weight_decay(self): nod = set() for (n, m) in self.named_modules(): if any([kw in n for kw in ('cpb_mlp', 'logit_scale')]): nod.add(n) return nod @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^absolute_pos_embed|patch_embed', blocks='^layers\\.(\\d+)' if coarse else [('^layers\\.(\\d+).downsample', (0,)), ('^layers\\.(\\d+)\\.\\w+\\.(\\d+)', None), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for l in self.layers: l.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, 
stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.layers), indices) x = self.patch_embed(x) num_stages = len(self.layers) if torch.jit.is_scripting() or not stop_early: stages = self.layers else: stages = self.layers[:max_index + 1] for (i, stage) in enumerate(stages): x = stage(x) if i in take_indices: if norm and i == num_stages - 1: x_inter = self.norm(x) else: x_inter = x x_inter = x_inter.permute(0, 3, 1, 2).contiguous() intermediates.append(x_inter) if intermediates_only: return intermediates x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.layers), indices) self.layers = self.layers[:max_index + 1] if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x) x = self.layers(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) native_checkpoint = 'head.fc.weight' in state_dict out_dict = {} import re for (k, v) in state_dict.items(): if any([n in k for n in ('relative_position_index', 'relative_coords_table', 'attn_mask')]): continue if 'patch_embed.proj.weight' in k: (_, _, H, W) = model.patch_embed.proj.weight.shape if v.shape[-2] != H or v.shape[-1] != W: v = resample_patch_embed(v, (H, W), interpolation='bicubic', antialias=True, verbose=True) if not native_checkpoint: k = re.sub('layers.(\\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k) k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def _create_swin_transformer_v2(variant, pretrained=False, **kwargs): default_out_indices = tuple((i for (i, _) in enumerate(kwargs.get('depths', (1, 1, 1, 1))))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg(SwinTransformerV2, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', 'license': 'mit', **kwargs} default_cfgs = generate_default_cfgs({'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to16_192to256_22kto1k_ft.pth'), 'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to24_192to384_22kto1k_ft.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k': 
_cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to16_192to256_22kto1k_ft.pth'), 'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to24_192to384_22kto1k_ft.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'swinv2_tiny_window8_256.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window8_256.pth'), 'swinv2_tiny_window16_256.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window16_256.pth'), 'swinv2_small_window8_256.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window8_256.pth'), 'swinv2_small_window16_256.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window16_256.pth'), 'swinv2_base_window8_256.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window8_256.pth'), 'swinv2_base_window16_256.ms_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window16_256.pth'), 'swinv2_base_window12_192.ms_in22k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12_192_22k.pth', num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6)), 'swinv2_large_window12_192.ms_in22k': _cfg(hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12_192_22k.pth', num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6))}) @register_model def swinv2_tiny_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2: model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2('swinv2_tiny_window16_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_tiny_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2: model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2('swinv2_tiny_window8_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_small_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2: model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2('swinv2_small_window16_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_small_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2: model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2('swinv2_small_window8_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2: model_args = dict(window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer_v2('swinv2_base_window16_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window8_256(pretrained=False, **kwargs) -> 
SwinTransformerV2: model_args = dict(window_size=8, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer_v2('swinv2_base_window8_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2: model_args = dict(window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer_v2('swinv2_base_window12_192', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2: model_args = dict(window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), pretrained_window_sizes=(12, 12, 12, 6)) return _create_swin_transformer_v2('swinv2_base_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2: model_args = dict(window_size=24, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), pretrained_window_sizes=(12, 12, 12, 6)) return _create_swin_transformer_v2('swinv2_base_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_large_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2: model_args = dict(window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)) return _create_swin_transformer_v2('swinv2_large_window12_192', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_large_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2: model_args = dict(window_size=16, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), pretrained_window_sizes=(12, 12, 12, 6)) return _create_swin_transformer_v2('swinv2_large_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_large_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2: model_args = dict(window_size=24, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), pretrained_window_sizes=(12, 12, 12, 6)) return _create_swin_transformer_v2('swinv2_large_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs)) register_model_deprecations(__name__, {'swinv2_base_window12_192_22k': 'swinv2_base_window12_192.ms_in22k', 'swinv2_base_window12to16_192to256_22kft1k': 'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k', 'swinv2_base_window12to24_192to384_22kft1k': 'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k', 'swinv2_large_window12_192_22k': 'swinv2_large_window12_192.ms_in22k', 'swinv2_large_window12to16_192to256_22kft1k': 'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k', 'swinv2_large_window12to24_192to384_22kft1k': 'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k'}) # File: pytorch-image-models-main/timm/models/swin_transformer_v2_cr.py """""" import logging import math from typing import Tuple, Optional, List, Union, Any, Type import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, Mlp, ClassifierHead, to_2tuple, _assert, ndgrid from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_function from ._manipulate import named_apply from ._registry import generate_default_cfgs, 
register_model __all__ = ['SwinTransformerV2Cr'] _logger = logging.getLogger(__name__) def bchw_to_bhwc(x: torch.Tensor) -> torch.Tensor: return x.permute(0, 2, 3, 1) def bhwc_to_bchw(x: torch.Tensor) -> torch.Tensor: return x.permute(0, 3, 1, 2) def window_partition(x, window_size: Tuple[int, int]): (B, H, W, C) = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]): (H, W) = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x class WindowMultiHeadAttention(nn.Module): def __init__(self, dim: int, num_heads: int, window_size: Tuple[int, int], drop_attn: float=0.0, drop_proj: float=0.0, meta_hidden_dim: int=384, sequential_attn: bool=False) -> None: super(WindowMultiHeadAttention, self).__init__() assert dim % num_heads == 0, 'The number of input features (in_features) are not divisible by the number of heads (num_heads).' self.in_features: int = dim self.window_size: Tuple[int, int] = to_2tuple(window_size) self.num_heads: int = num_heads self.sequential_attn: bool = sequential_attn self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias=True) self.attn_drop = nn.Dropout(drop_attn) self.proj = nn.Linear(in_features=dim, out_features=dim, bias=True) self.proj_drop = nn.Dropout(drop_proj) self.meta_mlp = Mlp(2, hidden_features=meta_hidden_dim, out_features=num_heads, act_layer=nn.ReLU, drop=(0.125, 0.0)) self.logit_scale = nn.Parameter(torch.log(10 * torch.ones(num_heads))) self._make_pair_wise_relative_positions() def _make_pair_wise_relative_positions(self) -> None: device = self.logit_scale.device coordinates = torch.stack(ndgrid(torch.arange(self.window_size[0], device=device), torch.arange(self.window_size[1], device=device)), dim=0).flatten(1) relative_coordinates = coordinates[:, :, None] - coordinates[:, None, :] relative_coordinates = relative_coordinates.permute(1, 2, 0).reshape(-1, 2).float() relative_coordinates_log = torch.sign(relative_coordinates) * torch.log(1.0 + relative_coordinates.abs()) self.register_buffer('relative_coordinates_log', relative_coordinates_log, persistent=False) def set_window_size(self, window_size: Tuple[int, int]) -> None: window_size = to_2tuple(window_size) if window_size != self.window_size: self.window_size = window_size self._make_pair_wise_relative_positions() def _relative_positional_encodings(self) -> torch.Tensor: window_area = self.window_size[0] * self.window_size[1] relative_position_bias = self.meta_mlp(self.relative_coordinates_log) relative_position_bias = relative_position_bias.transpose(1, 0).reshape(self.num_heads, window_area, window_area) relative_position_bias = relative_position_bias.unsqueeze(0) return relative_position_bias def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor]=None) -> torch.Tensor: (Bw, L, C) = x.shape qkv = self.qkv(x).view(Bw, L, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) (query, key, value) = qkv.unbind(0) attn = F.normalize(query, dim=-1) @ F.normalize(key, dim=-1).transpose(-2, -1) logit_scale = torch.clamp(self.logit_scale.reshape(1, self.num_heads, 1, 1), max=math.log(1.0 / 0.01)).exp() attn = attn * logit_scale attn = attn + 
self._relative_positional_encodings() if mask is not None: num_win: int = mask.shape[0] attn = attn.view(Bw // num_win, num_win, self.num_heads, L, L) attn = attn + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, L, L) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ value).transpose(1, 2).reshape(Bw, L, -1) x = self.proj(x) x = self.proj_drop(x) return x class SwinTransformerV2CrBlock(nn.Module): def __init__(self, dim: int, num_heads: int, feat_size: Tuple[int, int], window_size: Tuple[int, int], shift_size: Tuple[int, int]=(0, 0), always_partition: bool=False, dynamic_mask: bool=False, mlp_ratio: float=4.0, init_values: Optional[float]=0, proj_drop: float=0.0, drop_attn: float=0.0, drop_path: float=0.0, extra_norm: bool=False, sequential_attn: bool=False, norm_layer: Type[nn.Module]=nn.LayerNorm): super(SwinTransformerV2CrBlock, self).__init__() self.dim: int = dim self.feat_size: Tuple[int, int] = feat_size self.target_shift_size: Tuple[int, int] = to_2tuple(shift_size) self.always_partition = always_partition self.dynamic_mask = dynamic_mask (self.window_size, self.shift_size) = self._calc_window_shift(window_size) self.window_area = self.window_size[0] * self.window_size[1] self.init_values: Optional[float] = init_values self.attn = WindowMultiHeadAttention(dim=dim, num_heads=num_heads, window_size=self.window_size, drop_attn=drop_attn, drop_proj=proj_drop, sequential_attn=sequential_attn) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity() self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), drop=proj_drop, out_features=dim) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity() self.norm3 = norm_layer(dim) if extra_norm else nn.Identity() self.register_buffer('attn_mask', None if self.dynamic_mask else self.get_attn_mask(), persistent=False) self.init_weights() def _calc_window_shift(self, target_window_size: Tuple[int, int]) -> Tuple[Tuple[int, int], Tuple[int, int]]: target_window_size = to_2tuple(target_window_size) target_shift_size = self.target_shift_size if any(target_shift_size): target_shift_size = (target_window_size[0] // 2, target_window_size[1] // 2) if self.always_partition: return (target_window_size, target_shift_size) window_size = [f if f <= w else w for (f, w) in zip(self.feat_size, target_window_size)] shift_size = [0 if f <= w else s for (f, w, s) in zip(self.feat_size, window_size, target_shift_size)] return (tuple(window_size), tuple(shift_size)) def get_attn_mask(self, x: Optional[torch.Tensor]=None) -> Optional[torch.Tensor]: if any(self.shift_size): if x is None: img_mask = torch.zeros((1, *self.feat_size, 1)) else: img_mask = torch.zeros((1, x.shape[1], x.shape[2], 1), dtype=x.dtype, device=x.device) cnt = 0 for h in ((0, -self.window_size[0]), (-self.window_size[0], -self.shift_size[0]), (-self.shift_size[0], None)): for w in ((0, -self.window_size[1]), (-self.window_size[1], -self.shift_size[1]), (-self.shift_size[1], None)): img_mask[:, h[0]:h[1], w[0]:w[1], :] = cnt cnt += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_area) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None return attn_mask def init_weights(self): if self.init_values is not None: 
nn.init.constant_(self.norm1.weight, self.init_values) nn.init.constant_(self.norm2.weight, self.init_values) def set_input_size(self, feat_size: Tuple[int, int], window_size: Tuple[int, int]) -> None: self.feat_size: Tuple[int, int] = feat_size (self.window_size, self.shift_size) = self._calc_window_shift(to_2tuple(window_size)) self.window_area = self.window_size[0] * self.window_size[1] self.attn.set_window_size(self.window_size) self.register_buffer('attn_mask', None if self.dynamic_mask else self.get_attn_mask(), persistent=False) def _shifted_window_attn(self, x): (B, H, W, C) = x.shape (sh, sw) = self.shift_size do_shift: bool = any(self.shift_size) if do_shift: x = torch.roll(x, shifts=(-sh, -sw), dims=(1, 2)) pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0] pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1] x = torch.nn.functional.pad(x, (0, 0, 0, pad_w, 0, pad_h)) (_, Hp, Wp, _) = x.shape x_windows = window_partition(x, self.window_size) x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C) if getattr(self, 'dynamic_mask', False): attn_mask = self.get_attn_mask(x) else: attn_mask = self.attn_mask attn_windows = self.attn(x_windows, mask=attn_mask) attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) x = window_reverse(attn_windows, self.window_size, (Hp, Wp)) x = x[:, :H, :W, :].contiguous() if do_shift: x = torch.roll(x, shifts=(sh, sw), dims=(1, 2)) return x def forward(self, x: torch.Tensor) -> torch.Tensor: x = x + self.drop_path1(self.norm1(self._shifted_window_attn(x))) (B, H, W, C) = x.shape x = x.reshape(B, -1, C) x = x + self.drop_path2(self.norm2(self.mlp(x))) x = self.norm3(x) x = x.reshape(B, H, W, C) return x class PatchMerging(nn.Module): def __init__(self, dim: int, norm_layer: Type[nn.Module]=nn.LayerNorm) -> None: super(PatchMerging, self).__init__() self.norm = norm_layer(4 * dim) self.reduction = nn.Linear(in_features=4 * dim, out_features=2 * dim, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: (B, H, W, C) = x.shape pad_values = (0, 0, 0, H % 2, 0, W % 2) x = nn.functional.pad(x, pad_values) (_, H, W, _) = x.shape x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3) x = self.norm(x) x = self.reduction(x) return x class PatchEmbed(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, strict_img_size=True): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.strict_img_size = strict_img_size self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def set_input_size(self, img_size: Tuple[int, int]): img_size = to_2tuple(img_size) if img_size != self.img_size: self.img_size = img_size self.grid_size = (img_size[0] // self.patch_size[0], img_size[1] // self.patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] def forward(self, x): (B, C, H, W) = x.shape if self.strict_img_size: _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).") _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).") x = self.proj(x) x = self.norm(x.permute(0, 2, 3, 
1)).permute(0, 3, 1, 2) return x class SwinTransformerV2CrStage(nn.Module): def __init__(self, embed_dim: int, depth: int, downscale: bool, num_heads: int, feat_size: Tuple[int, int], window_size: Tuple[int, int], always_partition: bool=False, dynamic_mask: bool=False, mlp_ratio: float=4.0, init_values: Optional[float]=0.0, proj_drop: float=0.0, drop_attn: float=0.0, drop_path: Union[List[float], float]=0.0, norm_layer: Type[nn.Module]=nn.LayerNorm, extra_norm_period: int=0, extra_norm_stage: bool=False, sequential_attn: bool=False): super(SwinTransformerV2CrStage, self).__init__() self.downscale: bool = downscale self.grad_checkpointing: bool = False self.feat_size: Tuple[int, int] = (feat_size[0] // 2, feat_size[1] // 2) if downscale else feat_size if downscale: self.downsample = PatchMerging(embed_dim, norm_layer=norm_layer) embed_dim = embed_dim * 2 else: self.downsample = nn.Identity() def _extra_norm(index): i = index + 1 if extra_norm_period and i % extra_norm_period == 0: return True return i == depth if extra_norm_stage else False self.blocks = nn.Sequential(*[SwinTransformerV2CrBlock(dim=embed_dim, num_heads=num_heads, feat_size=self.feat_size, window_size=window_size, always_partition=always_partition, dynamic_mask=dynamic_mask, shift_size=tuple([0 if index % 2 == 0 else w // 2 for w in window_size]), mlp_ratio=mlp_ratio, init_values=init_values, proj_drop=proj_drop, drop_attn=drop_attn, drop_path=drop_path[index] if isinstance(drop_path, list) else drop_path, extra_norm=_extra_norm(index), sequential_attn=sequential_attn, norm_layer=norm_layer) for index in range(depth)]) def set_input_size(self, feat_size: Tuple[int, int], window_size: int, always_partition: Optional[bool]=None): self.feat_size = (feat_size[0] // 2, feat_size[1] // 2) if self.downscale else feat_size for block in self.blocks: block.set_input_size(feat_size=self.feat_size, window_size=window_size) def forward(self, x: torch.Tensor) -> torch.Tensor: x = bchw_to_bhwc(x) x = self.downsample(x) for block in self.blocks: if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint.checkpoint(block, x) else: x = block(x) x = bhwc_to_bchw(x) return x class SwinTransformerV2Cr(nn.Module): def __init__(self, img_size: Tuple[int, int]=(224, 224), patch_size: int=4, window_size: Optional[int]=None, window_ratio: int=8, always_partition: bool=False, strict_img_size: bool=True, in_chans: int=3, num_classes: int=1000, embed_dim: int=96, depths: Tuple[int, ...]=(2, 2, 6, 2), num_heads: Tuple[int, ...]=(3, 6, 12, 24), mlp_ratio: float=4.0, init_values: Optional[float]=0.0, drop_rate: float=0.0, proj_drop_rate: float=0.0, attn_drop_rate: float=0.0, drop_path_rate: float=0.0, norm_layer: Type[nn.Module]=nn.LayerNorm, extra_norm_period: int=0, extra_norm_stage: bool=False, sequential_attn: bool=False, global_pool: str='avg', weight_init='skip', **kwargs: Any) -> None: super(SwinTransformerV2Cr, self).__init__() img_size = to_2tuple(img_size) self.num_classes: int = num_classes self.patch_size: int = patch_size self.img_size: Tuple[int, int] = img_size self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) self.feature_info = [] self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer, strict_img_size=strict_img_size) grid_size = self.patch_embed.grid_size if window_size is None: self.window_size = tuple([s // window_ratio for s in grid_size]) else: self.window_size = to_2tuple(window_size) dpr = [x.tolist() 
for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] stages = [] in_dim = embed_dim in_scale = 1 for (stage_idx, (depth, num_heads)) in enumerate(zip(depths, num_heads)): stages += [SwinTransformerV2CrStage(embed_dim=in_dim, depth=depth, downscale=stage_idx != 0, feat_size=(grid_size[0] // in_scale, grid_size[1] // in_scale), num_heads=num_heads, window_size=self.window_size, always_partition=always_partition, dynamic_mask=not strict_img_size, mlp_ratio=mlp_ratio, init_values=init_values, proj_drop=proj_drop_rate, drop_attn=attn_drop_rate, drop_path=dpr[stage_idx], extra_norm_period=extra_norm_period, extra_norm_stage=extra_norm_stage or stage_idx + 1 == len(depths), sequential_attn=sequential_attn, norm_layer=norm_layer)] if stage_idx != 0: in_dim *= 2 in_scale *= 2 self.feature_info += [dict(num_chs=in_dim, reduction=4 * in_scale, module=f'stages.{stage_idx}')] self.stages = nn.Sequential(*stages) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) if weight_init != 'skip': named_apply(init_weights, self) def set_input_size(self, img_size: Optional[Tuple[int, int]]=None, window_size: Optional[Tuple[int, int]]=None, window_ratio: int=8, always_partition: Optional[bool]=None) -> None: if img_size is not None: self.patch_embed.set_input_size(img_size=img_size) grid_size = self.patch_embed.grid_size if window_size is None and window_ratio is not None: window_size = tuple([s // window_ratio for s in grid_size]) for (index, stage) in enumerate(self.stages): stage_scale = 2 ** max(index - 1, 0) stage.set_input_size(feat_size=(grid_size[0] // stage_scale, grid_size[1] // stage_scale), window_size=window_size, always_partition=always_partition) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^patch_embed', blocks='^stages\\.(\\d+)' if coarse else [('^stages\\.(\\d+).downsample', (0,)), ('^stages\\.(\\d+)\\.\\w+\\.(\\d+)', None)]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore() def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None) -> None: self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
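# --- Editor's usage sketch (not part of swin_transformer_v2_cr.py): a minimal,
# hedged example of the forward_intermediates() API defined in this class.
# Assumes a timm install that registers the variants listed below;
# pretrained=False keeps it offline with random weights.
import timm
import torch
model = timm.create_model('swinv2_cr_tiny_ns_224', pretrained=False)
feats = model.forward_intermediates(torch.randn(1, 3, 224, 224), indices=[0, 1, 2, 3], intermediates_only=True)
for f in feats:
    print(f.shape)  # (1, 96, 56, 56), (1, 192, 28, 28), (1, 384, 14, 14), (1, 768, 7, 7)
# --- end sketch ---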
intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.stages), indices) x = self.patch_embed(x) if torch.jit.is_scripting() or not stop_early: stages = self.stages else: stages = self.stages[:max_index + 1] for (i, stage) in enumerate(stages): x = stage(x) if i in take_indices: intermediates.append(x) if intermediates_only: return intermediates return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.patch_embed(x) x = self.stages(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x def init_weights(module: nn.Module, name: str=''): if isinstance(module, nn.Linear): if 'qkv' in name: val = math.sqrt(6.0 / float(module.weight.shape[0] // 3 + module.weight.shape[1])) nn.init.uniform_(module.weight, -val, val) elif 'head' in name: nn.init.zeros_(module.weight) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def checkpoint_filter_fn(state_dict, model): state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) if 'head.fc.weight' in state_dict: return state_dict out_dict = {} for (k, v) in state_dict.items(): if 'tau' in k: v = torch.log(1 / v) k = k.replace('tau', 'logit_scale') k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def _create_swin_transformer_v2_cr(variant, pretrained=False, **kwargs): default_out_indices = tuple((i for (i, _) in enumerate(kwargs.get('depths', (1, 1, 1, 1))))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg(SwinTransformerV2Cr, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'swinv2_cr_tiny_384.untrained': _cfg(url='', input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_tiny_224.untrained': _cfg(url='', input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_tiny_ns_224.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_tiny_ns_224-ba8166c6.pth', input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_small_384.untrained': _cfg(url='', input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_small_224.sw_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_224-0813c165.pth', input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_small_ns_224.sw_in1k': _cfg(hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_ns_224_iv-2ce90f8e.pth', input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_small_ns_256.untrained': _cfg(url='', input_size=(3, 256, 256), crop_pct=1.0, pool_size=(8, 8)), 'swinv2_cr_base_384.untrained': _cfg(url='', input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_base_224.untrained': _cfg(url='', input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_base_ns_224.untrained': _cfg(url='', input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_large_384.untrained': _cfg(url='', input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_large_224.untrained': _cfg(url='', input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_huge_384.untrained': _cfg(url='', input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_huge_224.untrained': _cfg(url='', input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_giant_384.untrained': _cfg(url='', input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_giant_224.untrained': _cfg(url='', input_size=(3, 224, 224), crop_pct=0.9)}) @register_model def swinv2_cr_tiny_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2_cr('swinv2_cr_tiny_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_tiny_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2_cr('swinv2_cr_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_tiny_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), extra_norm_stage=True) return _create_swin_transformer_v2_cr('swinv2_cr_tiny_ns_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_small_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2_cr('swinv2_cr_small_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_small_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2_cr('swinv2_cr_small_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_small_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), extra_norm_stage=True) return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_small_ns_256(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), extra_norm_stage=True) return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_base_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer_v2_cr('swinv2_cr_base_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def 
swinv2_cr_base_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer_v2_cr('swinv2_cr_base_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_base_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), extra_norm_stage=True) return _create_swin_transformer_v2_cr('swinv2_cr_base_ns_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_large_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)) return _create_swin_transformer_v2_cr('swinv2_cr_large_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_large_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)) return _create_swin_transformer_v2_cr('swinv2_cr_large_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_huge_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=352, depths=(2, 2, 18, 2), num_heads=(11, 22, 44, 88), extra_norm_period=6) return _create_swin_transformer_v2_cr('swinv2_cr_huge_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_huge_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=352, depths=(2, 2, 18, 2), num_heads=(8, 16, 32, 64), extra_norm_period=6) return _create_swin_transformer_v2_cr('swinv2_cr_huge_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_giant_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=512, depths=(2, 2, 42, 2), num_heads=(16, 32, 64, 128), extra_norm_period=6) return _create_swin_transformer_v2_cr('swinv2_cr_giant_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_giant_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: model_args = dict(embed_dim=512, depths=(2, 2, 42, 2), num_heads=(16, 32, 64, 128), extra_norm_period=6) return _create_swin_transformer_v2_cr('swinv2_cr_giant_224', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/tiny_vit.py """""" __all__ = ['TinyVit'] import itertools from functools import partial from typing import Dict, Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import LayerNorm2d, NormMlpClassifierHead, DropPath, trunc_normal_, resize_rel_pos_bias_table_levit, use_fused_attn from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs class ConvNorm(torch.nn.Sequential): def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): super().__init__() self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False) self.bn = nn.BatchNorm2d(out_chs) torch.nn.init.constant_(self.bn.weight, bn_weight_init) torch.nn.init.constant_(self.bn.bias, 0) @torch.no_grad() def fuse(self): (c, bn) = (self.conv, self.bn) w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = c.weight * w[:, None, None, None] b = 
bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 m = torch.nn.Conv2d(w.size(1) * self.conv.groups, w.size(0), w.shape[2:], stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class PatchEmbed(nn.Module): def __init__(self, in_chs, out_chs, act_layer): super().__init__() self.stride = 4 self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1) self.act = act_layer() self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1) def forward(self, x): x = self.conv1(x) x = self.act(x) x = self.conv2(x) return x class MBConv(nn.Module): def __init__(self, in_chs, out_chs, expand_ratio, act_layer, drop_path): super().__init__() mid_chs = int(in_chs * expand_ratio) self.conv1 = ConvNorm(in_chs, mid_chs, ks=1) self.act1 = act_layer() self.conv2 = ConvNorm(mid_chs, mid_chs, ks=3, stride=1, pad=1, groups=mid_chs) self.act2 = act_layer() self.conv3 = ConvNorm(mid_chs, out_chs, ks=1, bn_weight_init=0.0) self.act3 = act_layer() self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): shortcut = x x = self.conv1(x) x = self.act1(x) x = self.conv2(x) x = self.act2(x) x = self.conv3(x) x = self.drop_path(x) x += shortcut x = self.act3(x) return x class PatchMerging(nn.Module): def __init__(self, dim, out_dim, act_layer): super().__init__() self.conv1 = ConvNorm(dim, out_dim, 1, 1, 0) self.act1 = act_layer() self.conv2 = ConvNorm(out_dim, out_dim, 3, 2, 1, groups=out_dim) self.act2 = act_layer() self.conv3 = ConvNorm(out_dim, out_dim, 1, 1, 0) def forward(self, x): x = self.conv1(x) x = self.act1(x) x = self.conv2(x) x = self.act2(x) x = self.conv3(x) return x class ConvLayer(nn.Module): def __init__(self, dim, depth, act_layer, drop_path=0.0, conv_expand_ratio=4.0): super().__init__() self.dim = dim self.depth = depth self.blocks = nn.Sequential(*[MBConv(dim, dim, conv_expand_ratio, act_layer, drop_path[i] if isinstance(drop_path, list) else drop_path) for i in range(depth)]) def forward(self, x): x = self.blocks(x) return x class NormMlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, norm_layer=nn.LayerNorm, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.norm = norm_layer(in_features) self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.drop1 = nn.Dropout(drop) self.fc2 = nn.Linear(hidden_features, out_features) self.drop2 = nn.Dropout(drop) def forward(self, x): x = self.norm(x) x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.fc2(x) x = self.drop2(x) return x class Attention(torch.nn.Module): fused_attn: torch.jit.Final[bool] attention_bias_cache: Dict[str, torch.Tensor] def __init__(self, dim, key_dim, num_heads=8, attn_ratio=4, resolution=(14, 14)): super().__init__() assert isinstance(resolution, tuple) and len(resolution) == 2 self.num_heads = num_heads self.scale = key_dim ** (-0.5) self.key_dim = key_dim self.val_dim = int(attn_ratio * key_dim) self.out_dim = self.val_dim * num_heads self.attn_ratio = attn_ratio self.resolution = resolution self.fused_attn = use_fused_attn() self.norm = nn.LayerNorm(dim) self.qkv = nn.Linear(dim, num_heads * (self.val_dim + 2 * key_dim)) self.proj = nn.Linear(self.out_dim, dim) points = list(itertools.product(range(resolution[0]), range(resolution[1]))) N = len(points) attention_offsets = {} idxs = [] for p1 in points: for p2 in points: 
offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) if offset not in attention_offsets: attention_offsets[offset] = len(attention_offsets) idxs.append(attention_offsets[offset]) self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False) self.attention_bias_cache = {} @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): attn_bias = self.get_attention_biases(x.device) (B, N, _) = x.shape x = self.norm(x) qkv = self.qkv(x) (q, k, v) = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3) q = q.permute(0, 2, 1, 3) k = k.permute(0, 2, 1, 3) v = v.permute(0, 2, 1, 3) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn + attn_bias attn = attn.softmax(dim=-1) x = attn @ v x = x.transpose(1, 2).reshape(B, N, self.out_dim) x = self.proj(x) return x class TinyVitBlock(nn.Module): def __init__(self, dim, num_heads, window_size=7, mlp_ratio=4.0, drop=0.0, drop_path=0.0, local_conv_size=3, act_layer=nn.GELU): super().__init__() self.dim = dim self.num_heads = num_heads assert window_size > 0, 'window_size must be greater than 0' self.window_size = window_size self.mlp_ratio = mlp_ratio assert dim % num_heads == 0, 'dim must be divisible by num_heads' head_dim = dim // num_heads window_resolution = (window_size, window_size) self.attn = Attention(dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.mlp = NormMlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() pad = local_conv_size // 2 self.local_conv = ConvNorm(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim) def forward(self, x): (B, H, W, C) = x.shape L = H * W shortcut = x if H == self.window_size and W == self.window_size: x = x.reshape(B, L, C) x = self.attn(x) x = x.view(B, H, W, C) else: pad_b = (self.window_size - H % self.window_size) % self.window_size pad_r = (self.window_size - W % self.window_size) % self.window_size padding = pad_b > 0 or pad_r > 0 if padding: x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b)) (pH, pW) = (H + pad_b, W + pad_r) nH = pH // self.window_size nW = pW // self.window_size x = x.view(B, nH, self.window_size, nW, self.window_size, C).transpose(2, 3).reshape(B * nH * nW, self.window_size * self.window_size, C) x = self.attn(x) x = x.view(B, nH, nW, self.window_size, self.window_size, C).transpose(2, 3).reshape(B, pH, pW, C) if padding: x = x[:, :H, :W].contiguous() x = shortcut + self.drop_path1(x) x = x.permute(0, 3, 1, 2) x = self.local_conv(x) x = x.reshape(B, C, L).transpose(1, 2) x = x + self.drop_path2(self.mlp(x)) return x.view(B, H, W, C) def extra_repr(self) -> str: return f'dim={self.dim}, num_heads={self.num_heads}, window_size={self.window_size}, 
mlp_ratio={self.mlp_ratio}' register_notrace_module(TinyVitBlock) class TinyVitStage(nn.Module): def __init__(self, dim, out_dim, depth, num_heads, window_size, mlp_ratio=4.0, drop=0.0, drop_path=0.0, downsample=None, local_conv_size=3, act_layer=nn.GELU): super().__init__() self.depth = depth self.out_dim = out_dim if downsample is not None: self.downsample = downsample(dim=dim, out_dim=out_dim, act_layer=act_layer) else: self.downsample = nn.Identity() assert dim == out_dim self.blocks = nn.Sequential(*[TinyVitBlock(dim=out_dim, num_heads=num_heads, window_size=window_size, mlp_ratio=mlp_ratio, drop=drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, local_conv_size=local_conv_size, act_layer=act_layer) for i in range(depth)]) def forward(self, x): x = self.downsample(x) x = x.permute(0, 2, 3, 1) x = self.blocks(x) x = x.permute(0, 3, 1, 2) return x def extra_repr(self) -> str: return f'dim={self.out_dim}, depth={self.depth}' class TinyVit(nn.Module): def __init__(self, in_chans=3, num_classes=1000, global_pool='avg', embed_dims=(96, 192, 384, 768), depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), window_sizes=(7, 7, 14, 7), mlp_ratio=4.0, drop_rate=0.0, drop_path_rate=0.1, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, act_layer=nn.GELU): super().__init__() self.num_classes = num_classes self.depths = depths self.num_stages = len(depths) self.mlp_ratio = mlp_ratio self.grad_checkpointing = use_checkpoint self.patch_embed = PatchEmbed(in_chs=in_chans, out_chs=embed_dims[0], act_layer=act_layer) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] self.stages = nn.Sequential() stride = self.patch_embed.stride prev_dim = embed_dims[0] self.feature_info = [] for stage_idx in range(self.num_stages): if stage_idx == 0: stage = ConvLayer(dim=prev_dim, depth=depths[stage_idx], act_layer=act_layer, drop_path=dpr[:depths[stage_idx]], conv_expand_ratio=mbconv_expand_ratio) else: out_dim = embed_dims[stage_idx] drop_path_rate = dpr[sum(depths[:stage_idx]):sum(depths[:stage_idx + 1])] stage = TinyVitStage(dim=embed_dims[stage_idx - 1], out_dim=out_dim, depth=depths[stage_idx], num_heads=num_heads[stage_idx], window_size=window_sizes[stage_idx], mlp_ratio=self.mlp_ratio, drop=drop_rate, local_conv_size=local_conv_size, drop_path=drop_path_rate, downsample=PatchMerging, act_layer=act_layer) prev_dim = out_dim stride *= 2 self.stages.append(stage) self.feature_info += [dict(num_chs=prev_dim, reduction=stride, module=f'stages.{stage_idx}')] self.num_features = self.head_hidden_size = embed_dims[-1] norm_layer_cf = partial(LayerNorm2d, eps=1e-05) self.head = NormMlpClassifierHead(self.num_features, num_classes, pool_type=global_pool, norm_layer=norm_layer_cf) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay_keywords(self): return {'attention_biases'} @torch.jit.ignore def no_weight_decay(self): return {x for x in self.state_dict().keys() if 'attention_biases' in x} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^patch_embed', blocks='^stages\\.(\\d+)' if coarse else [('^stages\\.(\\d+).downsample', (0,)), ('^stages\\.(\\d+)\\.\\w+\\.(\\d+)', None)]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return 
self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.patch_embed(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool=False): x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) return x def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'model' in state_dict.keys(): state_dict = state_dict['model'] target_sd = model.state_dict() out_dict = {} for (k, v) in state_dict.items(): if k.endswith('attention_bias_idxs'): continue if 'attention_biases' in k: v = resize_rel_pos_bias_table_levit(v.T, target_sd[k].shape[::-1]).T out_dict[k] = v return out_dict def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.conv1.conv', 'classifier': 'head.fc', 'pool_size': (7, 7), 'input_size': (3, 224, 224), 'crop_pct': 0.95, **kwargs} default_cfgs = generate_default_cfgs({'tiny_vit_5m_224.dist_in22k': _cfg(hf_hub_id='timm/', num_classes=21841), 'tiny_vit_5m_224.dist_in22k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tiny_vit_5m_224.in1k': _cfg(hf_hub_id='timm/'), 'tiny_vit_11m_224.dist_in22k': _cfg(hf_hub_id='timm/', num_classes=21841), 'tiny_vit_11m_224.dist_in22k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tiny_vit_11m_224.in1k': _cfg(hf_hub_id='timm/'), 'tiny_vit_21m_224.dist_in22k': _cfg(hf_hub_id='timm/', num_classes=21841), 'tiny_vit_21m_224.dist_in22k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tiny_vit_21m_224.in1k': _cfg(hf_hub_id='timm/'), 'tiny_vit_21m_384.dist_in22k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'tiny_vit_21m_512.dist_in22k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash')}) def _create_tiny_vit(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg(TinyVit, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), pretrained_filter_fn=checkpoint_filter_fn, **kwargs) return model @register_model def tiny_vit_5m_224(pretrained=False, **kwargs): model_kwargs = dict(embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], drop_path_rate=0.0) model_kwargs.update(kwargs) return _create_tiny_vit('tiny_vit_5m_224', pretrained, **model_kwargs) @register_model def tiny_vit_11m_224(pretrained=False, **kwargs): model_kwargs = dict(embed_dims=[64, 128, 256, 448], depths=[2, 2, 6, 2], num_heads=[2, 4, 8, 14], window_sizes=[7, 7, 14, 7], drop_path_rate=0.1) model_kwargs.update(kwargs) return _create_tiny_vit('tiny_vit_11m_224', pretrained, **model_kwargs) @register_model def tiny_vit_21m_224(pretrained=False, **kwargs): model_kwargs = dict(embed_dims=[96, 192, 384, 576], depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 18], window_sizes=[7, 7, 14, 7], drop_path_rate=0.2) model_kwargs.update(kwargs) return _create_tiny_vit('tiny_vit_21m_224', pretrained, **model_kwargs) @register_model def tiny_vit_21m_384(pretrained=False, **kwargs): model_kwargs = dict(embed_dims=[96, 192, 384, 576], depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 18], window_sizes=[12, 12, 24, 12], drop_path_rate=0.1) 
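# --- Editor's usage sketch (not part of tiny_vit.py): TinyVit fills in
# feature_info per stage, so timm's generic features_only wrapper applies.
# Channel counts in the comment follow tiny_vit_5m_224's embed_dims and are
# illustrative only.
import timm
import torch
backbone = timm.create_model('tiny_vit_5m_224', pretrained=False, features_only=True)
for f in backbone(torch.randn(1, 3, 224, 224)):
    print(f.shape)  # strides 4/8/16/32 -> channels (64, 128, 160, 320)
# --- end sketch ---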
model_kwargs.update(kwargs) return _create_tiny_vit('tiny_vit_21m_384', pretrained, **model_kwargs) @register_model def tiny_vit_21m_512(pretrained=False, **kwargs): model_kwargs = dict(embed_dims=[96, 192, 384, 576], depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 18], window_sizes=[16, 16, 32, 16], drop_path_rate=0.1) model_kwargs.update(kwargs) return _create_tiny_vit('tiny_vit_21m_512', pretrained, **model_kwargs) # File: pytorch-image-models-main/timm/models/tnt.py """""" import math from typing import Optional import torch import torch.nn as nn from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, DropPath, trunc_normal_, _assert, to_2tuple from ._builder import build_model_with_cfg from ._registry import register_model from .vision_transformer import resize_pos_embed __all__ = ['TNT'] def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'pixel_embed.proj', 'classifier': 'head', **kwargs} default_cfgs = {'tnt_s_patch16_224': _cfg(url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)), 'tnt_b_patch16_224': _cfg(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))} class Attention(nn.Module): def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0): super().__init__() self.hidden_dim = hidden_dim self.num_heads = num_heads head_dim = hidden_dim // num_heads self.head_dim = head_dim self.scale = head_dim ** (-0.5) self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias) self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop, inplace=True) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop, inplace=True) def forward(self, x): (B, N, C) = x.shape qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) (q, k) = qk.unbind(0) v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, -1) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, dim_out, num_pixel, num_heads_in=4, num_heads_out=12, mlp_ratio=4.0, qkv_bias=False, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm_in = norm_layer(dim) self.attn_in = Attention(dim, dim, num_heads=num_heads_in, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.norm_mlp_in = norm_layer(dim) self.mlp_in = Mlp(in_features=dim, hidden_features=int(dim * 4), out_features=dim, act_layer=act_layer, drop=proj_drop) self.norm1_proj = norm_layer(dim) self.proj = nn.Linear(dim * num_pixel, dim_out, bias=True) self.norm_out = norm_layer(dim_out) self.attn_out = Attention(dim_out, dim_out, num_heads=num_heads_out, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm_mlp = norm_layer(dim_out) self.mlp = Mlp(in_features=dim_out, hidden_features=int(dim_out * mlp_ratio), out_features=dim_out, act_layer=act_layer, drop=proj_drop) def forward(self, pixel_embed, patch_embed): pixel_embed = pixel_embed + 
self.drop_path(self.attn_in(self.norm_in(pixel_embed))) pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed))) (B, N, C) = patch_embed.size() patch_embed = torch.cat([patch_embed[:, 0:1], patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1))], dim=1) patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed))) patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed))) return (pixel_embed, patch_embed) class PixelEmbed(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) num_patches = self.grid_size[0] * self.grid_size[1] self.img_size = img_size self.num_patches = num_patches self.in_dim = in_dim new_patch_size = [math.ceil(ps / stride) for ps in patch_size] self.new_patch_size = new_patch_size self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride) self.unfold = nn.Unfold(kernel_size=new_patch_size, stride=new_patch_size) def forward(self, x, pixel_pos): (B, C, H, W) = x.shape _assert(H == self.img_size[0], f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") _assert(W == self.img_size[1], f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") x = self.proj(x) x = self.unfold(x) x = x.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, self.new_patch_size[0], self.new_patch_size[1]) x = x + pixel_pos x = x.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2) return x class TNT(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, inner_dim=48, depth=12, num_heads_inner=4, num_heads_outer=12, mlp_ratio=4.0, qkv_bias=False, drop_rate=0.0, pos_drop_rate=0.0, proj_drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=nn.LayerNorm, first_stride=4): super().__init__() assert global_pool in ('', 'token', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = self.embed_dim = embed_dim self.grad_checkpointing = False self.pixel_embed = PixelEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, in_dim=inner_dim, stride=first_stride) num_patches = self.pixel_embed.num_patches self.num_patches = num_patches new_patch_size = self.pixel_embed.new_patch_size num_pixel = new_patch_size[0] * new_patch_size[1] self.norm1_proj = norm_layer(num_pixel * inner_dim) self.proj = nn.Linear(num_pixel * inner_dim, embed_dim) self.norm2_proj = norm_layer(embed_dim) self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.patch_pos = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) self.pixel_pos = nn.Parameter(torch.zeros(1, inner_dim, new_patch_size[0], new_patch_size[1])) self.pos_drop = nn.Dropout(p=pos_drop_rate) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] blocks = [] for i in range(depth): blocks.append(Block(dim=inner_dim, dim_out=embed_dim, num_pixel=num_pixel, num_heads_in=num_heads_inner, num_heads_out=num_heads_outer, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)) self.blocks = nn.ModuleList(blocks) self.norm = norm_layer(embed_dim) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, 
num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.cls_token, std=0.02) trunc_normal_(self.patch_pos, std=0.02) trunc_normal_(self.pixel_pos, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'patch_pos', 'pixel_pos', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^cls_token|patch_pos|pixel_pos|pixel_embed|norm[12]_proj|proj', blocks=[('^blocks\\.(\\d+)', None), ('^norm', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'token', 'avg') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): B = x.shape[0] pixel_embed = self.pixel_embed(x, self.pixel_pos) patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1)))) patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1) patch_embed = patch_embed + self.patch_pos patch_embed = self.pos_drop(patch_embed) if self.grad_checkpointing and (not torch.jit.is_scripting()): for blk in self.blocks: (pixel_embed, patch_embed) = checkpoint(blk, pixel_embed, patch_embed) else: for blk in self.blocks: (pixel_embed, patch_embed) = blk(pixel_embed, patch_embed) patch_embed = self.norm(patch_embed) return patch_embed def forward_head(self, x, pre_logits: bool=False): if self.global_pool: x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if state_dict['patch_pos'].shape != model.patch_pos.shape: state_dict['patch_pos'] = resize_pos_embed(state_dict['patch_pos'], model.patch_pos, getattr(model, 'num_tokens', 1), model.pixel_embed.grid_size) return state_dict def _create_tnt(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = build_model_with_cfg(TNT, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs) return model @register_model def tnt_s_patch16_224(pretrained=False, **kwargs) -> TNT: model_cfg = dict(patch_size=16, embed_dim=384, inner_dim=24, depth=12, num_heads_outer=6, qkv_bias=False) model = _create_tnt('tnt_s_patch16_224', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def tnt_b_patch16_224(pretrained=False, **kwargs) -> TNT: model_cfg = dict(patch_size=16, embed_dim=640, inner_dim=40, depth=12, num_heads_outer=10, qkv_bias=False) model = _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model # File: pytorch-image-models-main/timm/models/tresnet.py """""" from collections import OrderedDict from functools import partial from typing import Optional import torch import torch.nn as nn from timm.layers 
import SpaceToDepth, BlurPool2d, ClassifierHead, SEModule, ConvNormAct, DropPath from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['TResNet'] class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, aa_layer=None, drop_path_rate=0.0): super(BasicBlock, self).__init__() self.downsample = downsample self.stride = stride act_layer = partial(nn.LeakyReLU, negative_slope=0.001) self.conv1 = ConvNormAct(inplanes, planes, kernel_size=3, stride=stride, act_layer=act_layer, aa_layer=aa_layer) self.conv2 = ConvNormAct(planes, planes, kernel_size=3, stride=1, apply_act=False) self.act = nn.ReLU(inplace=True) rd_chs = max(planes * self.expansion // 4, 64) self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def forward(self, x): if self.downsample is not None: shortcut = self.downsample(x) else: shortcut = x out = self.conv1(x) out = self.conv2(out) if self.se is not None: out = self.se(out) out = self.drop_path(out) + shortcut out = self.act(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, act_layer=None, aa_layer=None, drop_path_rate=0.0): super(Bottleneck, self).__init__() self.downsample = downsample self.stride = stride act_layer = act_layer or partial(nn.LeakyReLU, negative_slope=0.001) self.conv1 = ConvNormAct(inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer) self.conv2 = ConvNormAct(planes, planes, kernel_size=3, stride=stride, act_layer=act_layer, aa_layer=aa_layer) reduction_chs = max(planes * self.expansion // 8, 64) self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None self.conv3 = ConvNormAct(planes, planes * self.expansion, kernel_size=1, stride=1, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.act = nn.ReLU(inplace=True) def forward(self, x): if self.downsample is not None: shortcut = self.downsample(x) else: shortcut = x out = self.conv1(x) out = self.conv2(out) if self.se is not None: out = self.se(out) out = self.conv3(out) out = self.drop_path(out) + shortcut out = self.act(out) return out class TResNet(nn.Module): def __init__(self, layers, in_chans=3, num_classes=1000, width_factor=1.0, v2=False, global_pool='fast', drop_rate=0.0, drop_path_rate=0.0): self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False super(TResNet, self).__init__() aa_layer = BlurPool2d act_layer = nn.LeakyReLU self.inplanes = int(64 * width_factor) self.planes = int(64 * width_factor) if v2: self.inplanes = self.inplanes // 8 * 8 self.planes = self.planes // 8 * 8 dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] conv1 = ConvNormAct(in_chans * 16, self.planes, stride=1, kernel_size=3, act_layer=act_layer) layer1 = self._make_layer(Bottleneck if v2 else BasicBlock, self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[0]) layer2 = self._make_layer(Bottleneck if v2 else BasicBlock, self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[1]) layer3 = self._make_layer(Bottleneck, self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[2]) layer4 = 
self._make_layer(Bottleneck, self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer, drop_path_rate=dpr[3]) self.body = nn.Sequential(OrderedDict([('s2d', SpaceToDepth()), ('conv1', conv1), ('layer1', layer1), ('layer2', layer2), ('layer3', layer3), ('layer4', layer4)])) self.feature_info = [dict(num_chs=self.planes, reduction=2, module=''), dict(num_chs=self.planes * (Bottleneck.expansion if v2 else 1), reduction=4, module='body.layer1'), dict(num_chs=self.planes * 2 * (Bottleneck.expansion if v2 else 1), reduction=8, module='body.layer2'), dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'), dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4')] self.num_features = self.head_hidden_size = self.planes * 8 * Bottleneck.expansion self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') if isinstance(m, nn.Linear): m.weight.data.normal_(0, 0.01) for m in self.modules(): if isinstance(m, BasicBlock): nn.init.zeros_(m.conv2.bn.weight) if isinstance(m, Bottleneck): nn.init.zeros_(m.conv3.bn.weight) def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None, drop_path_rate=0.0): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: layers = [] if stride == 2: layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False)) layers += [ConvNormAct(self.inplanes, planes * block.expansion, kernel_size=1, stride=1, apply_act=False)] downsample = nn.Sequential(*layers) layers = [] for i in range(blocks): layers.append(block(self.inplanes, planes, stride=stride if i == 0 else 1, downsample=downsample if i == 0 else None, use_se=use_se, aa_layer=aa_layer, drop_path_rate=drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate)) self.inplanes = planes * block.expansion return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^body\\.conv1', blocks='^body\\.layer(\\d+)' if coarse else '^body\\.layer(\\d+)\\.(\\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): if self.grad_checkpointing and (not torch.jit.is_scripting()): x = self.body.s2d(x) x = self.body.conv1(x) x = checkpoint_seq([self.body.layer1, self.body.layer2, self.body.layer3, self.body.layer4], x, flatten=True) else: x = self.body(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'body.conv1.conv.weight' in state_dict: return state_dict import re state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) out_dict = {} for (k, v) in state_dict.items(): k = re.sub('conv(\\d+)\\.0.0', lambda x: f'conv{int(x.group(1))}.conv', k) k = re.sub('conv(\\d+)\\.0.1', lambda x: f'conv{int(x.group(1))}.bn', k) k = re.sub('conv(\\d+)\\.0', lambda x: f'conv{int(x.group(1))}.conv', k) k = 
re.sub('conv(\\d+)\\.1', lambda x: f'conv{int(x.group(1))}.bn', k) k = re.sub('downsample\\.(\\d+)\\.0', lambda x: f'downsample.{int(x.group(1))}.conv', k) k = re.sub('downsample\\.(\\d+)\\.1', lambda x: f'downsample.{int(x.group(1))}.bn', k) if k.endswith('bn.weight'): v = v.abs().add(1e-05) out_dict[k] = v return out_dict def _create_tresnet(variant, pretrained=False, **kwargs): return build_model_with_cfg(TResNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': (0.0, 0.0, 0.0), 'std': (1.0, 1.0, 1.0), 'first_conv': 'body.conv1.conv', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'tresnet_m.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_m.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221), 'tresnet_m.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_l.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_xl.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_m.miil_in1k_448': _cfg(input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_l.miil_in1k_448': _cfg(input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_xl.miil_in1k_448': _cfg(input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_v2_l.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_v2_l.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221)}) @register_model def tresnet_m(pretrained=False, **kwargs) -> TResNet: model_args = dict(layers=[3, 4, 11, 3]) return _create_tresnet('tresnet_m', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def tresnet_l(pretrained=False, **kwargs) -> TResNet: model_args = dict(layers=[4, 5, 18, 3], width_factor=1.2) return _create_tresnet('tresnet_l', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def tresnet_xl(pretrained=False, **kwargs) -> TResNet: model_args = dict(layers=[4, 5, 24, 3], width_factor=1.3) return _create_tresnet('tresnet_xl', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def tresnet_v2_l(pretrained=False, **kwargs) -> TResNet: model_args = dict(layers=[3, 4, 23, 3], width_factor=1.0, v2=True) return _create_tresnet('tresnet_v2_l', pretrained=pretrained, **dict(model_args, **kwargs)) register_model_deprecations(__name__, {'tresnet_m_miil_in21k': 'tresnet_m.miil_in21k', 'tresnet_m_448': 'tresnet_m.miil_in1k_448', 'tresnet_l_448': 'tresnet_l.miil_in1k_448', 'tresnet_xl_448': 'tresnet_xl.miil_in1k_448'}) # File: pytorch-image-models-main/timm/models/twins.py """""" import math from functools import partial from typing import List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, DropPath, to_2tuple, trunc_normal_, use_fused_attn from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs from .vision_transformer import Attention __all__ = ['Twins'] Size_ = Tuple[int, int] @register_notrace_module class LocallyGroupedAttn(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, attn_drop=0.0, proj_drop=0.0, ws=1): assert ws != 1 super(LocallyGroupedAttn, self).__init__() 
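# --- Editor's shape sketch (not part of twins.py): LocallyGroupedAttn below
# regroups (B, H, W, C) into non-overlapping ws x ws windows before attention.
# This standalone check mirrors its reshape/transpose so the layout is easy to
# verify; the sizes are arbitrary illustrations.
import torch
B, H, W, C, ws = 2, 8, 8, 4, 4
x = torch.randn(B, H, W, C)
_h, _w = H // ws, W // ws
windows = x.reshape(B, _h, ws, _w, ws, C).transpose(2, 3)
print(windows.shape)  # torch.Size([2, 2, 2, 4, 4, 4]) == (B, _h, _w, ws, ws, C)
# --- end sketch ---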
assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.' self.dim = dim self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=True) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.ws = ws def forward(self, x, size: Size_): (B, N, C) = x.shape (H, W) = size x = x.view(B, H, W, C) pad_l = pad_t = 0 pad_r = (self.ws - W % self.ws) % self.ws pad_b = (self.ws - H % self.ws) % self.ws x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) (_, Hp, Wp, _) = x.shape (_h, _w) = (Hp // self.ws, Wp // self.ws) x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) qkv = self.qkv(x).reshape(B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) (q, k, v) = qkv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) x = x.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) if pad_r > 0 or pad_b > 0: x = x[:, :H, :W, :].contiguous() x = x.reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class GlobalSubSampleAttn(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, attn_drop=0.0, proj_drop=0.0, sr_ratio=1): super().__init__() assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.' self.dim = dim self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.q = nn.Linear(dim, dim, bias=True) self.kv = nn.Linear(dim, dim * 2, bias=True) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.sr_ratio = sr_ratio if sr_ratio > 1: self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) self.norm = nn.LayerNorm(dim) else: self.sr = None self.norm = None def forward(self, x, size: Size_): (B, N, C) = x.shape q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) if self.sr is not None: x = x.permute(0, 2, 1).reshape(B, C, *size) x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) x = self.norm(x) kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) (k, v) = kv.unbind(0) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, ws=None): super().__init__() self.norm1 = norm_layer(dim) if ws is None: self.attn = Attention(dim, num_heads, False, None, attn_drop, proj_drop) elif ws == 1: self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, proj_drop, sr_ratio) else: self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, proj_drop, ws) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, 
hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x, size: Size_): x = x + self.drop_path1(self.attn(self.norm1(x), size)) x = x + self.drop_path2(self.mlp(self.norm2(x))) return x class PosConv(nn.Module): def __init__(self, in_chans, embed_dim=768, stride=1): super(PosConv, self).__init__() self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim)) self.stride = stride def forward(self, x, size: Size_): (B, N, C) = x.shape cnn_feat_token = x.transpose(1, 2).view(B, C, *size) x = self.proj(cnn_feat_token) if self.stride == 1: x += cnn_feat_token x = x.flatten(2).transpose(1, 2) return x def no_weight_decay(self): return ['proj.%d.weight' % i for i in range(4)] class PatchEmbed(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, f'img_size {img_size} should be divided by patch_size {patch_size}.' (self.H, self.W) = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.H * self.W self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) self.norm = nn.LayerNorm(embed_dim) def forward(self, x) -> Tuple[torch.Tensor, Size_]: (B, C, H, W) = x.shape x = self.proj(x).flatten(2).transpose(1, 2) x = self.norm(x) out_size = (H // self.patch_size[0], W // self.patch_size[1]) return (x, out_size) class Twins(nn.Module): def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, global_pool='avg', embed_dims=(64, 128, 256, 512), num_heads=(1, 2, 4, 8), mlp_ratios=(4, 4, 4, 4), depths=(3, 4, 6, 3), sr_ratios=(8, 4, 2, 1), wss=None, drop_rate=0.0, pos_drop_rate=0.0, proj_drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=partial(nn.LayerNorm, eps=1e-06), block_cls=Block): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.depths = depths self.embed_dims = embed_dims self.num_features = self.head_hidden_size = embed_dims[-1] self.grad_checkpointing = False img_size = to_2tuple(img_size) prev_chs = in_chans self.patch_embeds = nn.ModuleList() self.pos_drops = nn.ModuleList() for i in range(len(depths)): self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i])) self.pos_drops.append(nn.Dropout(p=pos_drop_rate)) prev_chs = embed_dims[i] img_size = tuple((t // patch_size for t in img_size)) patch_size = 2 self.blocks = nn.ModuleList() self.feature_info = [] dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] cur = 0 for k in range(len(depths)): _block = nn.ModuleList([block_cls(dim=embed_dims[k], num_heads=num_heads[k], mlp_ratio=mlp_ratios[k], proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[k], ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])]) self.blocks.append(_block) self.feature_info += [dict(module=f'blocks.{k}', num_chs=embed_dims[k], reduction=2 ** (2 + k))] cur += depths[k] self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims]) self.norm = norm_layer(self.num_features) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else 
nn.Identity() self.apply(self._init_weights) @torch.jit.ignore def no_weight_decay(self): return set(['pos_block.' + n for (n, p) in self.pos_block.named_parameters()]) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem='^patch_embeds.0', blocks=[('^(?:blocks|patch_embeds|pos_block)\\.(\\d+)', None), ('^norm', (99999,))] if coarse else [('^blocks\\.(\\d+)\\.(\\d+)', None), ('^(?:patch_embeds|pos_block)\\.(\\d+)', (0,)), ('^norm', (99999,))]) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg') self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if m.bias is not None: m.bias.data.zero_() def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt == 'NCHW', 'Output shape for Twins must be NCHW.' 
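# Usage sketch (hedged; assumes the twins_pcpvt_small entrypoint registered
# later in this file and timm's documented create_model API): Twins's
# forward_intermediates yields one NCHW feature map per stage, with strides
# 4/8/16/32 relative to the input, so a 224x224 input gives 56/28/14/7 grids.
import torch
import timm

twins = timm.create_model('twins_pcpvt_small', pretrained=False).eval()
with torch.no_grad():
    feats = twins.forward_intermediates(
        torch.randn(1, 3, 224, 224),
        indices=(0, 1, 2, 3),     # one index per stage
        intermediates_only=True,  # return only the intermediate maps
    )
for f in feats:
    print(tuple(f.shape))  # (1, 64, 56, 56), (1, 128, 28, 28), (1, 320, 14, 14), (1, 512, 7, 7)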
intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) (B, _, height, width) = x.shape for (i, (embed, drop, blocks, pos_blk)) in enumerate(zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)): (x, size) = embed(x) x = drop(x) for (j, blk) in enumerate(blocks): x = blk(x, size) if j == 0: x = pos_blk(x, size) if i < len(self.depths) - 1: x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous() if i in take_indices: intermediates.append(x) elif i in take_indices: x_feat = self.norm(x) if norm else x intermediates.append(x_feat.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous()) if intermediates_only: return intermediates x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): B = x.shape[0] for (i, (embed, drop, blocks, pos_blk)) in enumerate(zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)): (x, size) = embed(x) x = drop(x) for (j, blk) in enumerate(blocks): x = blk(x, size) if j == 0: x = pos_blk(x, size) if i < len(self.depths) - 1: x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous() x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool == 'avg': x = x.mean(dim=1) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_twins(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 4) model = build_model_with_cfg(Twins, variant, pretrained, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embeds.0.proj', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'twins_pcpvt_small.in1k': _cfg(hf_hub_id='timm/'), 'twins_pcpvt_base.in1k': _cfg(hf_hub_id='timm/'), 'twins_pcpvt_large.in1k': _cfg(hf_hub_id='timm/'), 'twins_svt_small.in1k': _cfg(hf_hub_id='timm/'), 'twins_svt_base.in1k': _cfg(hf_hub_id='timm/'), 'twins_svt_large.in1k': _cfg(hf_hub_id='timm/')}) @register_model def twins_pcpvt_small(pretrained=False, **kwargs) -> Twins: model_args = dict(patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_pcpvt_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_pcpvt_base(pretrained=False, **kwargs) -> Twins: model_args = dict(patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_pcpvt_base', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_pcpvt_large(pretrained=False, **kwargs) -> Twins: model_args = dict(patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_pcpvt_large', pretrained=pretrained, 
**dict(model_args, **kwargs)) @register_model def twins_svt_small(pretrained=False, **kwargs) -> Twins: model_args = dict(patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4], depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_svt_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_svt_base(pretrained=False, **kwargs) -> Twins: model_args = dict(patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4], depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_svt_base', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_svt_large(pretrained=False, **kwargs) -> Twins: model_args = dict(patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4], depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_svt_large', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/vgg.py """""" from typing import Any, Dict, List, Optional, Union, cast import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs __all__ = ['VGG'] cfgs: Dict[str, List[Union[str, int]]] = {'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']} @register_notrace_module class ConvMlp(nn.Module): def __init__(self, in_features=512, out_features=4096, kernel_size=7, mlp_ratio=1.0, drop_rate: float=0.2, act_layer: nn.Module=None, conv_layer: nn.Module=None): super(ConvMlp, self).__init__() self.input_kernel_size = kernel_size mid_features = int(out_features * mlp_ratio) self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True) self.act1 = act_layer(True) self.drop = nn.Dropout(drop_rate) self.fc2 = conv_layer(mid_features, out_features, 1, bias=True) self.act2 = act_layer(True) def forward(self, x): if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size: output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1])) x = F.adaptive_avg_pool2d(x, output_size) x = self.fc1(x) x = self.act1(x) x = self.drop(x) x = self.fc2(x) x = self.act2(x) return x class VGG(nn.Module): def __init__(self, cfg: List[Any], num_classes: int=1000, in_chans: int=3, output_stride: int=32, mlp_ratio: float=1.0, act_layer: nn.Module=nn.ReLU, conv_layer: nn.Module=nn.Conv2d, norm_layer: nn.Module=None, global_pool: str='avg', drop_rate: float=0.0) -> None: super(VGG, self).__init__() assert output_stride == 32 self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False self.use_norm = norm_layer is not None self.feature_info = [] prev_chs = in_chans net_stride = 1 pool_layer = nn.MaxPool2d layers: List[nn.Module] = [] for v in cfg: last_idx = len(layers) - 1 if v == 'M': self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, 
module=f'features.{last_idx}')) layers += [pool_layer(kernel_size=2, stride=2)] net_stride *= 2 else: v = cast(int, v) conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1) if norm_layer is not None: layers += [conv2d, norm_layer(v), act_layer(inplace=True)] else: layers += [conv2d, act_layer(inplace=True)] prev_chs = v self.features = nn.Sequential(*layers) self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}')) self.num_features = prev_chs self.head_hidden_size = 4096 self.pre_logits = ConvMlp(prev_chs, self.head_hidden_size, 7, mlp_ratio=mlp_ratio, drop_rate=drop_rate, act_layer=act_layer, conv_layer=conv_layer) self.head = ClassifierHead(self.head_hidden_size, num_classes, pool_type=global_pool, drop_rate=drop_rate) self._initialize_weights() @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^features\\.0', blocks='^features\\.(\\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.features(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool=False): x = self.pre_logits(x) return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x def _initialize_weights(self) -> None: for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def _filter_fn(state_dict): out_dict = {} for (k, v) in state_dict.items(): k_r = k k_r = k_r.replace('classifier.0', 'pre_logits.fc1') k_r = k_r.replace('classifier.3', 'pre_logits.fc2') k_r = k_r.replace('classifier.6', 'head.fc') if 'classifier.0.weight' in k: v = v.reshape(-1, 512, 7, 7) if 'classifier.3.weight' in k: v = v.reshape(-1, 4096, 1, 1) out_dict[k_r] = v return out_dict def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG: cfg = variant.split('_')[0] out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4, 5)) model = build_model_with_cfg(VGG, variant, pretrained, model_cfg=cfgs[cfg], feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), pretrained_filter_fn=_filter_fn, **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'features.0', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'vgg11.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg13.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg16.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg19.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg11_bn.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg13_bn.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg16_bn.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg19_bn.tv_in1k': _cfg(hf_hub_id='timm/')}) @register_model def vgg11(pretrained: bool=False, **kwargs: Any) -> 
VGG: model_args = dict(**kwargs) return _create_vgg('vgg11', pretrained=pretrained, **model_args) @register_model def vgg11_bn(pretrained: bool=False, **kwargs: Any) -> VGG: model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg11_bn', pretrained=pretrained, **model_args) @register_model def vgg13(pretrained: bool=False, **kwargs: Any) -> VGG: model_args = dict(**kwargs) return _create_vgg('vgg13', pretrained=pretrained, **model_args) @register_model def vgg13_bn(pretrained: bool=False, **kwargs: Any) -> VGG: model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args) @register_model def vgg16(pretrained: bool=False, **kwargs: Any) -> VGG: model_args = dict(**kwargs) return _create_vgg('vgg16', pretrained=pretrained, **model_args) @register_model def vgg16_bn(pretrained: bool=False, **kwargs: Any) -> VGG: model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args) @register_model def vgg19(pretrained: bool=False, **kwargs: Any) -> VGG: model_args = dict(**kwargs) return _create_vgg('vgg19', pretrained=pretrained, **model_args) @register_model def vgg19_bn(pretrained: bool=False, **kwargs: Any) -> VGG: model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args) # File: pytorch-image-models-main/timm/models/visformer.py """""" import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier, use_fused_attn from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['Visformer'] class SpatialMlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0, group=8, spatial_conv=False): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features drop_probs = to_2tuple(drop) self.in_features = in_features self.out_features = out_features self.spatial_conv = spatial_conv if self.spatial_conv: if group < 2: hidden_features = in_features * 5 // 6 else: hidden_features = in_features * 2 self.hidden_features = hidden_features self.group = group self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False) self.act1 = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) if self.spatial_conv: self.conv2 = nn.Conv2d(hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False) self.act2 = act_layer() else: self.conv2 = None self.act2 = None self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False) self.drop3 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.conv1(x) x = self.act1(x) x = self.drop1(x) if self.conv2 is not None: x = self.conv2(x) x = self.act2(x) x = self.conv3(x) x = self.drop3(x) return x class Attention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, head_dim_ratio=1.0, attn_drop=0.0, proj_drop=0.0): super().__init__() self.dim = dim self.num_heads = num_heads head_dim = round(dim // num_heads * head_dim_ratio) self.head_dim = head_dim self.scale = head_dim ** (-0.5) self.fused_attn = use_fused_attn(experimental=True) self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False) self.attn_drop 
= nn.Dropout(attn_drop) self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): (B, C, H, W) = x.shape x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3) (q, k, v) = x.unbind(0) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention(q.contiguous(), k.contiguous(), v.contiguous(), dropout_p=self.attn_drop.p if self.training else 0.0) else: attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, head_dim_ratio=1.0, mlp_ratio=4.0, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=LayerNorm2d, group=8, attn_disabled=False, spatial_conv=False): super().__init__() self.spatial_conv = spatial_conv self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() if attn_disabled: self.norm1 = None self.attn = None else: self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, head_dim_ratio=head_dim_ratio, attn_drop=attn_drop, proj_drop=proj_drop) self.norm2 = norm_layer(dim) self.mlp = SpatialMlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, group=group, spatial_conv=spatial_conv) def forward(self, x): if self.attn is not None: x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class Visformer(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4.0, drop_rate=0.0, pos_drop_rate=0.0, proj_drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=LayerNorm2d, attn_stage='111', use_pos_embed=True, spatial_conv='111', vit_stem=False, group=8, global_pool='avg', conv_init=False, embed_norm=None): super().__init__() img_size = to_2tuple(img_size) self.num_classes = num_classes self.embed_dim = embed_dim self.init_channels = init_channels self.img_size = img_size self.vit_stem = vit_stem self.conv_init = conv_init if isinstance(depth, (list, tuple)): (self.stage_num1, self.stage_num2, self.stage_num3) = depth depth = sum(depth) else: self.stage_num1 = self.stage_num3 = depth // 3 self.stage_num2 = depth - self.stage_num1 - self.stage_num3 self.use_pos_embed = use_pos_embed self.grad_checkpointing = False dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] if self.vit_stem: self.stem = None self.patch_embed1 = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) img_size = [x // patch_size for x in img_size] elif self.init_channels is None: self.stem = None self.patch_embed1 = PatchEmbed(img_size=img_size, patch_size=patch_size // 2, in_chans=in_chans, embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) img_size = [x // (patch_size // 2) for x in img_size] else: self.stem = nn.Sequential(nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False), nn.BatchNorm2d(self.init_channels), nn.ReLU(inplace=True)) img_size = [x // 2 for x in img_size] self.patch_embed1 = PatchEmbed(img_size=img_size, patch_size=patch_size // 4, in_chans=self.init_channels, embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) img_size = [x // (patch_size // 4) for x in img_size] 
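# Hedged toy decode of the 3-character stage flags consumed by the Block
# constructions below (illustration only; the flag values match the
# visformer_small config defined later in this file): one character per stage,
# '1' enabling self-attention (attn_stage) or the grouped 3x3 conv in
# SpatialMlp (spatial_conv), so '011'/'100' makes stage 1 conv-only and
# stages 2-3 attention stages.
_attn_stage, _spatial_conv = '011', '100'
for _s in range(3):
    print(
        f'stage {_s + 1}:',
        'attention' if _attn_stage[_s] == '1' else 'no attention',
        '+ spatial 3x3 conv' if _spatial_conv[_s] == '1' else '+ pointwise mlp only',
    )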
if self.use_pos_embed: if self.vit_stem: self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) else: self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim // 2, *img_size)) self.pos_drop = nn.Dropout(p=pos_drop_rate) else: self.pos_embed1 = None self.stage1 = nn.Sequential(*[Block(dim=embed_dim // 2, num_heads=num_heads, head_dim_ratio=0.5, mlp_ratio=mlp_ratio, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, group=group, attn_disabled=attn_stage[0] == '0', spatial_conv=spatial_conv[0] == '1') for i in range(self.stage_num1)]) if not self.vit_stem: self.patch_embed2 = PatchEmbed(img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim // 2, embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) img_size = [x // (patch_size // 8) for x in img_size] if self.use_pos_embed: self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) else: self.pos_embed2 = None else: self.patch_embed2 = None self.stage2 = nn.Sequential(*[Block(dim=embed_dim, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, group=group, attn_disabled=attn_stage[1] == '0', spatial_conv=spatial_conv[1] == '1') for i in range(self.stage_num1, self.stage_num1 + self.stage_num2)]) if not self.vit_stem: self.patch_embed3 = PatchEmbed(img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim, embed_dim=embed_dim * 2, norm_layer=embed_norm, flatten=False) img_size = [x // (patch_size // 8) for x in img_size] if self.use_pos_embed: self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim * 2, *img_size)) else: self.pos_embed3 = None else: self.patch_embed3 = None self.stage3 = nn.Sequential(*[Block(dim=embed_dim * 2, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, group=group, attn_disabled=attn_stage[2] == '0', spatial_conv=spatial_conv[2] == '1') for i in range(self.stage_num1 + self.stage_num2, depth)]) self.num_features = self.head_hidden_size = embed_dim if self.vit_stem else embed_dim * 2 self.norm = norm_layer(self.num_features) (global_pool, head) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) self.global_pool = global_pool self.head_drop = nn.Dropout(drop_rate) self.head = head if self.use_pos_embed: trunc_normal_(self.pos_embed1, std=0.02) if not self.vit_stem: trunc_normal_(self.pos_embed2, std=0.02) trunc_normal_(self.pos_embed3, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Conv2d): if self.conv_init: nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') else: trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0.0) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^patch_embed1|pos_embed1|stem', blocks=[('^stage(\\d+)\\.(\\d+)' if coarse else '^stage(\\d+)\\.(\\d+)', None), ('^(?:patch_embed|pos_embed)(\\d+)', (0,)), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.head) = 
create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): if self.stem is not None: x = self.stem(x) x = self.patch_embed1(x) if self.pos_embed1 is not None: x = self.pos_drop(x + self.pos_embed1) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stage1, x) else: x = self.stage1(x) if self.patch_embed2 is not None: x = self.patch_embed2(x) if self.pos_embed2 is not None: x = self.pos_drop(x + self.pos_embed2) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stage2, x) else: x = self.stage2(x) if self.patch_embed3 is not None: x = self.patch_embed3(x) if self.pos_embed3 is not None: x = self.pos_drop(x + self.pos_embed3) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stage3, x) else: x = self.stage3(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = build_model_with_cfg(Visformer, variant, pretrained, **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'visformer_tiny.in1k': _cfg(hf_hub_id='timm/'), 'visformer_small.in1k': _cfg(hf_hub_id='timm/')}) @register_model def visformer_tiny(pretrained=False, **kwargs) -> Visformer: model_cfg = dict(init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4.0, group=8, attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, embed_norm=nn.BatchNorm2d) model = _create_visformer('visformer_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def visformer_small(pretrained=False, **kwargs) -> Visformer: model_cfg = dict(init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4.0, group=8, attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, embed_norm=nn.BatchNorm2d) model = _create_visformer('visformer_small', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model # File: pytorch-image-models-main/timm/models/vision_transformer.py """""" import logging import math from collections import OrderedDict from functools import partial from typing import Any, Callable, Dict, Optional, Set, Tuple, Type, Union, List try: from typing import Literal except ImportError: from typing_extensions import Literal import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint from torch.jit import Final from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import PatchEmbed, Mlp, DropPath, AttentionPoolLatent, RmsNorm, PatchDropout, SwiGLUPacked, trunc_normal_, lecun_normal_, resample_patch_embed, resample_abs_pos_embed, use_fused_attn, get_act_layer, get_norm_layer, LayerType from ._builder import build_model_with_cfg from 
._features import feature_take_indices from ._manipulate import named_apply, checkpoint_seq, adapt_input_conv from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['VisionTransformer'] _logger = logging.getLogger(__name__) class Attention(nn.Module): fused_attn: Final[bool] def __init__(self, dim: int, num_heads: int=8, qkv_bias: bool=False, qk_norm: bool=False, attn_drop: float=0.0, proj_drop: float=0.0, norm_layer: nn.Module=nn.LayerNorm) -> None: super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x: torch.Tensor) -> torch.Tensor: (B, N, C) = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) (q, k) = (self.q_norm(q), self.k_norm(k)) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LayerScale(nn.Module): def __init__(self, dim: int, init_values: float=1e-05, inplace: bool=False) -> None: super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x: torch.Tensor) -> torch.Tensor: return x.mul_(self.gamma) if self.inplace else x * self.gamma class Block(nn.Module): def __init__(self, dim: int, num_heads: int, mlp_ratio: float=4.0, qkv_bias: bool=False, qk_norm: bool=False, proj_drop: float=0.0, attn_drop: float=0.0, init_values: Optional[float]=None, drop_path: float=0.0, act_layer: nn.Module=nn.GELU, norm_layer: nn.Module=nn.LayerNorm, mlp_layer: nn.Module=Mlp) -> None: super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = mlp_layer(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class ResPostBlock(nn.Module): def __init__(self, dim: int, num_heads: int, mlp_ratio: float=4.0, qkv_bias: bool=False, qk_norm: bool=False, proj_drop: float=0.0, attn_drop: float=0.0, init_values: Optional[float]=None, drop_path: float=0.0, act_layer: nn.Module=nn.GELU, norm_layer: nn.Module=nn.LayerNorm, mlp_layer: nn.Module=Mlp) -> None: super().__init__() self.init_values = init_values self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, 
qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.mlp = mlp_layer(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.init_weights() def init_weights(self) -> None: if self.init_values is not None: nn.init.constant_(self.norm1.weight, self.init_values) nn.init.constant_(self.norm2.weight, self.init_values) def forward(self, x: torch.Tensor) -> torch.Tensor: x = x + self.drop_path1(self.norm1(self.attn(x))) x = x + self.drop_path2(self.norm2(self.mlp(x))) return x class ParallelScalingBlock(nn.Module): fused_attn: Final[bool] def __init__(self, dim: int, num_heads: int, mlp_ratio: float=4.0, qkv_bias: bool=False, qk_norm: bool=False, proj_drop: float=0.0, attn_drop: float=0.0, init_values: Optional[float]=None, drop_path: float=0.0, act_layer: nn.Module=nn.GELU, norm_layer: nn.Module=nn.LayerNorm, mlp_layer: Optional[nn.Module]=None) -> None: super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** (-0.5) self.fused_attn = use_fused_attn() mlp_hidden_dim = int(mlp_ratio * dim) in_proj_out_dim = mlp_hidden_dim + 3 * dim self.in_norm = norm_layer(dim) self.in_proj = nn.Linear(dim, in_proj_out_dim, bias=qkv_bias) self.in_split = [mlp_hidden_dim] + [dim] * 3 if qkv_bias: self.register_buffer('qkv_bias', None) self.register_parameter('mlp_bias', None) else: self.register_buffer('qkv_bias', torch.zeros(3 * dim), persistent=False) self.mlp_bias = nn.Parameter(torch.zeros(mlp_hidden_dim)) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.attn_drop = nn.Dropout(attn_drop) self.attn_out_proj = nn.Linear(dim, dim) self.mlp_drop = nn.Dropout(proj_drop) self.mlp_act = act_layer() self.mlp_out_proj = nn.Linear(mlp_hidden_dim, dim) self.ls = LayerScale(dim, init_values=init_values) if init_values is not None else nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: (B, N, C) = x.shape y = self.in_norm(x) if self.mlp_bias is not None: y = F.linear(y, self.in_proj.weight, torch.cat((self.qkv_bias, self.mlp_bias))) else: y = self.in_proj(y) (x_mlp, q, k, v) = torch.split(y, self.in_split, dim=-1) q = self.q_norm(q.view(B, N, self.num_heads, self.head_dim)).transpose(1, 2) k = self.k_norm(k.view(B, N, self.num_heads, self.head_dim)).transpose(1, 2) v = v.view(B, N, self.num_heads, self.head_dim).transpose(1, 2) if self.fused_attn: x_attn = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x_attn = attn @ v x_attn = x_attn.transpose(1, 2).reshape(B, N, C) x_attn = self.attn_out_proj(x_attn) x_mlp = self.mlp_act(x_mlp) x_mlp = self.mlp_drop(x_mlp) x_mlp = self.mlp_out_proj(x_mlp) y = self.drop_path(self.ls(x_attn + x_mlp)) x = x + y return x class ParallelThingsBlock(nn.Module): def __init__(self, dim: int, num_heads: int, num_parallel: int=2, mlp_ratio: float=4.0, qkv_bias: bool=False, qk_norm: bool=False, init_values: Optional[float]=None, proj_drop: float=0.0, attn_drop: 
float=0.0, drop_path: float=0.0, act_layer: nn.Module=nn.GELU, norm_layer: nn.Module=nn.LayerNorm, mlp_layer: nn.Module=Mlp) -> None: super().__init__() self.num_parallel = num_parallel self.attns = nn.ModuleList() self.ffns = nn.ModuleList() for _ in range(num_parallel): self.attns.append(nn.Sequential(OrderedDict([('norm', norm_layer(dim)), ('attn', Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer)), ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()), ('drop_path', DropPath(drop_path) if drop_path > 0.0 else nn.Identity())]))) self.ffns.append(nn.Sequential(OrderedDict([('norm', norm_layer(dim)), ('mlp', mlp_layer(dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop)), ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()), ('drop_path', DropPath(drop_path) if drop_path > 0.0 else nn.Identity())]))) def _forward_jit(self, x: torch.Tensor) -> torch.Tensor: x = x + torch.stack([attn(x) for attn in self.attns]).sum(dim=0) x = x + torch.stack([ffn(x) for ffn in self.ffns]).sum(dim=0) return x @torch.jit.ignore def _forward(self, x: torch.Tensor) -> torch.Tensor: x = x + sum((attn(x) for attn in self.attns)) x = x + sum((ffn(x) for ffn in self.ffns)) return x def forward(self, x: torch.Tensor) -> torch.Tensor: if torch.jit.is_scripting() or torch.jit.is_tracing(): return self._forward_jit(x) else: return self._forward(x) def global_pool_nlc(x: torch.Tensor, pool_type: str='token', num_prefix_tokens: int=1, reduce_include_prefix: bool=False): if not pool_type: return x if pool_type == 'token': x = x[:, 0] else: x = x if reduce_include_prefix else x[:, num_prefix_tokens:] if pool_type == 'avg': x = x.mean(dim=1) elif pool_type == 'avgmax': x = 0.5 * (x.amax(dim=1) + x.mean(dim=1)) elif pool_type == 'max': x = x.amax(dim=1) else: assert not pool_type, f'Unknown pool type {pool_type}' return x class VisionTransformer(nn.Module): dynamic_img_size: Final[bool] def __init__(self, img_size: Union[int, Tuple[int, int]]=224, patch_size: Union[int, Tuple[int, int]]=16, in_chans: int=3, num_classes: int=1000, global_pool: Literal['', 'avg', 'avgmax', 'max', 'token', 'map']='token', embed_dim: int=768, depth: int=12, num_heads: int=12, mlp_ratio: float=4.0, qkv_bias: bool=True, qk_norm: bool=False, init_values: Optional[float]=None, class_token: bool=True, pos_embed: str='learn', no_embed_class: bool=False, reg_tokens: int=0, pre_norm: bool=False, fc_norm: Optional[bool]=None, dynamic_img_size: bool=False, dynamic_img_pad: bool=False, drop_rate: float=0.0, pos_drop_rate: float=0.0, patch_drop_rate: float=0.0, proj_drop_rate: float=0.0, attn_drop_rate: float=0.0, drop_path_rate: float=0.0, weight_init: Literal['skip', 'jax', 'jax_nlhb', 'moco', '']='', fix_init: bool=False, embed_layer: Callable=PatchEmbed, norm_layer: Optional[LayerType]=None, act_layer: Optional[LayerType]=None, block_fn: Type[nn.Module]=Block, mlp_layer: Type[nn.Module]=Mlp) -> None: super().__init__() assert global_pool in ('', 'avg', 'avgmax', 'max', 'token', 'map') assert class_token or global_pool != 'token' assert pos_embed in ('', 'none', 'learn') use_fc_norm = global_pool in ('avg', 'avgmax', 'max') if fc_norm is None else fc_norm norm_layer = get_norm_layer(norm_layer) or partial(nn.LayerNorm, eps=1e-06) act_layer = get_act_layer(act_layer) or nn.GELU self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = 
self.embed_dim = embed_dim self.num_prefix_tokens = 1 if class_token else 0 self.num_prefix_tokens += reg_tokens self.num_reg_tokens = reg_tokens self.has_class_token = class_token self.no_embed_class = no_embed_class self.dynamic_img_size = dynamic_img_size self.grad_checkpointing = False embed_args = {} if dynamic_img_size: embed_args.update(dict(strict_img_size=False, output_fmt='NHWC')) self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, bias=not pre_norm, dynamic_img_pad=dynamic_img_pad, **embed_args) num_patches = self.patch_embed.num_patches reduction = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None self.reg_token = nn.Parameter(torch.zeros(1, reg_tokens, embed_dim)) if reg_tokens else None embed_len = num_patches if no_embed_class else num_patches + self.num_prefix_tokens if not pos_embed or pos_embed == 'none': self.pos_embed = None else: self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * 0.02) self.pos_drop = nn.Dropout(p=pos_drop_rate) if patch_drop_rate > 0: self.patch_drop = PatchDropout(patch_drop_rate, num_prefix_tokens=self.num_prefix_tokens) else: self.patch_drop = nn.Identity() self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity() dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] self.blocks = nn.Sequential(*[block_fn(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_norm=qk_norm, init_values=init_values, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, mlp_layer=mlp_layer) for i in range(depth)]) self.feature_info = [dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=reduction) for i in range(depth)] self.norm = norm_layer(embed_dim) if not use_fc_norm else nn.Identity() if global_pool == 'map': self.attn_pool = AttentionPoolLatent(self.embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, norm_layer=norm_layer) else: self.attn_pool = None self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity() self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() if weight_init != 'skip': self.init_weights(weight_init) if fix_init: self.fix_init_weight() def fix_init_weight(self): def rescale(param, _layer_id): param.div_(math.sqrt(2.0 * _layer_id)) for (layer_id, layer) in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def init_weights(self, mode: str='') -> None: assert mode in ('jax', 'jax_nlhb', 'moco', '') head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.0 if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=0.02) if self.cls_token is not None: nn.init.normal_(self.cls_token, std=1e-06) if self.reg_token is not None: nn.init.normal_(self.reg_token, std=1e-06) named_apply(get_init_weights_vit(mode, head_bias), self) def _init_weights(self, m: nn.Module) -> None: init_weights_vit_timm(m) @torch.jit.ignore() def load_pretrained(self, checkpoint_path: str, prefix: str='') -> None: _load_weights(self, checkpoint_path, prefix) @torch.jit.ignore def no_weight_decay(self) -> Set: return {'pos_embed', 'cls_token', 'dist_token'} @torch.jit.ignore def group_matcher(self, coarse: bool=False) -> Dict: return dict(stem='^cls_token|pos_embed|patch_embed', 
blocks=[('^blocks\\.(\\d+)', None), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable: bool=True) -> None: self.grad_checkpointing = enable if hasattr(self.patch_embed, 'set_grad_checkpointing'): self.patch_embed.set_grad_checkpointing(enable) @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg', 'avgmax', 'max', 'token', 'map') if global_pool == 'map' and self.attn_pool is None: assert False, 'Cannot currently add attention pooling in reset_classifier().' elif global_pool != 'map' and self.attn_pool is not None: self.attn_pool = None self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def set_input_size(self, img_size: Optional[Tuple[int, int]]=None, patch_size: Optional[Tuple[int, int]]=None): prev_grid_size = self.patch_embed.grid_size self.patch_embed.set_input_size(img_size=img_size, patch_size=patch_size) if self.pos_embed is not None: num_prefix_tokens = 0 if self.no_embed_class else self.num_prefix_tokens num_new_tokens = self.patch_embed.num_patches + num_prefix_tokens if num_new_tokens != self.pos_embed.shape[1]: self.pos_embed = nn.Parameter(resample_abs_pos_embed(self.pos_embed, new_size=self.patch_embed.grid_size, old_size=prev_grid_size, num_prefix_tokens=num_prefix_tokens, verbose=True)) def _pos_embed(self, x: torch.Tensor) -> torch.Tensor: if self.pos_embed is None: return x.view(x.shape[0], -1, x.shape[-1]) if self.dynamic_img_size: (B, H, W, C) = x.shape pos_embed = resample_abs_pos_embed(self.pos_embed, (H, W), num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens) x = x.view(B, -1, C) else: pos_embed = self.pos_embed to_cat = [] if self.cls_token is not None: to_cat.append(self.cls_token.expand(x.shape[0], -1, -1)) if self.reg_token is not None: to_cat.append(self.reg_token.expand(x.shape[0], -1, -1)) if self.no_embed_class: x = x + pos_embed if to_cat: x = torch.cat(to_cat + [x], dim=1) else: if to_cat: x = torch.cat(to_cat + [x], dim=1) x = x + pos_embed return self.pos_drop(x) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, return_prefix_tokens: bool=False, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' 
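# Usage sketch (hedged; assumes the standard vit_small_patch16_224 entrypoint
# and timm's documented create_model API): pull the final block's tokens as an
# NCHW map, with the prefix (class) token stripped and the final norm applied.
import torch
import timm

vit = timm.create_model('vit_small_patch16_224', pretrained=False).eval()
with torch.no_grad():
    (feat,) = vit.forward_intermediates(
        torch.randn(1, 3, 224, 224),
        indices=[-1],             # negative indices count back from the last block
        norm=True,                # apply model.norm to the intermediate
        output_fmt='NCHW',
        intermediates_only=True,
    )
print(tuple(feat.shape))  # (1, 384, 14, 14) for patch16 at 224x224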
reshape = output_fmt == 'NCHW' intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) (B, _, height, width) = x.shape x = self.patch_embed(x) x = self._pos_embed(x) x = self.patch_drop(x) x = self.norm_pre(x) if torch.jit.is_scripting() or not stop_early: blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for (i, blk) in enumerate(blocks): x = blk(x) if i in take_indices: intermediates.append(self.norm(x) if norm else x) if self.num_prefix_tokens: prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates] intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates] if reshape: (H, W) = self.patch_embed.dynamic_feat_size((height, width)) intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] if not torch.jit.is_scripting() and return_prefix_tokens: intermediates = list(zip(intermediates, prefix_tokens)) if intermediates_only: return intermediates x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] if prune_norm: self.norm = nn.Identity() if prune_head: self.fc_norm = nn.Identity() self.reset_classifier(0, '') return take_indices def get_intermediate_layers(self, x: torch.Tensor, n: Union[int, List[int], Tuple[int]]=1, reshape: bool=False, return_prefix_tokens: bool=False, norm: bool=False) -> List[torch.Tensor]: return self.forward_intermediates(x, n, return_prefix_tokens=return_prefix_tokens, norm=norm, output_fmt='NCHW' if reshape else 'NLC', intermediates_only=True) def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.patch_embed(x) x = self._pos_embed(x) x = self.patch_drop(x) x = self.norm_pre(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.norm(x) return x def pool(self, x: torch.Tensor, pool_type: Optional[str]=None) -> torch.Tensor: if self.attn_pool is not None: x = self.attn_pool(x) return x pool_type = self.global_pool if pool_type is None else pool_type x = global_pool_nlc(x, pool_type=pool_type, num_prefix_tokens=self.num_prefix_tokens) return x def forward_head(self, x: torch.Tensor, pre_logits: bool=False) -> torch.Tensor: x = self.pool(x) x = self.fc_norm(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x def init_weights_vit_timm(module: nn.Module, name: str='') -> None: if isinstance(module, nn.Linear): trunc_normal_(module.weight, std=0.02) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def init_weights_vit_jax(module: nn.Module, name: str='', head_bias: float=0.0) -> None: if isinstance(module, nn.Linear): if name.startswith('head'): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.normal_(module.bias, std=1e-06) if 'mlp' in name else nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def init_weights_vit_moco(module: nn.Module, name: str='') -> None: if isinstance(module, 
nn.Linear): if 'qkv' in name: val = math.sqrt(6.0 / float(module.weight.shape[0] // 3 + module.weight.shape[1])) nn.init.uniform_(module.weight, -val, val) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def get_init_weights_vit(mode: str='jax', head_bias: float=0.0) -> Callable: if 'jax' in mode: return partial(init_weights_vit_jax, head_bias=head_bias) elif 'moco' in mode: return init_weights_vit_moco else: return init_weights_vit_timm def resize_pos_embed(posemb: torch.Tensor, posemb_new: torch.Tensor, num_prefix_tokens: int=1, gs_new: Tuple[int, int]=(), interpolation: str='bicubic', antialias: bool=False) -> torch.Tensor: ntok_new = posemb_new.shape[1] - num_prefix_tokens ntok_old = posemb.shape[1] - num_prefix_tokens gs_old = [int(math.sqrt(ntok_old))] * 2 if not len(gs_new): gs_new = [int(math.sqrt(ntok_new))] * 2 return resample_abs_pos_embed(posemb, gs_new, gs_old, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True) @torch.no_grad() def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str='') -> None: import numpy as np def _n2p(w, t=True, idx=None): if idx is not None: w = w[idx] if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: w = w.flatten() if t: if w.ndim == 4: w = w.transpose([3, 2, 0, 1]) elif w.ndim == 3: w = w.transpose([2, 0, 1]) elif w.ndim == 2: w = w.transpose([1, 0]) return torch.from_numpy(w) w = np.load(checkpoint_path) interpolation = 'bilinear' antialias = False big_vision = False if not prefix: if 'opt/target/embedding/kernel' in w: prefix = 'opt/target/' elif 'params/embedding/kernel' in w: prefix = 'params/' big_vision = True elif 'params/img/embedding/kernel' in w: prefix = 'params/img/' big_vision = True if hasattr(model.patch_embed, 'backbone'): backbone = model.patch_embed.backbone stem_only = not hasattr(backbone, 'stem') stem = backbone if stem_only else backbone.stem stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) if not stem_only: for (i, stage) in enumerate(backbone.stages): for (j, block) in enumerate(stage.blocks): bp = f'{prefix}block{i + 1}/unit{j + 1}/' for r in range(3): getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) if block.downsample is not None: block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) else: embed_conv_w = adapt_input_conv(model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) if embed_conv_w.shape[-2:] != model.patch_embed.proj.weight.shape[-2:]: embed_conv_w = resample_patch_embed(embed_conv_w, model.patch_embed.proj.weight.shape[-2:], interpolation=interpolation, antialias=antialias, verbose=True) model.patch_embed.proj.weight.copy_(embed_conv_w) model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) if model.cls_token is not None: model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) if big_vision: pos_embed_w = _n2p(w[f'{prefix}pos_embedding'], 
t=False) else: pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) if pos_embed_w.shape != model.pos_embed.shape: old_shape = pos_embed_w.shape num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) pos_embed_w = resample_abs_pos_embed(pos_embed_w, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True) model.pos_embed.copy_(pos_embed_w) model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) if isinstance(model.head, nn.Linear) and f'{prefix}head/bias' in w and (model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]): model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) if model.attn_pool is not None: block_prefix = f'{prefix}MAPHead_0/' mha_prefix = block_prefix + f'MultiHeadDotProductAttention_0/' model.attn_pool.latent.copy_(_n2p(w[f'{block_prefix}probe'], t=False)) model.attn_pool.kv.weight.copy_(torch.cat([_n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('key', 'value')])) model.attn_pool.kv.bias.copy_(torch.cat([_n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('key', 'value')])) model.attn_pool.q.weight.copy_(_n2p(w[f'{mha_prefix}query/kernel'], t=False).flatten(1).T) model.attn_pool.q.bias.copy_(_n2p(w[f'{mha_prefix}query/bias'], t=False).reshape(-1)) model.attn_pool.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) model.attn_pool.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) model.attn_pool.norm.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) model.attn_pool.norm.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) for r in range(2): getattr(model.attn_pool.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_0/Dense_{r}/kernel'])) getattr(model.attn_pool.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_0/Dense_{r}/bias'])) (mha_sub, b_sub, ln1_sub) = (0, 0, 1) if big_vision else (1, 3, 2) for (i, block) in enumerate(model.blocks.children()): if f'{prefix}Transformer/encoderblock/LayerNorm_0/scale' in w: block_prefix = f'{prefix}Transformer/encoderblock/' idx = i else: block_prefix = f'{prefix}Transformer/encoderblock_{i}/' idx = None mha_prefix = block_prefix + f'MultiHeadDotProductAttention_{mha_sub}/' block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'], idx=idx)) block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'], idx=idx)) block.attn.qkv.weight.copy_(torch.cat([_n2p(w[f'{mha_prefix}{n}/kernel'], t=False, idx=idx).flatten(1).T for n in ('query', 'key', 'value')])) block.attn.qkv.bias.copy_(torch.cat([_n2p(w[f'{mha_prefix}{n}/bias'], t=False, idx=idx).reshape(-1) for n in ('query', 'key', 'value')])) block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel'], idx=idx).flatten(1)) block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'], idx=idx)) block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_{ln1_sub}/scale'], idx=idx)) block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_{ln1_sub}/bias'], idx=idx)) for r in range(2): getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_{b_sub}/Dense_{r}/kernel'], idx=idx)) getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_{b_sub}/Dense_{r}/bias'], idx=idx)) def _convert_openai_clip(state_dict: Dict[str, torch.Tensor], model: 
VisionTransformer, prefix: str='visual.') -> Dict[str, torch.Tensor]: out_dict = {} swaps = [('conv1', 'patch_embed.proj'), ('positional_embedding', 'pos_embed'), ('transformer.resblocks.', 'blocks.'), ('ln_pre', 'norm_pre'), ('ln_post', 'norm'), ('ln_', 'norm'), ('in_proj_', 'qkv.'), ('out_proj', 'proj'), ('mlp.c_fc', 'mlp.fc1'), ('mlp.c_proj', 'mlp.fc2')] for (k, v) in state_dict.items(): if not k.startswith(prefix): continue k = k.replace(prefix, '') for sp in swaps: k = k.replace(sp[0], sp[1]) if k == 'proj': k = 'head.weight' v = v.transpose(0, 1) out_dict['head.bias'] = torch.zeros(v.shape[0]) elif k == 'class_embedding': k = 'cls_token' v = v.unsqueeze(0).unsqueeze(1) elif k == 'pos_embed': v = v.unsqueeze(0) out_dict[k] = v return out_dict def _convert_dinov2(state_dict: Dict[str, torch.Tensor], model: VisionTransformer) -> Dict[str, torch.Tensor]: import re out_dict = {} state_dict.pop('mask_token', None) if 'register_tokens' in state_dict: out_dict['reg_token'] = state_dict.pop('register_tokens') out_dict['cls_token'] = state_dict.pop('cls_token') + state_dict['pos_embed'][:, 0] out_dict['pos_embed'] = state_dict.pop('pos_embed')[:, 1:] for (k, v) in state_dict.items(): if re.match('blocks\\.(\\d+)\\.mlp\\.w12\\.(?:weight|bias)', k): out_dict[k.replace('w12', 'fc1')] = v continue elif re.match('blocks\\.(\\d+)\\.mlp\\.w3\\.(?:weight|bias)', k): out_dict[k.replace('w3', 'fc2')] = v continue out_dict[k] = v return out_dict def checkpoint_filter_fn(state_dict: Dict[str, torch.Tensor], model: VisionTransformer, adapt_layer_scale: bool=False, interpolation: str='bicubic', antialias: bool=True) -> Dict[str, torch.Tensor]: import re out_dict = {} state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) prefix = '' if 'visual.class_embedding' in state_dict: state_dict = _convert_openai_clip(state_dict, model) elif 'module.visual.class_embedding' in state_dict: state_dict = _convert_openai_clip(state_dict, model, prefix='module.visual.') elif 'mask_token' in state_dict: state_dict = _convert_dinov2(state_dict, model) elif 'encoder' in state_dict: state_dict = state_dict['encoder'] prefix = 'module.' elif 'visual.trunk.pos_embed' in state_dict or 'visual.trunk.blocks.0.norm1.weight' in state_dict: prefix = 'visual.trunk.' 
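# Hedged illustration of the prefix stripping performed just below (toy
# dictionary with hypothetical keys, not a real checkpoint): OpenCLIP-style
# checkpoints store the ViT trunk under 'visual.trunk.', which is removed so
# the keys line up with this model's state_dict; non-matching keys are dropped.
toy_sd = {
    'visual.trunk.pos_embed': 'pos-embed-tensor',
    'visual.trunk.blocks.0.norm1.weight': 'norm-weight-tensor',
    'logit_scale': 'dropped (no prefix match)',
}
toy_prefix = 'visual.trunk.'
remapped = {k[len(toy_prefix):]: v for k, v in toy_sd.items() if k.startswith(toy_prefix)}
print(sorted(remapped))  # ['blocks.0.norm1.weight', 'pos_embed']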
if 'visual.head.proj.weight' in state_dict and isinstance(model.head, nn.Linear): out_dict['head.weight'] = state_dict['visual.head.proj.weight'] out_dict['head.bias'] = torch.zeros(state_dict['visual.head.proj.weight'].shape[0]) if prefix: state_dict = {k[len(prefix):]: v for (k, v) in state_dict.items() if k.startswith(prefix)} for (k, v) in state_dict.items(): if 'patch_embed.proj.weight' in k: (O, I, H, W) = model.patch_embed.proj.weight.shape if len(v.shape) < 4: (O, I, H, W) = model.patch_embed.proj.weight.shape v = v.reshape(O, -1, H, W) if v.shape[-1] != W or v.shape[-2] != H: v = resample_patch_embed(v, (H, W), interpolation=interpolation, antialias=antialias, verbose=True) elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) v = resample_abs_pos_embed(v, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True) elif adapt_layer_scale and 'gamma_' in k: k = re.sub('gamma_([0-9])', 'ls\\1.gamma', k) elif 'pre_logits' in k: continue out_dict[k] = v return out_dict def _cfg(url: str='', **kwargs) -> Dict[str, Any]: return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs} default_cfgs = {'vit_base_patch16_224.augreg2_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), 'vit_base_patch16_384.augreg2_in21k_ft_in1k': _cfg(), 'vit_base_patch8_224.augreg2_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), 'vit_tiny_patch16_224.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_tiny_patch16_384.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_small_patch32_224.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_small_patch32_384.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_small_patch16_224.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_small_patch16_384.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch32_224.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', 
hf_hub_id='timm/', custom_load=True), 'vit_base_patch32_384.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch16_224.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch16_384.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch8_224.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_large_patch16_224.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_large_patch16_384.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch16_224.orig_in21k_ft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth', hf_hub_id='timm/'), 'vit_base_patch16_384.orig_in21k_ft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_large_patch32_384.orig_in21k_ft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_small_patch16_224.augreg_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/S_16-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_small_patch16_384.augreg_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/S_16-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch32_224.augreg_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/B_32-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch32_384.augreg_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/B_32-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch16_224.augreg_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/B_16-i1k-300ep-lr_0.001-aug_strong2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 
'vit_base_patch16_384.augreg_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/B_16-i1k-300ep-lr_0.001-aug_strong2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_large_patch14_224.untrained': _cfg(url=''), 'vit_huge_patch14_224.untrained': _cfg(url=''), 'vit_giant_patch14_224.untrained': _cfg(url=''), 'vit_gigantic_patch14_224.untrained': _cfg(url=''), 'vit_base_patch32_224.orig_in21k': _cfg(hf_hub_id='timm/', num_classes=0), 'vit_base_patch16_224.orig_in21k': _cfg(hf_hub_id='timm/', num_classes=0), 'vit_large_patch32_224.orig_in21k': _cfg(hf_hub_id='timm/', num_classes=0), 'vit_large_patch16_224.orig_in21k': _cfg(hf_hub_id='timm/', num_classes=0), 'vit_huge_patch14_224.orig_in21k': _cfg(hf_hub_id='timm/', num_classes=0), 'vit_tiny_patch16_224.augreg_in21k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_small_patch32_224.augreg_in21k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_small_patch16_224.augreg_in21k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch32_224.augreg_in21k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch16_224.augreg_in21k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch8_224.augreg_in21k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_large_patch16_224.augreg_in21k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch32_224.sam_in1k': _cfg(url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz', custom_load=True, hf_hub_id='timm/'), 'vit_base_patch16_224.sam_in1k': _cfg(url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz', custom_load=True, hf_hub_id='timm/'), 'vit_small_patch16_224.dino': _cfg(url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_small_patch8_224.dino': _cfg(url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_base_patch16_224.dino': _cfg(url='https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_base_patch8_224.dino': _cfg(url='https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 
'vit_small_patch14_dinov2.lvd142m': _cfg(url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_base_patch14_dinov2.lvd142m': _cfg(url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_large_patch14_dinov2.lvd142m': _cfg(url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_giant_patch14_dinov2.lvd142m': _cfg(url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_small_patch14_reg4_dinov2.lvd142m': _cfg(url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_base_patch14_reg4_dinov2.lvd142m': _cfg(url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_large_patch14_reg4_dinov2.lvd142m': _cfg(url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_giant_patch14_reg4_dinov2.lvd142m': _cfg(url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_base_patch16_224_miil.in21k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_in21k_miil-887286df.pth', hf_hub_id='timm/', mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0), crop_pct=0.875, interpolation='bilinear', num_classes=11221), 'vit_base_patch16_224_miil.in21k_ft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_1k_miil_84_4-2deb18e3.pth', hf_hub_id='timm/', mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0), crop_pct=0.875, interpolation='bilinear'), 'vit_base_patch16_rpn_224.sw_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_base_patch16_rpn_224-sw-3b07e89d.pth', hf_hub_id='timm/'), 'vit_medium_patch16_gap_240.sw_in12k': _cfg(hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=11821), 'vit_medium_patch16_gap_256.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_medium_patch16_gap_384.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=0.95, crop_mode='squash'), 'vit_base_patch16_gap_224': _cfg(), 'vit_base_patch32_clip_224.laion2b_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, 
std=OPENAI_CLIP_STD), 'vit_base_patch32_clip_384.laion2b_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384)), 'vit_base_patch32_clip_448.laion2b_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 448, 448)), 'vit_base_patch16_clip_224.laion2b_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95), 'vit_base_patch16_clip_384.laion2b_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0), 'vit_large_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_base_patch32_clip_224.openai_ft_in12k_in1k': _cfg(mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch32_clip_384.openai_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95, input_size=(3, 384, 384), crop_mode='squash'), 'vit_base_patch16_clip_224.openai_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95), 'vit_base_patch16_clip_384.openai_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.openai_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_large_patch14_clip_336.openai_ft_in12k_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_base_patch32_clip_224.laion2b_ft_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch16_clip_224.laion2b_ft_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_base_patch16_clip_384.laion2b_ft_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.laion2b_ft_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0), 'vit_large_patch14_clip_336.laion2b_ft_in1k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_huge_patch14_clip_224.laion2b_ft_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_huge_patch14_clip_336.laion2b_ft_in1k': _cfg(hf_hub_id='', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_base_patch32_clip_224.openai_ft_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch16_clip_224.openai_ft_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch16_clip_384.openai_ft_in1k': 
_cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.openai_ft_in1k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_base_patch32_clip_224.laion2b_ft_in12k': _cfg(mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_base_patch16_clip_224.laion2b_ft_in12k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_large_patch14_clip_224.laion2b_ft_in12k': _cfg(hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=11821), 'vit_huge_patch14_clip_224.laion2b_ft_in12k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821), 'vit_base_patch32_clip_224.openai_ft_in12k': _cfg(mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_base_patch16_clip_224.openai_ft_in12k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_large_patch14_clip_224.openai_ft_in12k': _cfg(hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821), 'vit_base_patch32_clip_224.laion2b': _cfg(hf_hub_id='laion/CLIP-ViT-B-32-laion2B-s34B-b79K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_base_patch16_clip_224.laion2b': _cfg(hf_hub_id='laion/CLIP-ViT-B-16-laion2B-s34B-b88K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.laion2b': _cfg(hf_hub_id='laion/CLIP-ViT-L-14-laion2B-s32B-b82K', hf_hub_filename='open_clip_pytorch_model.bin', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=768), 'vit_huge_patch14_clip_224.laion2b': _cfg(hf_hub_id='laion/CLIP-ViT-H-14-laion2B-s32B-b79K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_giant_patch14_clip_224.laion2b': _cfg(hf_hub_id='laion/CLIP-ViT-g-14-laion2B-s12B-b42K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_gigantic_patch14_clip_224.laion2b': _cfg(hf_hub_id='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1280), 'vit_base_patch32_clip_224.datacompxl': _cfg(hf_hub_id='laion/CLIP-ViT-B-32-DataComp.XL-s13B-b90K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_base_patch32_clip_256.datacompxl': _cfg(hf_hub_id='laion/CLIP-ViT-B-32-256x256-DataComp-s34B-b86K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 256, 256), num_classes=512), 'vit_base_patch16_clip_224.datacompxl': _cfg(hf_hub_id='laion/CLIP-ViT-B-16-DataComp.XL-s13B-b90K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.datacompxl': _cfg(hf_hub_id='laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_base_patch16_clip_224.dfn2b': _cfg(hf_hub_id='apple/DFN2B-CLIP-ViT-B-16', 
hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.dfn2b': _cfg(hf_hub_id='apple/DFN2B-CLIP-ViT-L-14', hf_hub_filename='open_clip_pytorch_model.bin', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_huge_patch14_clip_224.dfn5b': _cfg(hf_hub_id='apple/DFN5B-CLIP-ViT-H-14', hf_hub_filename='open_clip_pytorch_model.bin', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_huge_patch14_clip_378.dfn5b': _cfg(hf_hub_id='apple/DFN5B-CLIP-ViT-H-14-378', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, notes=('natively QuickGELU, use quickgelu model variant for original results',), crop_pct=1.0, input_size=(3, 378, 378), num_classes=1024), 'vit_base_patch32_clip_224.metaclip_2pt5b': _cfg(hf_hub_id='facebook/metaclip-b32-fullcc2.5b', hf_hub_filename='metaclip_b32_fullcc2.5b.bin', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_base_patch16_clip_224.metaclip_2pt5b': _cfg(hf_hub_id='facebook/metaclip-b16-fullcc2.5b', hf_hub_filename='metaclip_b16_fullcc2.5b.bin', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.metaclip_2pt5b': _cfg(hf_hub_id='facebook/metaclip-l14-fullcc2.5b', hf_hub_filename='metaclip_l14_fullcc2.5b.bin', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_huge_patch14_clip_224.metaclip_2pt5b': _cfg(hf_hub_id='facebook/metaclip-h14-fullcc2.5b', hf_hub_filename='metaclip_h14_fullcc2.5b.bin', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_base_patch32_clip_224.openai': _cfg(hf_hub_id='timm/vit_base_patch32_clip_224.openai', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_base_patch16_clip_224.openai': _cfg(hf_hub_id='timm/vit_base_patch16_clip_224.openai', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_large_patch14_clip_224.openai': _cfg(hf_hub_id='timm/vit_large_patch14_clip_224.openai', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_large_patch14_clip_336.openai': _cfg(hf_hub_id='timm/vit_large_patch14_clip_336.openai', hf_hub_filename='open_clip_pytorch_model.bin', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), num_classes=768), 'vit_base_patch32_plus_256.untrained': _cfg(url='', input_size=(3, 256, 256), crop_pct=0.95), 'vit_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240), crop_pct=0.95), 
'vit_small_patch16_36x1_224.untrained': _cfg(url=''), 'vit_small_patch16_18x2_224.untrained': _cfg(url=''), 'vit_base_patch16_18x2_224.untrained': _cfg(url=''), 'eva_large_patch14_196.in22k_ft_in22k_in1k': _cfg(hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 196, 196), crop_pct=1.0), 'eva_large_patch14_336.in22k_ft_in22k_in1k': _cfg(hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'eva_large_patch14_196.in22k_ft_in1k': _cfg(hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 196, 196), crop_pct=1.0), 'eva_large_patch14_336.in22k_ft_in1k': _cfg(hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'flexivit_small.1200ep_in1k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_small.600ep_in1k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k_600ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_small.300ep_in1k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.1200ep_in1k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.600ep_in1k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k_600ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.300ep_in1k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.1000ep_in21k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i21k_1000ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'flexivit_base.300ep_in21k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i21k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'flexivit_large.1200ep_in1k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_large.600ep_in1k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k_600ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_large.300ep_in1k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.patch16_in21k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/vit_b16_i21k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'flexivit_base.patch30_in21k': _cfg(url='https://storage.googleapis.com/big_vision/flexivit/vit_b30_i21k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'vit_base_patch16_xp_224.untrained': _cfg(url=''), 
'vit_large_patch14_xp_224.untrained': _cfg(url=''), 'vit_huge_patch14_xp_224.untrained': _cfg(url=''), 'vit_base_patch16_224.mae': _cfg(url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_large_patch16_224.mae': _cfg(url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_large.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_224.mae': _cfg(url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_huge.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_gap_224.in1k_ijepa': _cfg(url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.14-300e.pth.tar', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_gap_224.in22k_ijepa': _cfg(url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.h.14-900e.pth.tar', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch16_gap_448.in1k_ijepa': _cfg(url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.16-448px-300e.pth.tar', license='cc-by-nc-4.0', input_size=(3, 448, 448), crop_pct=1.0, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_giant_patch16_gap_224.in22k_ijepa': _cfg(url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.g.16-600e.pth.tar', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_base_patch16_siglip_224.webli': _cfg(hf_hub_id='timm/ViT-B-16-SigLIP', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0), 'vit_base_patch16_siglip_256.webli': _cfg(hf_hub_id='timm/ViT-B-16-SigLIP-256', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 256, 256), num_classes=0), 'vit_base_patch16_siglip_384.webli': _cfg(hf_hub_id='timm/ViT-B-16-SigLIP-384', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 384, 384), num_classes=0), 'vit_base_patch16_siglip_512.webli': _cfg(hf_hub_id='timm/ViT-B-16-SigLIP-512', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 512, 512), num_classes=0), 'vit_large_patch16_siglip_256.webli': _cfg(hf_hub_id='timm/ViT-L-16-SigLIP-256', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 256, 256), num_classes=0), 'vit_large_patch16_siglip_384.webli': _cfg(hf_hub_id='timm/ViT-L-16-SigLIP-384', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 384, 384), num_classes=0), 'vit_so400m_patch14_siglip_224.webli': _cfg(hf_hub_id='timm/ViT-SO400M-14-SigLIP', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0), 'vit_so400m_patch14_siglip_384.webli': _cfg(hf_hub_id='timm/ViT-SO400M-14-SigLIP-384', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 384, 384), num_classes=0), 'vit_base_patch16_siglip_gap_224.webli': _cfg(hf_hub_id='timm/ViT-B-16-SigLIP', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0), 'vit_base_patch16_siglip_gap_256.webli': _cfg(hf_hub_id='timm/ViT-B-16-SigLIP-256', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 256, 256), num_classes=0), 'vit_base_patch16_siglip_gap_384.webli': _cfg(hf_hub_id='timm/ViT-B-16-SigLIP-384', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 384, 384), num_classes=0), 'vit_base_patch16_siglip_gap_512.webli': _cfg(hf_hub_id='timm/ViT-B-16-SigLIP-512', 
hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 512, 512), num_classes=0), 'vit_large_patch16_siglip_gap_256.webli': _cfg(hf_hub_id='timm/ViT-L-16-SigLIP-256', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 256, 256), num_classes=0), 'vit_large_patch16_siglip_gap_384.webli': _cfg(hf_hub_id='timm/ViT-L-16-SigLIP-384', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 384, 384), num_classes=0), 'vit_so400m_patch14_siglip_gap_224.webli': _cfg(hf_hub_id='timm/ViT-SO400M-14-SigLIP', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0), 'vit_so400m_patch14_siglip_gap_224.pali_mix': _cfg(hf_hub_id='google/paligemma-3b-mix-224-jax', hf_hub_filename='paligemma-3b-mix-224.npz', custom_load='hf', num_classes=0), 'vit_so400m_patch14_siglip_gap_224.pali_pt': _cfg(hf_hub_id='google/paligemma-3b-pt-224-jax', hf_hub_filename='paligemma-3b-pt-224.npz', custom_load='hf', num_classes=0), 'vit_so400m_patch14_siglip_gap_384.webli': _cfg(hf_hub_id='timm/ViT-SO400M-14-SigLIP-384', hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 384, 384), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_448.pali_mix': _cfg(hf_hub_id='google/paligemma-3b-mix-448-jax', hf_hub_filename='paligemma-3b-mix-448.npz', custom_load='hf', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_448.pali_pt': _cfg(hf_hub_id='google/paligemma-3b-pt-448-jax', hf_hub_filename='paligemma-3b-pt-448.npz', custom_load='hf', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_896.pali_pt': _cfg(hf_hub_id='google/paligemma-3b-pt-896-jax', hf_hub_filename='paligemma-3b-pt-896.npz', custom_load='hf', input_size=(3, 896, 896), crop_pct=1.0, num_classes=0), 'vit_xsmall_patch16_clip_224.tinyclip_yfcc15m': _cfg(hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_medium_patch32_clip_224.tinyclip_laion400m': _cfg(hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_medium_patch16_clip_224.tinyclip_yfcc15m': _cfg(hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_betwixt_patch32_clip_224.tinyclip_laion400m': _cfg(hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_wee_patch16_reg1_gap_256.sbb_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_pwee_patch16_reg1_gap_256.sbb_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_little_patch16_reg1_gap_256.sbb_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_little_patch16_reg1_gap_256.sbb_in12k': _cfg(hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95), 'vit_little_patch16_reg4_gap_256.sbb_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_medium_patch16_reg1_gap_256.sbb_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_medium_patch16_reg4_gap_256.sbb_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_medium_patch16_reg4_gap_256.sbb_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_medium_patch16_reg4_gap_256.sbb_in12k': _cfg(hf_hub_id='timm/', num_classes=11821, 
        input_size=(3, 256, 256), crop_pct=0.95),
    'vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95),
    'vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95),
    'vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k': _cfg(hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95),
    'vit_mediumd_patch16_reg4_gap_256.sbb_in12k': _cfg(hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95),
    'vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0),
    'vit_betwixt_patch16_reg1_gap_256.sbb_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95),
    'vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95),
    'vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95),
    'vit_betwixt_patch16_reg4_gap_256.sbb_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95),
    'vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k': _cfg(hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95),
    'vit_betwixt_patch16_reg4_gap_256.sbb_in12k': _cfg(hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95),
    'vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0),
    'vit_base_patch16_reg4_gap_256.untrained': _cfg(input_size=(3, 256, 256)),
    'vit_so150m_patch16_reg4_gap_256.untrained': _cfg(input_size=(3, 256, 256)),
    'vit_so150m_patch16_reg4_map_256.untrained': _cfg(input_size=(3, 256, 256)),
    'test_vit.r160_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 160, 160), crop_pct=0.875),
}

# pretrained tags trained w/ QuickGELU get duplicated under the matching *_clip_quickgelu_*
# model names so the weights can be run with their original activation
_quick_gelu_cfgs = [
    'vit_large_patch14_clip_224.dfn2b',
    'vit_huge_patch14_clip_224.dfn5b',
    'vit_huge_patch14_clip_378.dfn5b',
    'vit_base_patch32_clip_224.metaclip_2pt5b',
    'vit_base_patch16_clip_224.metaclip_2pt5b',
    'vit_large_patch14_clip_224.metaclip_2pt5b',
    'vit_huge_patch14_clip_224.metaclip_2pt5b',
    'vit_base_patch32_clip_224.openai',
    'vit_base_patch16_clip_224.openai',
    'vit_large_patch14_clip_224.openai',
    'vit_large_patch14_clip_336.openai',
]
default_cfgs.update({n.replace('_clip_', '_clip_quickgelu_'): default_cfgs[n] for n in _quick_gelu_cfgs})
default_cfgs = generate_default_cfgs(default_cfgs)


def _create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> VisionTransformer:
    out_indices = kwargs.pop('out_indices', 3)
    if 'flexi' in variant:
        # FlexiViT pretrained weights resize best w/ bilinear, non-antialiased interpolation
        _filter_fn = partial(checkpoint_filter_fn, interpolation='bilinear', antialias=False)
    else:
        _filter_fn = checkpoint_filter_fn

    # siglip attn-pool head weights don't load when global_pool is changed from 'map', load non-strict
    strict = True
    if 'siglip' in variant and kwargs.get('global_pool', None) != 'map':
        strict = False

    return build_model_with_cfg(
        VisionTransformer,
        variant,
        pretrained,
        pretrained_filter_fn=_filter_fn,
        pretrained_strict=strict,
        feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
        **kwargs,
    )
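# Hedged usage sketch (added, not part of the original module): the registered variants below
# are built through timm.create_model, which routes pretrained loading through
# checkpoint_filter_fn and feature extraction through the feature_cfg above. The names used
# are defined in this file; num_classes / features_only are standard create_model arguments.
def _example_create_model():
    import timm
    model = timm.create_model('vit_tiny_patch16_224', pretrained=False, num_classes=10)
    logits = model(torch.randn(1, 3, 224, 224))      # -> shape (1, 10)
    # features_only returns intermediate block outputs selected via out_indices (3 by default)
    feat_model = timm.create_model('vit_tiny_patch16_224', pretrained=False, features_only=True)
    feats = feat_model(torch.randn(1, 3, 224, 224))  # list of 3 feature tensors
    return logits, feats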
@register_model
def vit_tiny_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3)
    model = _create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_tiny_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3)
    model = _create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_small_patch32_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6)
    model = _create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_small_patch32_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6)
    model = _create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_small_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6)
    model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_small_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6)
    model = _create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_small_patch8_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=8, embed_dim=384, depth=12, num_heads=6)
    model = _create_vision_transformer('vit_small_patch8_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch32_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12)
    model = _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch32_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12)
    model = _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
    model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
    model = _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch8_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12)
    model = _create_vision_transformer('vit_base_patch8_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch32_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16)
    model = _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch32_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16)
    model = _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model
@register_model
def vit_large_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16)
    model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16)
    model = _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16)
    model = _create_vision_transformer('vit_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_huge_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16)
    model = _create_vision_transformer('vit_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_giant_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=1408, mlp_ratio=48 / 11, depth=40, num_heads=16)
    model = _create_vision_transformer('vit_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_gigantic_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=1664, mlp_ratio=64 / 13, depth=48, num_heads=16)
    model = _create_vision_transformer('vit_gigantic_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_224_miil(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False)
    model = _create_vision_transformer('vit_base_patch16_224_miil', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_medium_patch16_gap_240(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False,
        global_pool='avg', qkv_bias=False, init_values=1e-06, fc_norm=False)
    model = _create_vision_transformer('vit_medium_patch16_gap_240', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_medium_patch16_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False,
        global_pool='avg', qkv_bias=False, init_values=1e-06, fc_norm=False)
    model = _create_vision_transformer('vit_medium_patch16_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_medium_patch16_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False,
        global_pool='avg', qkv_bias=False, init_values=1e-06, fc_norm=False)
    model = _create_vision_transformer('vit_medium_patch16_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_betwixt_patch16_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=640, depth=12, num_heads=10, class_token=False,
        global_pool='avg', qkv_bias=False, init_values=1e-06, fc_norm=False)
    # NOTE built against the 'vit_medium_patch16_gap_256' cfg; no vit_betwixt_patch16_gap_256
    # entry is registered in default_cfgs above
    model = _create_vision_transformer('vit_medium_patch16_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model
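# Hedged sketch (added): the *_gap_* variants above drop the class token (class_token=False)
# and mean-pool patch tokens (global_pool='avg'). With num_classes=0 the pooled embedding is
# returned directly; embed_dim=512 for the medium variants. img_size=224 is passed explicitly
# since the nn.Module default is 224 regardless of the _256 cfg name.
def _example_gap_pooling():
    import timm
    model = timm.create_model('vit_medium_patch16_gap_256', pretrained=False, num_classes=0, img_size=224)
    feats = model(torch.randn(1, 3, 224, 224))
    return feats.shape  # torch.Size([1, 512])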
@register_model
def vit_base_patch16_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=16, class_token=False,
        global_pool='avg', fc_norm=False)
    model = _create_vision_transformer('vit_base_patch16_gap_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_huge_patch14_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=1280, depth=32, num_heads=16, class_token=False,
        global_pool='avg', fc_norm=False)
    model = _create_vision_transformer('vit_huge_patch14_gap_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_huge_patch16_gap_448(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=1280, depth=32, num_heads=16, class_token=False,
        global_pool='avg', fc_norm=False)
    model = _create_vision_transformer('vit_huge_patch16_gap_448', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_giant_patch16_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=48 / 11,
        class_token=False, global_pool='avg', fc_norm=False)
    model = _create_vision_transformer('vit_giant_patch16_gap_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_xsmall_patch16_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(embed_dim=256, depth=10, num_heads=4, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_xsmall_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_medium_patch32_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=512, depth=12, num_heads=8, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_medium_patch32_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_medium_patch16_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(embed_dim=512, depth=12, num_heads=8, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_medium_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_betwixt_patch32_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=640, depth=12, num_heads=10, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_betwixt_patch32_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch32_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_base_patch32_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch32_clip_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_base_patch32_clip_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model
@register_model
def vit_base_patch32_clip_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_base_patch32_clip_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch32_clip_448(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_base_patch32_clip_448', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_base_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_clip_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_base_patch16_clip_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch14_clip_336(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_huge_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_huge_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_huge_patch14_clip_336(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_huge_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_huge_patch14_clip_378(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_huge_patch14_clip_378', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_giant_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=1408, mlp_ratio=48 / 11, depth=40, num_heads=16,
        pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model
@register_model
def vit_gigantic_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=1664, mlp_ratio=64 / 13, depth=48, num_heads=16,
        pre_norm=True, norm_layer=nn.LayerNorm)
    model = _create_vision_transformer('vit_gigantic_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch32_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True,
        norm_layer=nn.LayerNorm, act_layer='quick_gelu')
    model = _create_vision_transformer('vit_base_patch32_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True,
        norm_layer=nn.LayerNorm, act_layer='quick_gelu')
    model = _create_vision_transformer('vit_base_patch16_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch14_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True,
        norm_layer=nn.LayerNorm, act_layer='quick_gelu')
    model = _create_vision_transformer('vit_large_patch14_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch14_clip_quickgelu_336(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True,
        norm_layer=nn.LayerNorm, act_layer='quick_gelu')
    model = _create_vision_transformer('vit_large_patch14_clip_quickgelu_336', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_huge_patch14_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True,
        norm_layer=nn.LayerNorm, act_layer='quick_gelu')
    model = _create_vision_transformer('vit_huge_patch14_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_huge_patch14_clip_quickgelu_378(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True,
        norm_layer=nn.LayerNorm, act_layer='quick_gelu')
    model = _create_vision_transformer('vit_huge_patch14_clip_quickgelu_378', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch32_plus_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, init_values=1e-05)
    model = _create_vision_transformer('vit_base_patch32_plus_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_plus_240(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14, init_values=1e-05)
    model = _create_vision_transformer('vit_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs))
    return model
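# Hedged sketch (added): the *_clip_quickgelu_* variants above differ from their *_clip_*
# twins only in act_layer='quick_gelu'. _quick_gelu_cfgs earlier duplicates the matching
# pretrained tags (.openai, .metaclip_2pt5b, .dfn2b/.dfn5b) under the quickgelu names so
# those weights can be run with their original activation.
def _example_quickgelu_variant():
    import timm
    # 'model_name.tag' selection is standard create_model usage; pretrained=False just builds
    model = timm.create_model('vit_base_patch16_clip_quickgelu_224.openai', pretrained=False)
    return model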
@register_model
def vit_base_patch16_rpn_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False,
        init_values=1e-05, class_token=False, block_fn=ResPostBlock, global_pool='avg')
    model = _create_vision_transformer('vit_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_small_patch16_36x1_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=6, init_values=1e-05)
    model = _create_vision_transformer('vit_small_patch16_36x1_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_small_patch16_18x2_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=384, depth=18, num_heads=6, init_values=1e-05,
        block_fn=ParallelThingsBlock)
    model = _create_vision_transformer('vit_small_patch16_18x2_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_18x2_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=768, depth=18, num_heads=12, init_values=1e-05,
        block_fn=ParallelThingsBlock)
    model = _create_vision_transformer('vit_base_patch16_18x2_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva_large_patch14_196(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, global_pool='avg')
    model = _create_vision_transformer('eva_large_patch14_196', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva_large_patch14_336(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, global_pool='avg')
    model = _create_vision_transformer('eva_large_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def flexivit_small(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True)
    model = _create_vision_transformer('flexivit_small', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def flexivit_base(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True)
    model = _create_vision_transformer('flexivit_base', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def flexivit_large(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True)
    model = _create_vision_transformer('flexivit_large', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_xp_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, no_embed_class=True,
        norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True)
    model = _create_vision_transformer('vit_base_patch16_xp_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch14_xp_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, no_embed_class=True,
        norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True)
    model = _create_vision_transformer('vit_large_patch14_xp_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model
@register_model
def vit_huge_patch14_xp_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, no_embed_class=True,
        norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True)
    model = _create_vision_transformer('vit_huge_patch14_xp_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_small_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=384, depth=12, num_heads=6, init_values=1e-05)
    model = _create_vision_transformer('vit_small_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=768, depth=12, num_heads=12, init_values=1e-05)
    model = _create_vision_transformer('vit_base_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=1e-05)
    model = _create_vision_transformer('vit_large_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_giant_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-05,
        mlp_ratio=2.66667 * 2, mlp_layer=SwiGLUPacked, act_layer=nn.SiLU)
    model = _create_vision_transformer('vit_giant_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_small_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=384, depth=12, num_heads=6, init_values=1e-05,
        reg_tokens=4, no_embed_class=True)
    model = _create_vision_transformer('vit_small_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=768, depth=12, num_heads=12, init_values=1e-05,
        reg_tokens=4, no_embed_class=True)
    model = _create_vision_transformer('vit_base_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=1e-05,
        reg_tokens=4, no_embed_class=True)
    model = _create_vision_transformer('vit_large_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_giant_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-05,
        mlp_ratio=2.66667 * 2, mlp_layer=SwiGLUPacked, act_layer=nn.SiLU,
        reg_tokens=4, no_embed_class=True)
    model = _create_vision_transformer('vit_giant_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
    return model
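# Hedged sketch (added): the *_reg4_dinov2 variants above allocate four register tokens
# (reg_tokens=4) and skip pos_embed on the prefix tokens (no_embed_class=True). At
# img_size=224 with patch_size=14 the grid is 16x16, so forward_features should yield
# 1 cls + 4 reg + 256 patch = 261 tokens at width 384 for the small variant.
def _example_register_tokens():
    import timm
    model = timm.create_model('vit_small_patch14_reg4_dinov2', pretrained=False, img_size=224)
    tokens = model.forward_features(torch.randn(1, 3, 224, 224))
    return tokens.shape  # torch.Size([1, 261, 384])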
vit_base_patch16_siglip_224(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map') model = _create_vision_transformer('vit_base_patch16_siglip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_siglip_256(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map') model = _create_vision_transformer('vit_base_patch16_siglip_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_siglip_384(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map') model = _create_vision_transformer('vit_base_patch16_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_siglip_512(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map') model = _create_vision_transformer('vit_base_patch16_siglip_512', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_siglip_256(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='map') model = _create_vision_transformer('vit_large_patch16_siglip_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_siglip_384(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='map') model = _create_vision_transformer('vit_large_patch16_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_so400m_patch14_siglip_224(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map') model = _create_vision_transformer('vit_so400m_patch14_siglip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_so400m_patch14_siglip_384(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map') model = _create_vision_transformer('vit_so400m_patch14_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_siglip_gap_224(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer('vit_base_patch16_siglip_gap_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_siglip_gap_256(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer('vit_base_patch16_siglip_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model 
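# The '_gap_' SigLIP registrations replace the attention-pooled 'map' head used above with
# plain global average pooling (global_pool='avg', fc_norm=False), which is convenient when
# the tower is used purely as a feature extractor. A minimal sketch (hypothetical usage,
# not part of the original source):
#
#   import timm, torch
#   tower = timm.create_model('vit_base_patch16_siglip_gap_384', pretrained=False, num_classes=0)
#   feats = tower(torch.randn(1, 3, 384, 384))  # pooled embedding of shape (1, 768)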
def vit_base_patch16_siglip_gap_384(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer('vit_base_patch16_siglip_gap_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_siglip_gap_512(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer('vit_base_patch16_siglip_gap_512', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_siglip_gap_256(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer('vit_large_patch16_siglip_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_siglip_gap_384(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer('vit_large_patch16_siglip_gap_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_so400m_patch14_siglip_gap_224(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer('vit_so400m_patch14_siglip_gap_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_so400m_patch14_siglip_gap_384(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer('vit_so400m_patch14_siglip_gap_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_so400m_patch14_siglip_gap_448(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer('vit_so400m_patch14_siglip_gap_448', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_so400m_patch14_siglip_gap_896(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer('vit_so400m_patch14_siglip_gap_896', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_wee_patch16_reg1_gap_256(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=256, depth=14, num_heads=4, init_values=1e-05, mlp_ratio=5, class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg') model = _create_vision_transformer('vit_wee_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_pwee_patch16_reg1_gap_256(pretrained: bool=False, **kwargs) -> VisionTransformer: 
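    # Every registration here merges defaults and caller overrides via
    # `**dict(model_args, **kwargs)`: model_args seeds the dict, then kwargs wins on any
    # clash. E.g. `timm.create_model('vit_pwee_patch16_reg1_gap_256', img_size=384)`
    # (illustrative call, not from the original source) keeps the recipe but changes input size.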
model_args = dict(patch_size=16, embed_dim=256, depth=16, num_heads=4, init_values=1e-05, mlp_ratio=5, class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg', block_fn=ParallelScalingBlock) model = _create_vision_transformer('vit_pwee_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_little_patch16_reg1_gap_256(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=320, depth=14, num_heads=5, init_values=1e-05, mlp_ratio=5.6, class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg') model = _create_vision_transformer('vit_little_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_little_patch16_reg4_gap_256(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=320, depth=14, num_heads=5, init_values=1e-05, mlp_ratio=5.6, class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg') model = _create_vision_transformer('vit_little_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_medium_patch16_reg1_gap_256(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, init_values=1e-05, class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg') model = _create_vision_transformer('vit_medium_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_medium_patch16_reg4_gap_256(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, init_values=1e-05, class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg') model = _create_vision_transformer('vit_medium_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_mediumd_patch16_reg4_gap_256(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=512, depth=20, num_heads=8, init_values=1e-05, class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg') model = _create_vision_transformer('vit_mediumd_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_mediumd_patch16_reg4_gap_384(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=512, depth=20, num_heads=8, init_values=1e-05, class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg') model = _create_vision_transformer('vit_mediumd_patch16_reg4_gap_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_betwixt_patch16_reg1_gap_256(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=640, depth=12, num_heads=10, init_values=1e-05, class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg') model = _create_vision_transformer('vit_betwixt_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_betwixt_patch16_reg4_gap_256(pretrained: bool=False, **kwargs) -> VisionTransformer: model_args = dict(patch_size=16, embed_dim=640, depth=12, num_heads=10, init_values=1e-05, class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg') model = 
_create_vision_transformer('vit_betwixt_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model

@register_model
def vit_betwixt_patch16_reg4_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=640, depth=12, num_heads=10, init_values=1e-05,
        class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg')
    model = _create_vision_transformer('vit_betwixt_patch16_reg4_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model

@register_model
def vit_base_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12,
        class_token=False, no_embed_class=True, global_pool='avg', reg_tokens=4)
    model = _create_vision_transformer('vit_base_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model

@register_model
def vit_so150m_patch16_reg4_map_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=896, depth=18, num_heads=14, mlp_ratio=2.572,
        class_token=False, reg_tokens=4, global_pool='map')
    model = _create_vision_transformer('vit_so150m_patch16_reg4_map_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model

@register_model
def vit_so150m_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=896, depth=18, num_heads=14, mlp_ratio=2.572,
        class_token=False, reg_tokens=4, global_pool='avg', fc_norm=False)
    model = _create_vision_transformer('vit_so150m_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model

@register_model
def test_vit(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(patch_size=16, embed_dim=64, depth=6, num_heads=2, mlp_ratio=3)
    model = _create_vision_transformer('test_vit', pretrained=pretrained, **dict(model_args, **kwargs))
    return model

register_model_deprecations(__name__, {
    'vit_tiny_patch16_224_in21k': 'vit_tiny_patch16_224.augreg_in21k',
    'vit_small_patch32_224_in21k': 'vit_small_patch32_224.augreg_in21k',
    'vit_small_patch16_224_in21k': 'vit_small_patch16_224.augreg_in21k',
    'vit_base_patch32_224_in21k': 'vit_base_patch32_224.augreg_in21k',
    'vit_base_patch16_224_in21k': 'vit_base_patch16_224.augreg_in21k',
    'vit_base_patch8_224_in21k': 'vit_base_patch8_224.augreg_in21k',
    'vit_large_patch32_224_in21k': 'vit_large_patch32_224.orig_in21k',
    'vit_large_patch16_224_in21k': 'vit_large_patch16_224.augreg_in21k',
    'vit_huge_patch14_224_in21k': 'vit_huge_patch14_224.orig_in21k',
    'vit_base_patch32_224_sam': 'vit_base_patch32_224.sam',
    'vit_base_patch16_224_sam': 'vit_base_patch16_224.sam',
    'vit_small_patch16_224_dino': 'vit_small_patch16_224.dino',
    'vit_small_patch8_224_dino': 'vit_small_patch8_224.dino',
    'vit_base_patch16_224_dino': 'vit_base_patch16_224.dino',
    'vit_base_patch8_224_dino': 'vit_base_patch8_224.dino',
    'vit_base_patch16_224_miil_in21k': 'vit_base_patch16_224_miil.in21k',
    'vit_base_patch32_224_clip_laion2b': 'vit_base_patch32_clip_224.laion2b',
    'vit_large_patch14_224_clip_laion2b': 'vit_large_patch14_clip_224.laion2b',
    'vit_huge_patch14_224_clip_laion2b': 'vit_huge_patch14_clip_224.laion2b',
    'vit_giant_patch14_224_clip_laion2b': 'vit_giant_patch14_clip_224.laion2b',
})
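# Sketch of what the deprecation table above does for callers (hypothetical usage; the
# exact warning text is an assumption, but recent timm versions remap old names with a warning):
#
#   import timm
#   m = timm.create_model('vit_base_patch16_224_in21k', pretrained=False)
#   # warns and resolves to 'vit_base_patch16_224.augreg_in21k'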
# File: pytorch-image-models-main/timm/models/vision_transformer_hybrid.py
""""""
import math
from functools import partial
from typing import Dict, List, Optional, Tuple, Type, Union

import torch
import torch.nn as nn

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import StdConv2dSame, StdConv2d, ConvNormAct, to_2tuple, to_ntuple, HybridEmbed
from ._builder import build_model_with_cfg
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
from .resnet import resnet26d, resnet50d
from .resnetv2 import ResNetV2, create_resnetv2_stem
from .vision_transformer import VisionTransformer

class ConvStem(nn.Sequential):
    def __init__(
            self,
            in_chans: int = 3,
            depth: int = 3,
            channels: Union[int, Tuple[int, ...]] = 64,
            kernel_size: Union[int, Tuple[int, ...]] = 3,
            stride: Union[int, Tuple[int, ...]] = (2, 2, 2),
            padding: Union[str, int, Tuple[int, ...]] = '',
            norm_layer: Type[nn.Module] = nn.BatchNorm2d,
            act_layer: Type[nn.Module] = nn.ReLU,
    ):
        super().__init__()
        if isinstance(channels, int):
            # default tiered channel widths, halving towards the input
            channels = tuple([channels // 2 ** i for i in range(depth)][::-1])
        kernel_size = to_ntuple(depth)(kernel_size)
        padding = to_ntuple(depth)(padding)
        assert depth == len(stride) == len(kernel_size) == len(channels)
        in_chs = in_chans
        for i in range(len(channels)):
            last_conv = i == len(channels) - 1
            # only the final conv carries a bias; norm + act are applied on all but the last
            self.add_module(f'{i}', ConvNormAct(
                in_chs, channels[i], kernel_size=kernel_size[i], stride=stride[i], padding=padding[i],
                bias=last_conv, apply_norm=not last_conv, apply_act=not last_conv,
                norm_layer=norm_layer, act_layer=act_layer))
            in_chs = channels[i]

def _resnetv2(layers=(3, 4, 9), **kwargs):
    padding_same = kwargs.get('padding_same', True)
    stem_type = 'same' if padding_same else ''
    conv_layer = partial(StdConv2dSame, eps=1e-08) if padding_same else partial(StdConv2d, eps=1e-08)
    if len(layers):
        backbone = ResNetV2(
            layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3),
            preact=False, stem_type=stem_type, conv_layer=conv_layer)
    else:
        # no residual stages requested, use just the stem as the backbone
        backbone = create_resnetv2_stem(
            kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer)
    return backbone

def _convert_mobileclip(state_dict, model, prefix='image_encoder.model.'):
    out = {}
    for k, v in state_dict.items():
        if not k.startswith(prefix):
            continue
        k = k.replace(prefix, '')
        k = k.replace('patch_emb.', 'patch_embed.backbone.')
        k = k.replace('block.conv', 'conv')
        k = k.replace('block.norm', 'bn')
        k = k.replace('post_transformer_norm.', 'norm.')
        k = k.replace('pre_norm_mha.0', 'norm1')
        k = k.replace('pre_norm_mha.1', 'attn')
        k = k.replace('pre_norm_ffn.0', 'norm2')
        k = k.replace('pre_norm_ffn.1', 'mlp.fc1')
        k = k.replace('pre_norm_ffn.4', 'mlp.fc2')
        k = k.replace('qkv_proj.', 'qkv.')
        k = k.replace('out_proj.', 'proj.')
        k = k.replace('transformer.', 'blocks.')
        if k == 'pos_embed.pos_embed.pos_embed':
            k = 'pos_embed'
            v = v.squeeze(0)
        if 'classifier.proj' in k:
            # projection matrix becomes the head weight; a zero bias is synthesized
            bias_k = k.replace('classifier.proj', 'head.bias')
            k = k.replace('classifier.proj', 'head.weight')
            v = v.T
            out[bias_k] = torch.zeros(v.shape[0])
        out[k] = v
    return out

def checkpoint_filter_fn(
        state_dict: Dict[str, torch.Tensor],
        model: VisionTransformer,
        interpolation: str = 'bicubic',
        antialias: bool = True,
) -> Dict[str, torch.Tensor]:
    from .vision_transformer import checkpoint_filter_fn as _filter_fn
    if 'image_encoder.model.patch_emb.0.block.conv.weight' in state_dict:
        state_dict = _convert_mobileclip(state_dict, model)
    return _filter_fn(state_dict, model, interpolation=interpolation, antialias=antialias)

def _create_vision_transformer_hybrid(variant, backbone, embed_args=None, pretrained=False, **kwargs):
    out_indices = kwargs.pop('out_indices', 3)
    embed_args = embed_args or {}
    embed_layer = partial(HybridEmbed,
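        # HybridEmbed tokenizes the output feature map of the CNN `backbone`; with the
        # patch_size=1 default set just below, each spatial position of the CNN features
        # becomes one transformer token.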
backbone=backbone, **embed_args) kwargs.setdefault('embed_layer', embed_layer) kwargs.setdefault('patch_size', 1) return build_model_with_cfg(VisionTransformer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'patch_embed.backbone.stem.conv', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'vit_tiny_r_s16_p8_224.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True, first_conv='patch_embed.backbone.conv'), 'vit_tiny_r_s16_p8_384.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', first_conv='patch_embed.backbone.conv', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True), 'vit_small_r26_s32_224.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_light0-wd_0.03-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_small_r26_s32_384.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True), 'vit_base_r26_s32_224.untrained': _cfg(), 'vit_base_r50_s16_384.orig_in21k_ft_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_large_r50_s32_224.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_large_r50_s32_384.augreg_in21k_ft_in1k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True), 'vit_tiny_r_s16_p8_224.augreg_in21k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9, first_conv='patch_embed.backbone.conv', custom_load=True), 'vit_small_r26_s32_224.augreg_in21k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9, custom_load=True), 'vit_base_r50_s16_224.orig_in21k': _cfg(hf_hub_id='timm/', num_classes=0, crop_pct=0.9), 'vit_large_r50_s32_224.augreg_in21k': _cfg(url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9, custom_load=True), 'vit_small_resnet26d_224.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, 
std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_small_resnet50d_s16_224.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_base_resnet26d_224.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_base_resnet50d_224.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_base_mci_224.apple_mclip_lt': _cfg(hf_hub_id='apple/mobileclip_b_lt_timm', url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_blt.pt', num_classes=512, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0), first_conv='patch_embed.backbone.0.conv'), 'vit_base_mci_224.apple_mclip': _cfg(hf_hub_id='apple/mobileclip_b_timm', url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_b.pt', num_classes=512, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0), first_conv='patch_embed.backbone.0.conv')}) @register_model def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs) -> VisionTransformer: backbone = _resnetv2(layers=(), **kwargs) model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3) model = _create_vision_transformer_hybrid('vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_tiny_r_s16_p8_384(pretrained=False, **kwargs) -> VisionTransformer: backbone = _resnetv2(layers=(), **kwargs) model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3) model = _create_vision_transformer_hybrid('vit_tiny_r_s16_p8_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer: backbone = _resnetv2((2, 2, 2, 2), **kwargs) model_args = dict(embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer_hybrid('vit_small_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_r26_s32_384(pretrained=False, **kwargs) -> VisionTransformer: backbone = _resnetv2((2, 2, 2, 2), **kwargs) model_args = dict(embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer_hybrid('vit_small_r26_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer: backbone = _resnetv2((2, 2, 2, 2), **kwargs) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid('vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_r50_s16_224(pretrained=False, **kwargs) -> VisionTransformer: backbone = _resnetv2((3, 4, 9), **kwargs) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid('vit_base_r50_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_r50_s16_384(pretrained=False, **kwargs) -> VisionTransformer: backbone = _resnetv2((3, 4, 9), **kwargs) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid('vit_base_r50_s16_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_r50_s32_224(pretrained=False, **kwargs) -> 
VisionTransformer: backbone = _resnetv2((3, 4, 6, 3), **kwargs) model_args = dict(embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer_hybrid('vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_r50_s32_384(pretrained=False, **kwargs) -> VisionTransformer: backbone = _resnetv2((3, 4, 6, 3), **kwargs) model_args = dict(embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer_hybrid('vit_large_r50_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer: backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3) model = _create_vision_transformer_hybrid('vit_small_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_resnet50d_s16_224(pretrained=False, **kwargs) -> VisionTransformer: backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[3]) model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3) model = _create_vision_transformer_hybrid('vit_small_resnet50d_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer: backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid('vit_base_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_resnet50d_224(pretrained=False, **kwargs) -> VisionTransformer: backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid('vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_mci_224(pretrained=False, **kwargs) -> VisionTransformer: backbone = ConvStem(channels=(768 // 4, 768 // 4, 768), stride=(4, 2, 2), kernel_size=(4, 2, 2), padding=0, in_chans=kwargs.get('in_chans', 3), act_layer=nn.GELU) model_args = dict(embed_dim=768, depth=12, num_heads=12, no_embed_class=True) model = _create_vision_transformer_hybrid('vit_base_mci_224', backbone=backbone, embed_args=dict(proj=False), pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, {'vit_tiny_r_s16_p8_224_in21k': 'vit_tiny_r_s16_p8_224.augreg_in21k', 'vit_small_r26_s32_224_in21k': 'vit_small_r26_s32_224.augreg_in21k', 'vit_base_r50_s16_224_in21k': 'vit_base_r50_s16_224.orig_in21k', 'vit_base_resnet50_224_in21k': 'vit_base_r50_s16_224.orig_in21k', 'vit_large_r50_s32_224_in21k': 'vit_large_r50_s32_224.augreg_in21k', 'vit_base_resnet50_384': 'vit_base_r50_s16_384.orig_in21k_ft_in1k'}) # File: pytorch-image-models-main/timm/models/vision_transformer_relpos.py """""" import logging import math from functools import partial from typing import List, Optional, Tuple, Type, Union try: from typing import Literal except ImportError: from typing_extensions 
import Literal import torch import torch.nn as nn from torch.jit import Final from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import PatchEmbed, Mlp, DropPath, RelPosMlp, RelPosBias, use_fused_attn, LayerType from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply from ._registry import generate_default_cfgs, register_model from .vision_transformer import get_init_weights_vit __all__ = ['VisionTransformerRelPos'] _logger = logging.getLogger(__name__) class RelPosAttention(nn.Module): fused_attn: Final[bool] def __init__(self, dim, num_heads=8, qkv_bias=False, qk_norm=False, rel_pos_cls=None, attn_drop=0.0, proj_drop=0.0, norm_layer=nn.LayerNorm): super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.rel_pos = rel_pos_cls(num_heads=num_heads) if rel_pos_cls else None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, shared_rel_pos: Optional[torch.Tensor]=None): (B, N, C) = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) q = self.q_norm(q) k = self.k_norm(k) if self.fused_attn: if self.rel_pos is not None: attn_bias = self.rel_pos.get_bias() elif shared_rel_pos is not None: attn_bias = shared_rel_pos else: attn_bias = None x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) if self.rel_pos is not None: attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos) elif shared_rel_pos is not None: attn = attn + shared_rel_pos attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-05, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class RelPosBlock(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_norm=False, rel_pos_cls=None, init_values=None, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = RelPosAttention(dim, num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, rel_pos_cls=rel_pos_cls, attn_drop=attn_drop, proj_drop=proj_drop) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x, shared_rel_pos: Optional[torch.Tensor]=None): x = x + 
self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class ResPostRelPosBlock(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_norm=False, rel_pos_cls=None, init_values=None, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.init_values = init_values self.attn = RelPosAttention(dim, num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, rel_pos_cls=rel_pos_cls, attn_drop=attn_drop, proj_drop=proj_drop) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.init_weights() def init_weights(self): if self.init_values is not None: nn.init.constant_(self.norm1.weight, self.init_values) nn.init.constant_(self.norm2.weight, self.init_values) def forward(self, x, shared_rel_pos: Optional[torch.Tensor]=None): x = x + self.drop_path1(self.norm1(self.attn(x, shared_rel_pos=shared_rel_pos))) x = x + self.drop_path2(self.norm2(self.mlp(x))) return x class VisionTransformerRelPos(nn.Module): def __init__(self, img_size: Union[int, Tuple[int, int]]=224, patch_size: Union[int, Tuple[int, int]]=16, in_chans: int=3, num_classes: int=1000, global_pool: Literal['', 'avg', 'token', 'map']='avg', embed_dim: int=768, depth: int=12, num_heads: int=12, mlp_ratio: float=4.0, qkv_bias: bool=True, qk_norm: bool=False, init_values: Optional[float]=1e-06, class_token: bool=False, fc_norm: bool=False, rel_pos_type: str='mlp', rel_pos_dim: Optional[int]=None, shared_rel_pos: bool=False, drop_rate: float=0.0, proj_drop_rate: float=0.0, attn_drop_rate: float=0.0, drop_path_rate: float=0.0, weight_init: Literal['skip', 'jax', 'moco', '']='skip', fix_init: bool=False, embed_layer: Type[nn.Module]=PatchEmbed, norm_layer: Optional[LayerType]=None, act_layer: Optional[LayerType]=None, block_fn: Type[nn.Module]=RelPosBlock): super().__init__() assert global_pool in ('', 'avg', 'token') assert class_token or global_pool != 'token' norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-06) act_layer = act_layer or nn.GELU self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = self.embed_dim = embed_dim self.num_prefix_tokens = 1 if class_token else 0 self.grad_checkpointing = False self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) feat_size = self.patch_embed.grid_size r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size rel_pos_args = dict(window_size=feat_size, prefix_tokens=self.num_prefix_tokens) if rel_pos_type.startswith('mlp'): if rel_pos_dim: rel_pos_args['hidden_dim'] = rel_pos_dim if 'swin' in rel_pos_type: rel_pos_args['mode'] = 'swin' rel_pos_cls = partial(RelPosMlp, **rel_pos_args) else: rel_pos_cls = partial(RelPosBias, **rel_pos_args) self.shared_rel_pos = None if shared_rel_pos: self.shared_rel_pos = rel_pos_cls(num_heads=num_heads) rel_pos_cls = None self.cls_token = nn.Parameter(torch.zeros(1, self.num_prefix_tokens, embed_dim)) if class_token else None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] self.blocks = nn.ModuleList([block_fn(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, 
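            # drop_path=dpr[i] below applies the linearly increasing stochastic-depth rates
            # built above; rel_pos_cls is None when a single shared relative-position module
            # is used, in which case its bias reaches the blocks via `shared_rel_pos` at forward.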
            qkv_bias=qkv_bias,
            qk_norm=qk_norm,
            rel_pos_cls=rel_pos_cls,
            init_values=init_values,
            proj_drop=proj_drop_rate,
            attn_drop=attn_drop_rate,
            drop_path=dpr[i],
            norm_layer=norm_layer,
            act_layer=act_layer,
        ) for i in range(depth)])
        self.feature_info = [dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)]
        self.norm = norm_layer(embed_dim) if not fc_norm else nn.Identity()
        self.fc_norm = norm_layer(embed_dim) if fc_norm else nn.Identity()
        self.head_drop = nn.Dropout(drop_rate)
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if weight_init != 'skip':
            self.init_weights(weight_init)
        if fix_init:
            self.fix_init_weight()

    def init_weights(self, mode=''):
        assert mode in ('jax', 'moco', '')
        if self.cls_token is not None:
            nn.init.normal_(self.cls_token, std=1e-06)
        named_apply(get_init_weights_vit(mode), self)

    def fix_init_weight(self):
        def rescale(param, _layer_id):
            param.div_(math.sqrt(2.0 * _layer_id))
        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'cls_token'}

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        return dict(stem='^cls_token|patch_embed', blocks=[('^blocks\\.(\\d+)', None), ('^norm', (99999,))])

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def get_classifier(self) -> nn.Module:
        return self.head

    def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
        self.num_classes = num_classes
        if global_pool is not None:
            assert global_pool in ('', 'avg', 'token')
            self.global_pool = global_pool
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_intermediates(
            self,
            x: torch.Tensor,
            indices: Optional[Union[int, List[int]]] = None,
            return_prefix_tokens: bool = False,
            norm: bool = False,
            stop_early: bool = False,
            output_fmt: str = 'NCHW',
            intermediates_only: bool = False,
    ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
        assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.'
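        # Minimal sketch of pulling intermediate features from this method (hypothetical
        # usage, not part of the original source):
        #
        #   model = vit_relpos_base_patch16_224()
        #   feats = model.forward_intermediates(
        #       torch.randn(1, 3, 224, 224), indices=[3, 7, 11], intermediates_only=True)
        #   # -> three NCHW tensors, each of shape (1, 768, 14, 14)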
reshape = output_fmt == 'NCHW' intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) (B, _, height, width) = x.shape x = self.patch_embed(x) if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None if torch.jit.is_scripting() or not stop_early: blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for (i, blk) in enumerate(blocks): x = blk(x, shared_rel_pos=shared_rel_pos) if i in take_indices: intermediates.append(self.norm(x) if norm else x) if self.num_prefix_tokens: prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates] intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates] if reshape: (H, W) = self.patch_embed.dynamic_feat_size((height, width)) intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] if not torch.jit.is_scripting() and return_prefix_tokens: intermediates = list(zip(intermediates, prefix_tokens)) if intermediates_only: return intermediates x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] if prune_norm: self.norm = nn.Identity() if prune_head: self.fc_norm = nn.Identity() self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x) if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None for blk in self.blocks: if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint(blk, x, shared_rel_pos=shared_rel_pos) else: x = blk(x, shared_rel_pos=shared_rel_pos) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool: x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.fc_norm(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_vision_transformer_relpos(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) model = build_model_with_cfg(VisionTransformerRelPos, variant, pretrained, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'vit_relpos_base_patch32_plus_rpn_256.sw_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_replos_base_patch32_plus_rpn_256-sw-dd486f51.pth', hf_hub_id='timm/', input_size=(3, 256, 256)), 'vit_relpos_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240)), 'vit_relpos_small_patch16_224.sw_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_small_patch16_224-sw-ec2778b4.pth', hf_hub_id='timm/'), 'vit_relpos_medium_patch16_224.sw_in1k': 
_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_224-sw-11c174af.pth', hf_hub_id='timm/'), 'vit_relpos_base_patch16_224.sw_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_224-sw-49049aed.pth', hf_hub_id='timm/'), 'vit_srelpos_small_patch16_224.sw_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_small_patch16_224-sw-6cdb8849.pth', hf_hub_id='timm/'), 'vit_srelpos_medium_patch16_224.sw_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_medium_patch16_224-sw-ad702b8c.pth', hf_hub_id='timm/'), 'vit_relpos_medium_patch16_cls_224.sw_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_cls_224-sw-cfe8e259.pth', hf_hub_id='timm/'), 'vit_relpos_base_patch16_cls_224.untrained': _cfg(), 'vit_relpos_base_patch16_clsgap_224.sw_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_gapcls_224-sw-1a341d6c.pth', hf_hub_id='timm/'), 'vit_relpos_small_patch16_rpn_224.untrained': _cfg(), 'vit_relpos_medium_patch16_rpn_224.sw_in1k': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_rpn_224-sw-5d2befd8.pth', hf_hub_id='timm/'), 'vit_relpos_base_patch16_rpn_224.untrained': _cfg()}) @register_model def vit_relpos_base_patch32_plus_rpn_256(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos('vit_relpos_base_patch32_plus_rpn_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_plus_240(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14) model = _create_vision_transformer_relpos('vit_relpos_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=True) model = _create_vision_transformer_relpos('vit_relpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=True) model = _create_vision_transformer_relpos('vit_relpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True) model = _create_vision_transformer_relpos('vit_relpos_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_srelpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=False, rel_pos_dim=384, shared_rel_pos=True) model = 
_create_vision_transformer_relpos('vit_srelpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_srelpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False, rel_pos_dim=512, shared_rel_pos=True) model = _create_vision_transformer_relpos('vit_srelpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_medium_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False, rel_pos_dim=256, class_token=True, global_pool='token') model = _create_vision_transformer_relpos('vit_relpos_medium_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, class_token=True, global_pool='token') model = _create_vision_transformer_relpos('vit_relpos_base_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_clsgap_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True, class_token=True) model = _create_vision_transformer_relpos('vit_relpos_base_patch16_clsgap_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_small_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos('vit_relpos_small_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_medium_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos('vit_relpos_medium_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos('vit_relpos_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model # File: pytorch-image-models-main/timm/models/vision_transformer_sam.py """""" import logging from functools import partial from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import PatchEmbed, Mlp, DropPath, PatchDropout, LayerNorm2d, ClassifierHead, NormMlpClassifierHead, Format, resample_abs_pos_embed_nhwc, RotaryEmbeddingCat, apply_rot_embed_cat, to_2tuple, use_fused_attn from torch.jit import Final from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import 
register_notrace_function
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model

__all__ = ['VisionTransformerSAM']

_logger = logging.getLogger(__name__)

def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
    max_rel_dist = int(2 * max(q_size, k_size) - 1)
    if rel_pos.shape[0] != max_rel_dist:
        # interpolate the relative position table when query/key sizes differ from training
        rel_pos_resized = F.interpolate(
            rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
            size=max_rel_dist,
            mode='linear',
        )
        rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
    else:
        rel_pos_resized = rel_pos
    # scale coords by the shorter length when q and k sizes differ
    q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
    k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
    relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)
    return rel_pos_resized[relative_coords.long()]

register_notrace_function(get_rel_pos)

def get_decomposed_rel_pos_bias(
        q: torch.Tensor,
        rel_pos_h: torch.Tensor,
        rel_pos_w: torch.Tensor,
        q_size: Tuple[int, int],
        k_size: Tuple[int, int],
) -> torch.Tensor:
    q_h, q_w = q_size
    k_h, k_w = k_size
    Rh = get_rel_pos(q_h, k_h, rel_pos_h)
    Rw = get_rel_pos(q_w, k_w, rel_pos_w)
    B, _, dim = q.shape
    r_q = q.reshape(B, q_h, q_w, dim)
    rel_h = torch.einsum('bhwc,hkc->bhwk', r_q, Rh)
    rel_w = torch.einsum('bhwc,wkc->bhwk', r_q, Rw)
    attn_bias = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
    return attn_bias.reshape(-1, q_h * q_w, k_h * k_w)

class Attention(nn.Module):
    fused_attn: Final[bool]

    def __init__(
            self,
            dim,
            num_heads=8,
            qkv_bias=True,
            qk_norm=False,
            attn_drop=0.0,
            proj_drop=0.0,
            norm_layer=nn.LayerNorm,
            use_rel_pos: bool = False,
            input_size: Optional[Tuple[int, int]] = None,
            rope: Optional[nn.Module] = None,
    ):
        super().__init__()
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** (-0.5)
        self.fused_attn = use_fused_attn()
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.use_rel_pos = use_rel_pos
        if self.use_rel_pos:
            assert rope is None
            assert input_size is not None, 'Input size must be provided if using relative positional encoding.'
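            # Decomposed relative position (as in the SAM image encoder): rather than one
            # table over all (q, k) offsets, per-axis tables rel_pos_h / rel_pos_w of shape
            # (2 * size - 1, head_dim) are learned and combined into an additive attention
            # bias by get_decomposed_rel_pos_bias() above. E.g. for a 14x14 window the two
            # einsums yield (B, 14, 14, 14) tensors that broadcast-sum into a (B, 196, 196) bias.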
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, self.head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, self.head_dim)) self.rope = rope def forward(self, x): (B, H, W, _) = x.shape N = H * W x = x.reshape(B, N, -1) qkv = self.qkv(x).view(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.reshape(3, B * self.num_heads, N, -1).unbind(0) (q, k) = (self.q_norm(q), self.k_norm(k)) if self.use_rel_pos: attn_bias = get_decomposed_rel_pos_bias(q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) else: attn_bias = None if self.rope is not None: rope = self.rope.get_embed() q = apply_rot_embed_cat(q, rope).type_as(v) k = apply_rot_embed_cat(k, rope).type_as(v) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) if attn_bias is not None: attn = attn + attn_bias attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.view(B, self.num_heads, N, -1).transpose(1, 2).reshape(B, N, -1) x = self.proj(x) x = self.proj_drop(x) x = x.view(B, H, W, -1) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-05, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=True, qk_norm=False, proj_drop=0.0, attn_drop=0.0, init_values=None, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, mlp_layer=Mlp, use_rel_pos=False, window_size=0, input_size=None, rope=None): super().__init__() self.window_size = window_size self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, use_rel_pos=use_rel_pos, input_size=input_size if window_size == 0 else (window_size, window_size), rope=rope) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = mlp_layer(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): (B, H, W, _) = x.shape shortcut = x x = self.norm1(x) pad_hw: Optional[Tuple[int, int]] = None if self.window_size > 0: (x, pad_hw) = window_partition(x, self.window_size) x = self.drop_path1(self.ls1(self.attn(x))) if self.window_size > 0: x = window_unpartition(x, self.window_size, (H, W), pad_hw) x = shortcut + x x = x.reshape(B, H * W, -1) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) x = x.reshape(B, H, W, -1) return x def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: (B, H, W, C) = x.shape pad_h = (window_size - H % window_size) % window_size pad_w = (window_size - W % window_size) % window_size x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) (Hp, Wp) = (H + pad_h, W + pad_w) x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return (windows, (Hp, Wp)) def window_unpartition(windows: 
torch.Tensor, window_size: int, hw: Tuple[int, int], pad_hw: Optional[Tuple[int, int]]=None) -> torch.Tensor: (Hp, Wp) = pad_hw if pad_hw is not None else hw (H, W) = hw B = windows.shape[0] // (Hp * Wp // window_size // window_size) x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) x = x[:, :H, :W, :].contiguous() return x class VisionTransformerSAM(nn.Module): def __init__(self, img_size: int=1024, patch_size: int=16, in_chans: int=3, num_classes: int=768, embed_dim: int=768, depth: int=12, num_heads: int=12, mlp_ratio: float=4.0, qkv_bias: bool=True, qk_norm: bool=False, init_values: Optional[float]=None, pre_norm: bool=False, drop_rate: float=0.0, pos_drop_rate: float=0.0, patch_drop_rate: float=0.0, proj_drop_rate: float=0.0, attn_drop_rate: float=0.0, drop_path_rate: float=0.0, weight_init: str='', embed_layer: Callable=partial(PatchEmbed, output_fmt=Format.NHWC, strict_img_size=False), norm_layer: Optional[Callable]=nn.LayerNorm, act_layer: Optional[Callable]=nn.GELU, block_fn: Callable=Block, mlp_layer: Callable=Mlp, use_abs_pos: bool=True, use_rel_pos: bool=False, use_rope: bool=False, window_size: int=14, global_attn_indexes: Tuple[int, ...]=(), neck_chans: int=256, global_pool: str='avg', head_hidden_size: Optional[int]=None, ref_feat_shape: Optional[Tuple[Tuple[int, int], Tuple[int, int]]]=None): super().__init__() norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-06) act_layer = act_layer or nn.GELU self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = self.embed_dim = embed_dim self.grad_checkpointing = False self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, bias=not pre_norm) grid_size = self.patch_embed.grid_size r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size if use_abs_pos: self.pos_embed = nn.Parameter(torch.zeros(1, grid_size[0], grid_size[1], embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=pos_drop_rate) if patch_drop_rate > 0: self.patch_drop = PatchDropout(patch_drop_rate, num_prefix_tokens=0) else: self.patch_drop = nn.Identity() self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity() if use_rope: assert not use_rel_pos, 'ROPE and relative pos embeddings should not be enabled at same time' if ref_feat_shape is not None: assert len(ref_feat_shape) == 2 ref_feat_shape_global = to_2tuple(ref_feat_shape[0]) ref_feat_shape_window = to_2tuple(ref_feat_shape[1]) else: ref_feat_shape_global = ref_feat_shape_window = None self.rope_global = RotaryEmbeddingCat(embed_dim // num_heads, in_pixels=False, feat_shape=grid_size, ref_feat_shape=ref_feat_shape_global) self.rope_window = RotaryEmbeddingCat(embed_dim // num_heads, in_pixels=False, feat_shape=to_2tuple(window_size), ref_feat_shape=ref_feat_shape_window) else: self.rope_global = None self.rope_window = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] self.blocks = nn.Sequential(*[block_fn(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_norm=qk_norm, init_values=init_values, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, mlp_layer=mlp_layer, use_rel_pos=use_rel_pos, window_size=window_size if i not in global_attn_indexes else 0, input_size=grid_size, rope=self.rope_window if i not in global_attn_indexes 
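            # Blocks listed in global_attn_indexes attend globally (window_size=0) and, when
            # RoPE is enabled, use the full-grid rope_global table; all other blocks attend
            # within local `window_size` windows and share rope_window.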
else self.rope_global) for i in range(depth)]) self.feature_info = [dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)] if neck_chans: self.neck = nn.Sequential(nn.Conv2d(embed_dim, neck_chans, kernel_size=1, bias=False), LayerNorm2d(neck_chans), nn.Conv2d(neck_chans, neck_chans, kernel_size=3, padding=1, bias=False), LayerNorm2d(neck_chans)) self.num_features = neck_chans else: if head_hidden_size: self.neck = nn.Identity() else: self.neck = LayerNorm2d(embed_dim) neck_chans = embed_dim if head_hidden_size: self.head = NormMlpClassifierHead(neck_chans, num_classes, hidden_size=head_hidden_size, pool_type=global_pool, drop_rate=drop_rate) else: self.head = ClassifierHead(neck_chans, num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'dist_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^pos_embed|patch_embed', blocks=[('^blocks\\.(\\d+)', None), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.head.reset(num_classes, global_pool) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt == 'NCHW', 'Output shape for ViT-SAM must be NCHW.' intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) x = self.patch_embed(x) if self.pos_embed is not None: x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3]) x = self.pos_drop(x) x = self.patch_drop(x) x = self.norm_pre(x) if torch.jit.is_scripting() or not stop_early: blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for (i, blk) in enumerate(blocks): x = blk(x) if i in take_indices: if norm: intermediates.append(self.neck(x.permute(0, 3, 1, 2))) else: intermediates.append(x.permute(0, 3, 1, 2)) if intermediates_only: return intermediates x = self.neck(x.permute(0, 3, 1, 2)) return (x, intermediates) def prune_intermediate_layers(self, indices: Optional[Union[int, List[int]]]=None, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] if prune_norm: self.neck = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x) if self.pos_embed is not None: x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3]) x = self.pos_drop(x) x = self.patch_drop(x) x = self.norm_pre(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.neck(x.permute(0, 3, 1, 2)) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): sam_checkpoint = 'image_encoder.patch_embed.proj.weight' in state_dict out_dict = {} for (k, v) in state_dict.items(): if k.startswith('image_encoder.'): k = k[14:] k = k.replace('mlp.lin', 'mlp.fc') elif sam_checkpoint: continue 
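# (Added commentary, not part of the original source.) checkpoint_filter_fn remaps
# original SAM checkpoints for timm: keys outside the image encoder (prompt encoder,
# mask decoder) hit the 'continue' above and are dropped, while encoder keys are kept
# after stripping the leading 'image_encoder.' prefix (14 characters) and renaming
# 'mlp.lin*' to timm's 'mlp.fc*' before the assignment into out_dict below.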
out_dict[k] = v return out_dict def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 1024, 1024), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'samvit_base_patch16.sa1b': _cfg(url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 1024, 1024), crop_pct=1.0), 'samvit_large_patch16.sa1b': _cfg(url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 1024, 1024), crop_pct=1.0), 'samvit_huge_patch16.sa1b': _cfg(url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 1024, 1024), crop_pct=1.0), 'samvit_base_patch16_224': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=1000, input_size=(3, 224, 224), crop_pct=0.9)}) def _create_vision_transformer(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) return build_model_with_cfg(VisionTransformerSAM, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) @register_model def samvit_base_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11], window_size=14, use_rel_pos=True, img_size=1024) model = _create_vision_transformer('samvit_base_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def samvit_large_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, global_attn_indexes=[5, 11, 17, 23], window_size=14, use_rel_pos=True, img_size=1024) model = _create_vision_transformer('samvit_large_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def samvit_huge_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: model_args = dict(patch_size=16, embed_dim=1280, depth=32, num_heads=16, global_attn_indexes=[7, 15, 23, 31], window_size=14, use_rel_pos=True, img_size=1024) model = _create_vision_transformer('samvit_huge_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def samvit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerSAM: model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11], window_size=14, use_rel_pos=True, use_abs_pos=False, img_size=224, neck_chans=None) model = _create_vision_transformer('samvit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model # File: pytorch-image-models-main/timm/models/vitamin.py """""" import math from dataclasses import dataclass, field from functools import partial from typing import Optional, Union, Tuple import torch import torch.nn as nn from timm.data import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import create_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d, 
make_divisible, DropPath, HybridEmbed from ._builder import build_model_with_cfg from ._manipulate import named_apply, checkpoint_seq from ._registry import register_model, generate_default_cfgs from .vision_transformer import VisionTransformer, checkpoint_filter_fn @dataclass class VitConvCfg: expand_ratio: float = 4.0 expand_output: bool = True kernel_size: int = 3 group_size: int = 1 pre_norm_act: bool = False stride_mode: str = 'dw' pool_type: str = 'avg2' downsample_pool_type: str = 'avg2' act_layer: str = 'gelu' norm_layer: str = '' norm_eps: float = 1e-05 down_shortcut: Optional[bool] = True mlp: str = 'mlp' @dataclass class VitCfg: embed_dim: Tuple[Union[int, Tuple[int, ...]], ...] = (96, 192, 384, 768) depths: Tuple[Union[int, Tuple[int, ...]], ...] = (2, 3, 5, 2) stem_width: int = 64 conv_cfg: VitConvCfg = field(default_factory=VitConvCfg) head_type: str = '' def _init_conv(module, name, scheme=''): if isinstance(module, nn.Conv2d): fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out)) if module.bias is not None: nn.init.zeros_(module.bias) class Stem(nn.Module): def __init__(self, in_chs: int, out_chs: int, act_layer: str='gelu', norm_layer: str='layernorm2d', norm_eps: float=1e-06, bias: bool=True): super().__init__() norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps) self.out_chs = out_chs self.conv1 = create_conv2d(in_chs, out_chs, 3, stride=2, bias=bias) self.norm1 = norm_act_layer(out_chs) self.conv2 = create_conv2d(out_chs, out_chs, 3, stride=1, bias=bias) named_apply(_init_conv, self) def forward(self, x): x = self.conv1(x) x = self.norm1(x) x = self.conv2(x) return x class Downsample2d(nn.Module): def __init__(self, dim: int, dim_out: int, pool_type: str='avg2', bias: bool=True): super().__init__() self.pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False) if dim != dim_out: self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias) else: self.expand = nn.Identity() def forward(self, x): x = self.pool(x) x = self.expand(x) return x class StridedConv(nn.Module): def __init__(self, kernel_size=3, stride=2, padding=1, in_chans=3, embed_dim=768): super().__init__() norm_layer = partial(get_norm_layer('layernorm2d'), eps=1e-06) self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding) self.norm = norm_layer(in_chans) def forward(self, x): x = self.norm(x) x = self.proj(x) return x class MbConvLNBlock(nn.Module): def __init__(self, in_chs: int, out_chs: int, stride: int=1, drop_path: float=0.0, kernel_size: int=3, norm_layer: str='layernorm2d', norm_eps: float=1e-06, act_layer: str='gelu', expand_ratio: float=4.0): super(MbConvLNBlock, self).__init__() (self.stride, self.in_chs, self.out_chs) = (stride, in_chs, out_chs) mid_chs = make_divisible(out_chs * expand_ratio) prenorm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps) if stride == 2: self.shortcut = Downsample2d(in_chs, out_chs, pool_type='avg', bias=True) elif in_chs != out_chs: self.shortcut = nn.Conv2d(in_chs, out_chs, 1, bias=True) else: self.shortcut = nn.Identity() self.pre_norm = prenorm_act_layer(in_chs, apply_act=False) self.down = nn.Identity() self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=1, bias=True) self.act1 = create_act_layer(act_layer, inplace=True) self.conv2_kxk = create_conv2d(mid_chs, mid_chs, kernel_size, stride=stride, dilation=1, groups=mid_chs, bias=True) 
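# (Added commentary, not part of the original source.) MbConvLNBlock is an
# inverted-bottleneck (MBConv-style) block with a pre-activation LayerNorm:
# shortcut -> pre_norm -> 1x1 expand (in_chs -> mid_chs) -> depthwise kxk
# (conv2_kxk above, groups=mid_chs) -> 1x1 project (mid_chs -> out_chs),
# then drop_path plus shortcut. With the default expand_ratio=4.0 and
# out_chs=128, mid_chs = make_divisible(128 * 4.0) = 512, so the depthwise
# conv runs at 4x the output width.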
self.act2 = create_act_layer(act_layer, inplace=True) self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=True) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def init_weights(self, scheme=''): named_apply(partial(_init_conv, scheme=scheme), self) def forward(self, x): shortcut = self.shortcut(x) x = self.pre_norm(x) x = self.down(x) x = self.conv1_1x1(x) x = self.act1(x) x = self.conv2_kxk(x) x = self.act2(x) x = self.conv3_1x1(x) x = self.drop_path(x) + shortcut return x class MbConvStages(nn.Module): def __init__(self, cfg: VitCfg, img_size: Union[int, Tuple[int, int]]=224, in_chans: int=3): super().__init__() self.grad_checkpointing = False self.stem = Stem(in_chs=in_chans, out_chs=cfg.stem_width) stages = [] self.num_stages = len(cfg.embed_dim) for (s, dim) in enumerate(cfg.embed_dim[:2]): stage_in_chs = cfg.embed_dim[s - 1] if s > 0 else cfg.stem_width blocks = [MbConvLNBlock(in_chs=stage_in_chs if d == 0 else dim, out_chs=dim, stride=2 if d == 0 else 1) for d in range(cfg.depths[s])] stages += [nn.Sequential(*blocks)] self.stages = nn.Sequential(*stages) self.pool = StridedConv(stride=2, in_chans=cfg.embed_dim[1], embed_dim=cfg.embed_dim[2]) def forward(self, x): x = self.stem(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.pool(x) return x class GeGluMlp(nn.Module): def __init__(self, in_features, hidden_features, act_layer='gelu', drop=0.0): super().__init__() norm_layer = partial(get_norm_layer('layernorm'), eps=1e-06) self.norm = norm_layer(in_features) self.w0 = nn.Linear(in_features, hidden_features) self.act = create_act_layer(act_layer) self.w1 = nn.Linear(in_features, hidden_features) self.w2 = nn.Linear(hidden_features, in_features) def forward(self, x): x = self.norm(x) x = self.act(self.w0(x)) * self.w1(x) x = self.w2(x) return x def _create_vitamin(variant, pretrained=False, embed_cfg=None, **kwargs): out_indices = kwargs.pop('out_indices', 3) assert embed_cfg is not None backbone = MbConvStages(cfg=embed_cfg, in_chans=kwargs.get('in_chans', 3)) kwargs['embed_layer'] = partial(HybridEmbed, backbone=backbone, proj=False) kwargs.setdefault('patch_size', 1) return build_model_with_cfg(VisionTransformer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': OPENAI_CLIP_MEAN, 'std': OPENAI_CLIP_STD, 'first_conv': 'patch_embed.backbone.stem.conv1', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'vitamin_small_224.datacomp1b_clip_ltt': _cfg(hf_hub_id='jienengchen/ViTamin-S-LTT', num_classes=384), 'vitamin_small_224.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-S', num_classes=384), 'vitamin_base_224.datacomp1b_clip_ltt': _cfg(hf_hub_id='jienengchen/ViTamin-B-LTT', num_classes=768), 'vitamin_base_224.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-B', num_classes=768), 'vitamin_large_224.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-L-224px', num_classes=768), 'vitamin_large_256.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-L-256px', num_classes=768, input_size=(3, 256, 256), crop_pct=1.0), 'vitamin_large_336.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-L-336px', num_classes=768, input_size=(3, 336, 336), crop_pct=1.0), 
'vitamin_large_384.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-L-384px', num_classes=768, input_size=(3, 384, 384), crop_pct=1.0), 'vitamin_large2_224.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-L2-224px', num_classes=1024), 'vitamin_large2_256.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-L2-256px', num_classes=1024, input_size=(3, 256, 256), crop_pct=1.0), 'vitamin_large2_336.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-L2-336px', num_classes=1024, input_size=(3, 336, 336), crop_pct=1.0), 'vitamin_large2_384.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-L2-384px', num_classes=1024, input_size=(3, 384, 384), crop_pct=1.0), 'vitamin_xlarge_256.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-XL-256px', num_classes=1152, input_size=(3, 256, 256), crop_pct=1.0), 'vitamin_xlarge_336.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-XL-336px', num_classes=1152, input_size=(3, 336, 336), crop_pct=1.0), 'vitamin_xlarge_384.datacomp1b_clip': _cfg(hf_hub_id='jienengchen/ViTamin-XL-384px', num_classes=1152, input_size=(3, 384, 384), crop_pct=1.0)}) @register_model def vitamin_small_224(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(64, 128, 384), depths=(2, 4, 1), stem_width=64, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') model_args = dict(embed_dim=384, depth=14, num_heads=6, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_small_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_base_224(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(128, 256, 768), depths=(2, 4, 1), stem_width=128, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') model_args = dict(embed_dim=768, depth=14, num_heads=12, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_base_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large_224(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') model_args = dict(embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_large_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large_256(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') model_args = dict(img_size=256, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_large_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large_336(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') model_args = dict(img_size=336, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', 
embed_cfg=embed_cfg) model = _create_vitamin('vitamin_large_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large_384(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') model_args = dict(img_size=384, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_large_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large2_224(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') model_args = dict(embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_large2_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large2_256(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') model_args = dict(img_size=256, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_large2_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large2_336(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') model_args = dict(img_size=336, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_large2_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large2_384(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') model_args = dict(img_size=384, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_large2_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_xlarge_256(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(192, 384, 1152), depths=(2, 4, 1), stem_width=192, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') model_args = dict(img_size=256, embed_dim=1152, depth=32, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', pos_embed='none', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_xlarge_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_xlarge_336(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(192, 384, 1152), depths=(2, 4, 1), stem_width=192, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') 
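# Example usage (added sketch, not part of the original source). Each vitamin_*
# entry point merges caller kwargs over model_args via dict(model_args, **kwargs),
# so explicit arguments win over these defaults. Through the timm factory:
#
#   import timm
#   model = timm.create_model('vitamin_xlarge_336', pretrained=False, num_classes=0)
#
# where num_classes=0 drops the classifier head; pretrained weights resolve to the
# jienengchen/* hf_hub ids registered in default_cfgs above.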
model_args = dict(img_size=336, embed_dim=1152, depth=32, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', pos_embed='none', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_xlarge_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_xlarge_384(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg(embed_dim=(192, 384, 1152), depths=(2, 4, 1), stem_width=192, conv_cfg=VitConvCfg(norm_layer='layernorm2d', norm_eps=1e-06), head_type='1d') model_args = dict(img_size=384, embed_dim=1152, depth=32, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2.0, class_token=False, global_pool='avg', pos_embed='none', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_xlarge_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model # File: pytorch-image-models-main/timm/models/volo.py """""" import math from typing import List, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, Mlp, to_2tuple, to_ntuple, trunc_normal_, use_fused_attn from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._registry import register_model, generate_default_cfgs __all__ = ['VOLO'] class OutlookAttention(nn.Module): def __init__(self, dim, num_heads, kernel_size=3, padding=1, stride=1, qkv_bias=False, attn_drop=0.0, proj_drop=0.0): super().__init__() head_dim = dim // num_heads self.num_heads = num_heads self.kernel_size = kernel_size self.padding = padding self.stride = stride self.scale = head_dim ** (-0.5) self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn = nn.Linear(dim, kernel_size ** 4 * num_heads) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding, stride=stride) self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True) def forward(self, x): (B, H, W, C) = x.shape v = self.v(x).permute(0, 3, 1, 2) (h, w) = (math.ceil(H / self.stride), math.ceil(W / self.stride)) v = self.unfold(v).reshape(B, self.num_heads, C // self.num_heads, self.kernel_size * self.kernel_size, h * w).permute(0, 1, 4, 3, 2) attn = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) attn = self.attn(attn).reshape(B, h * w, self.num_heads, self.kernel_size * self.kernel_size, self.kernel_size * self.kernel_size).permute(0, 2, 1, 3, 4) attn = attn * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).permute(0, 1, 4, 3, 2).reshape(B, C * self.kernel_size * self.kernel_size, h * w) x = F.fold(x, output_size=(H, W), kernel_size=self.kernel_size, padding=self.padding, stride=self.stride) x = self.proj(x.permute(0, 2, 3, 1)) x = self.proj_drop(x) return x class Outlooker(nn.Module): def __init__(self, dim, kernel_size, padding, stride=1, num_heads=1, mlp_ratio=3.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, qkv_bias=False): super().__init__() self.norm1 = norm_layer(dim) self.attn = OutlookAttention(dim, num_heads, kernel_size=kernel_size, padding=padding, stride=stride, qkv_bias=qkv_bias, attn_drop=attn_drop) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer)
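# Shape walk-through for OutlookAttention above (added commentary, not part of the
# original source), with B=2, H=W=28, C=dim=192, num_heads=6, kernel_size=3, stride=2
# (so h = w = ceil(28 / 2) = 14):
#   v unfolded:  (B, heads, h*w=196, K*K=9, C/heads=32)
#   attn logits: pooled x -> Linear(dim, K**4 * heads) -> (B, heads, 196, 9, 9)
#   attn @ v:    (B, heads, 196, 9, 32), folded back to (B, H, W, C) by F.fold
# Each output pixel aggregates its 3x3 neighbourhood with weights predicted from the
# pooled feature rather than computed as query-key dot products, e.g.:
#   >>> m = OutlookAttention(dim=192, num_heads=6, stride=2)
#   >>> m(torch.randn(2, 28, 28, 192)).shape
#   torch.Size([2, 28, 28, 192])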
self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): x = x + self.drop_path1(self.attn(self.norm1(x))) x = x + self.drop_path2(self.mlp(self.norm2(x))) return x class Attention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): (B, H, W, C) = x.shape qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.0) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, H, W, C) x = self.proj(x) x = self.proj_drop(x) return x class Transformer(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): x = x + self.drop_path1(self.attn(self.norm1(x))) x = x + self.drop_path2(self.mlp(self.norm2(x))) return x class ClassAttention(nn.Module): def __init__(self, dim, num_heads=8, head_dim=None, qkv_bias=False, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads if head_dim is not None: self.head_dim = head_dim else: head_dim = dim // num_heads self.head_dim = head_dim self.scale = head_dim ** (-0.5) self.kv = nn.Linear(dim, self.head_dim * self.num_heads * 2, bias=qkv_bias) self.q = nn.Linear(dim, self.head_dim * self.num_heads, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(self.head_dim * self.num_heads, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): (B, N, C) = x.shape kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) (k, v) = kv.unbind(0) q = self.q(x[:, :1, :]).reshape(B, self.num_heads, 1, self.head_dim) * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) cls_embed = (attn @ v).transpose(1, 2).reshape(B, 1, self.head_dim * self.num_heads) cls_embed = self.proj(cls_embed) cls_embed = self.proj_drop(cls_embed) return cls_embed class ClassBlock(nn.Module): def __init__(self, dim, num_heads, head_dim=None, mlp_ratio=4.0, qkv_bias=False, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = ClassAttention(dim, num_heads=num_heads, head_dim=head_dim, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) self.drop_path2 
= DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): cls_embed = x[:, :1] cls_embed = cls_embed + self.drop_path1(self.attn(self.norm1(x))) cls_embed = cls_embed + self.drop_path2(self.mlp(self.norm2(cls_embed))) return torch.cat([cls_embed, x[:, 1:]], dim=1) def get_block(block_type, **kargs): if block_type == 'ca': return ClassBlock(**kargs) def rand_bbox(size, lam, scale=1): W = size[1] // scale H = size[2] // scale cut_rat = np.sqrt(1.0 - lam) cut_w = (W * cut_rat).astype(int) cut_h = (H * cut_rat).astype(int) cx = np.random.randint(W) cy = np.random.randint(H) bbx1 = np.clip(cx - cut_w // 2, 0, W) bby1 = np.clip(cy - cut_h // 2, 0, H) bbx2 = np.clip(cx + cut_w // 2, 0, W) bby2 = np.clip(cy + cut_h // 2, 0, H) return (bbx1, bby1, bbx2, bby2) class PatchEmbed(nn.Module): def __init__(self, img_size=224, stem_conv=False, stem_stride=1, patch_size=8, in_chans=3, hidden_dim=64, embed_dim=384): super().__init__() assert patch_size in [4, 8, 16] if stem_conv: self.conv = nn.Sequential(nn.Conv2d(in_chans, hidden_dim, kernel_size=7, stride=stem_stride, padding=3, bias=False), nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True), nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True), nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True)) else: self.conv = None self.proj = nn.Conv2d(hidden_dim, embed_dim, kernel_size=patch_size // stem_stride, stride=patch_size // stem_stride) self.num_patches = img_size // patch_size * (img_size // patch_size) def forward(self, x): if self.conv is not None: x = self.conv(x) x = self.proj(x) return x class Downsample(nn.Module): def __init__(self, in_embed_dim, out_embed_dim, patch_size=2): super().__init__() self.proj = nn.Conv2d(in_embed_dim, out_embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): x = x.permute(0, 3, 1, 2) x = self.proj(x) x = x.permute(0, 2, 3, 1) return x def outlooker_blocks(block_fn, index, dim, layers, num_heads=1, kernel_size=3, padding=1, stride=2, mlp_ratio=3.0, qkv_bias=False, attn_drop=0, drop_path_rate=0.0, **kwargs): blocks = [] for block_idx in range(layers[index]): block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) blocks.append(block_fn(dim, kernel_size=kernel_size, padding=padding, stride=stride, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, attn_drop=attn_drop, drop_path=block_dpr)) blocks = nn.Sequential(*blocks) return blocks def transformer_blocks(block_fn, index, dim, layers, num_heads, mlp_ratio=3.0, qkv_bias=False, attn_drop=0, drop_path_rate=0.0, **kwargs): blocks = [] for block_idx in range(layers[index]): block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) blocks.append(block_fn(dim, num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, attn_drop=attn_drop, drop_path=block_dpr)) blocks = nn.Sequential(*blocks) return blocks class VOLO(nn.Module): def __init__(self, layers, img_size=224, in_chans=3, num_classes=1000, global_pool='token', patch_size=8, stem_hidden_dim=64, embed_dims=None, num_heads=None, downsamples=(True, False, False, False), outlook_attention=(True, False, False, False), mlp_ratio=3.0, qkv_bias=False, drop_rate=0.0, pos_drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=nn.LayerNorm, post_layers=('ca', 'ca'), use_aux_head=True, use_mix_token=False, pooling_scale=2): super().__init__() num_layers = 
len(layers) mlp_ratio = to_ntuple(num_layers)(mlp_ratio) img_size = to_2tuple(img_size) self.num_classes = num_classes self.global_pool = global_pool self.mix_token = use_mix_token self.pooling_scale = pooling_scale self.num_features = self.head_hidden_size = embed_dims[-1] if use_mix_token: self.beta = 1.0 assert global_pool == 'token', 'return all tokens if mix_token is enabled' self.grad_checkpointing = False self.patch_embed = PatchEmbed(stem_conv=True, stem_stride=2, patch_size=patch_size, in_chans=in_chans, hidden_dim=stem_hidden_dim, embed_dim=embed_dims[0]) r = patch_size patch_grid = (img_size[0] // patch_size // pooling_scale, img_size[1] // patch_size // pooling_scale) self.pos_embed = nn.Parameter(torch.zeros(1, patch_grid[0], patch_grid[1], embed_dims[-1])) self.pos_drop = nn.Dropout(p=pos_drop_rate) self.stage_ends = [] self.feature_info = [] network = [] block_idx = 0 for i in range(len(layers)): if outlook_attention[i]: stage = outlooker_blocks(Outlooker, i, embed_dims[i], layers, num_heads[i], mlp_ratio=mlp_ratio[i], qkv_bias=qkv_bias, attn_drop=attn_drop_rate, norm_layer=norm_layer) else: stage = transformer_blocks(Transformer, i, embed_dims[i], layers, num_heads[i], mlp_ratio=mlp_ratio[i], qkv_bias=qkv_bias, drop_path_rate=drop_path_rate, attn_drop=attn_drop_rate, norm_layer=norm_layer) network.append(stage) self.stage_ends.append(block_idx) self.feature_info.append(dict(num_chs=embed_dims[i], reduction=r, module=f'network.{block_idx}')) block_idx += 1 if downsamples[i]: network.append(Downsample(embed_dims[i], embed_dims[i + 1], 2)) r *= 2 block_idx += 1 self.network = nn.ModuleList(network) self.post_network = None if post_layers is not None: self.post_network = nn.ModuleList([get_block(post_layers[i], dim=embed_dims[-1], num_heads=num_heads[-1], mlp_ratio=mlp_ratio[-1], qkv_bias=qkv_bias, attn_drop=attn_drop_rate, drop_path=0.0, norm_layer=norm_layer) for i in range(len(post_layers))]) self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims[-1])) trunc_normal_(self.cls_token, std=0.02) if use_aux_head: self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() else: self.aux_head = None self.norm = norm_layer(self.num_features) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.pos_embed, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^cls_token|pos_embed|patch_embed', blocks=[('^network\\.(\\d+)\\.(\\d+)', None), ('^network\\.(\\d+)', (0,))], blocks2=[('^cls_token', (0,)), ('^post_network\\.(\\d+)', None), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() if self.aux_head is not None: self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_tokens(self, x): for (idx, 
block) in enumerate(self.network): if idx == 2: x = x + self.pos_embed x = self.pos_drop(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint(block, x) else: x = block(x) (B, H, W, C) = x.shape x = x.reshape(B, -1, C) return x def forward_cls(self, x): (B, N, C) = x.shape cls_tokens = self.cls_token.expand(B, -1, -1) x = torch.cat([cls_tokens, x], dim=1) for block in self.post_network: if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint(block, x) else: x = block(x) return x def forward_train(self, x): x = self.patch_embed(x) x = x.permute(0, 2, 3, 1) if self.mix_token and self.training: lam = np.random.beta(self.beta, self.beta) (patch_h, patch_w) = (x.shape[1] // self.pooling_scale, x.shape[2] // self.pooling_scale) (bbx1, bby1, bbx2, bby2) = rand_bbox(x.size(), lam, scale=self.pooling_scale) temp_x = x.clone() (sbbx1, sbby1) = (self.pooling_scale * bbx1, self.pooling_scale * bby1) (sbbx2, sbby2) = (self.pooling_scale * bbx2, self.pooling_scale * bby2) temp_x[:, sbbx1:sbbx2, sbby1:sbby2, :] = x.flip(0)[:, sbbx1:sbbx2, sbby1:sbby2, :] x = temp_x else: (bbx1, bby1, bbx2, bby2) = (0, 0, 0, 0) x = self.forward_tokens(x) if self.post_network is not None: x = self.forward_cls(x) x = self.norm(x) if self.global_pool == 'avg': x_cls = x.mean(dim=1) elif self.global_pool == 'token': x_cls = x[:, 0] else: x_cls = x if self.aux_head is None: return x_cls x_aux = self.aux_head(x[:, 1:]) if not self.training: return x_cls + 0.5 * x_aux.max(1)[0] if self.mix_token and self.training: x_aux = x_aux.reshape(x_aux.shape[0], patch_h, patch_w, x_aux.shape[-1]) temp_x = x_aux.clone() temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(0)[:, bbx1:bbx2, bby1:bby2, :] x_aux = temp_x x_aux = x_aux.reshape(x_aux.shape[0], patch_h * patch_w, x_aux.shape[-1]) return (x_cls, x_aux, (bbx1, bby1, bbx2, bby2)) def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW',), 'Output format must be NCHW.' 
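# (Added commentary, not part of the original source.) forward_intermediates
# translates per-stage 'indices' into absolute block positions through
# self.stage_ends, collects features in NHWC inside the loop below, and permutes
# each to NCHW before returning. Note self.norm is only applied to intermediates
# at network index >= 2, where the channel count already matches embed_dims[-1]
# (after the single Downsample inserted between the outlooker and transformer stages).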
intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.stage_ends), indices) take_indices = [self.stage_ends[i] for i in take_indices] max_index = self.stage_ends[max_index] (B, _, height, width) = x.shape x = self.patch_embed(x).permute(0, 2, 3, 1) if torch.jit.is_scripting() or not stop_early: network = self.network else: network = self.network[:max_index + 1] for (idx, block) in enumerate(network): if idx == 2: x = x + self.pos_embed x = self.pos_drop(x) x = block(x) if idx in take_indices: if norm and idx >= 2: x_inter = self.norm(x) else: x_inter = x intermediates.append(x_inter.permute(0, 3, 1, 2)) if intermediates_only: return intermediates (B, H, W, C) = x.shape x = x.reshape(B, -1, C) if self.post_network is not None: x = self.forward_cls(x) x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.stage_ends), indices) max_index = self.stage_ends[max_index] self.network = self.network[:max_index + 1] if prune_norm: self.norm = nn.Identity() if prune_head: self.post_network = nn.ModuleList() self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x).permute(0, 2, 3, 1) x = self.forward_tokens(x) if self.post_network is not None: x = self.forward_cls(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool == 'avg': out = x.mean(dim=1) elif self.global_pool == 'token': out = x[:, 0] else: out = x x = self.head_drop(x) if pre_logits: return out out = self.head(out) if self.aux_head is not None: aux = self.aux_head(x[:, 1:]) out = out + 0.5 * aux.max(1)[0] return out def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_volo(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) return build_model_with_cfg(VOLO, variant, pretrained, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.96, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.conv.0', 'classifier': ('head', 'aux_head'), **kwargs} default_cfgs = generate_default_cfgs({'volo_d1_224.sail_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_224_84.2.pth.tar', crop_pct=0.96), 'volo_d1_384.sail_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_384_85.2.pth.tar', crop_pct=1.0, input_size=(3, 384, 384)), 'volo_d2_224.sail_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_224_85.2.pth.tar', crop_pct=0.96), 'volo_d2_384.sail_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_384_86.0.pth.tar', crop_pct=1.0, input_size=(3, 384, 384)), 'volo_d3_224.sail_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_224_85.4.pth.tar', crop_pct=0.96), 'volo_d3_448.sail_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_448_86.3.pth.tar', crop_pct=1.0, input_size=(3, 448, 448)), 'volo_d4_224.sail_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_224_85.7.pth.tar', 
crop_pct=0.96), 'volo_d4_448.sail_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_448_86.79.pth.tar', crop_pct=1.15, input_size=(3, 448, 448)), 'volo_d5_224.sail_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_224_86.10.pth.tar', crop_pct=0.96), 'volo_d5_448.sail_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_448_87.0.pth.tar', crop_pct=1.15, input_size=(3, 448, 448)), 'volo_d5_512.sail_in1k': _cfg(hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_512_87.07.pth.tar', crop_pct=1.15, input_size=(3, 512, 512))}) @register_model def volo_d1_224(pretrained=False, **kwargs) -> VOLO: model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs) model = _create_volo('volo_d1_224', pretrained=pretrained, **model_args) return model @register_model def volo_d1_384(pretrained=False, **kwargs) -> VOLO: model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs) model = _create_volo('volo_d1_384', pretrained=pretrained, **model_args) return model @register_model def volo_d2_224(pretrained=False, **kwargs) -> VOLO: model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d2_224', pretrained=pretrained, **model_args) return model @register_model def volo_d2_384(pretrained=False, **kwargs) -> VOLO: model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d2_384', pretrained=pretrained, **model_args) return model @register_model def volo_d3_224(pretrained=False, **kwargs) -> VOLO: model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d3_224', pretrained=pretrained, **model_args) return model @register_model def volo_d3_448(pretrained=False, **kwargs) -> VOLO: model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d3_448', pretrained=pretrained, **model_args) return model @register_model def volo_d4_224(pretrained=False, **kwargs) -> VOLO: model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs) model = _create_volo('volo_d4_224', pretrained=pretrained, **model_args) return model @register_model def volo_d4_448(pretrained=False, **kwargs) -> VOLO: model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs) model = _create_volo('volo_d4_448', pretrained=pretrained, **model_args) return model @register_model def volo_d5_224(pretrained=False, **kwargs) -> VOLO: model_args = dict(layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), mlp_ratio=4, stem_hidden_dim=128, **kwargs) model = _create_volo('volo_d5_224', pretrained=pretrained, **model_args) return model @register_model def volo_d5_448(pretrained=False, **kwargs) -> VOLO: model_args = dict(layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), mlp_ratio=4, stem_hidden_dim=128, **kwargs) model = _create_volo('volo_d5_448', pretrained=pretrained, **model_args) return model @register_model def volo_d5_512(pretrained=False, **kwargs) -> VOLO: model_args = dict(layers=(12, 12, 20, 4), embed_dims=(384, 768, 
768, 768), num_heads=(12, 16, 16, 16), mlp_ratio=4, stem_hidden_dim=128, **kwargs) model = _create_volo('volo_d5_512', pretrained=pretrained, **model_args) return model # File: pytorch-image-models-main/timm/models/vovnet.py """""" from typing import List, Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ConvNormAct, SeparableConvNormAct, BatchNormAct2d, ClassifierHead, DropPath, create_attn, create_norm_act_layer from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['VovNet'] class SequentialAppendList(nn.Sequential): def __init__(self, *args): super(SequentialAppendList, self).__init__(*args) def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor: for (i, module) in enumerate(self): if i == 0: concat_list.append(module(x)) else: concat_list.append(module(concat_list[-1])) x = torch.cat(concat_list, dim=1) return x class OsaBlock(nn.Module): def __init__(self, in_chs, mid_chs, out_chs, layer_per_block, residual=False, depthwise=False, attn='', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path=None): super(OsaBlock, self).__init__() self.residual = residual self.depthwise = depthwise conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) next_in_chs = in_chs if self.depthwise and next_in_chs != mid_chs: assert not residual self.conv_reduction = ConvNormAct(next_in_chs, mid_chs, 1, **conv_kwargs) else: self.conv_reduction = None mid_convs = [] for i in range(layer_per_block): if self.depthwise: conv = SeparableConvNormAct(mid_chs, mid_chs, **conv_kwargs) else: conv = ConvNormAct(next_in_chs, mid_chs, 3, **conv_kwargs) next_in_chs = mid_chs mid_convs.append(conv) self.conv_mid = SequentialAppendList(*mid_convs) next_in_chs = in_chs + layer_per_block * mid_chs self.conv_concat = ConvNormAct(next_in_chs, out_chs, **conv_kwargs) self.attn = create_attn(attn, out_chs) if attn else None self.drop_path = drop_path def forward(self, x): output = [x] if self.conv_reduction is not None: x = self.conv_reduction(x) x = self.conv_mid(x, output) x = self.conv_concat(x) if self.attn is not None: x = self.attn(x) if self.drop_path is not None: x = self.drop_path(x) if self.residual: x = x + output[0] return x class OsaStage(nn.Module): def __init__(self, in_chs, mid_chs, out_chs, block_per_stage, layer_per_block, downsample=True, residual=True, depthwise=False, attn='ese', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path_rates=None): super(OsaStage, self).__init__() self.grad_checkpointing = False if downsample: self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) else: self.pool = None blocks = [] for i in range(block_per_stage): last_block = i == block_per_stage - 1 if drop_path_rates is not None and drop_path_rates[i] > 0.0: drop_path = DropPath(drop_path_rates[i]) else: drop_path = None blocks += [OsaBlock(in_chs, mid_chs, out_chs, layer_per_block, residual=residual and i > 0, depthwise=depthwise, attn=attn if last_block else '', norm_layer=norm_layer, act_layer=act_layer, drop_path=drop_path)] in_chs = out_chs self.blocks = nn.Sequential(*blocks) def forward(self, x): if self.pool is not None: x = self.pool(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class VovNet(nn.Module): def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, 
norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_rate=0.0, drop_path_rate=0.0, **kwargs): super(VovNet, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate assert output_stride == 32 cfg = dict(cfg, **kwargs) stem_stride = cfg.get('stem_stride', 4) stem_chs = cfg['stem_chs'] stage_conv_chs = cfg['stage_conv_chs'] stage_out_chs = cfg['stage_out_chs'] block_per_stage = cfg['block_per_stage'] layer_per_block = cfg['layer_per_block'] conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) last_stem_stride = stem_stride // 2 conv_type = SeparableConvNormAct if cfg['depthwise'] else ConvNormAct self.stem = nn.Sequential(*[ConvNormAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs), conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs), conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs)]) self.feature_info = [dict(num_chs=stem_chs[1], reduction=2, module=f'stem.{(1 if stem_stride == 4 else 2)}')] current_stride = stem_stride stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage) in_ch_list = stem_chs[-1:] + stage_out_chs[:-1] stage_args = dict(residual=cfg['residual'], depthwise=cfg['depthwise'], attn=cfg['attn'], **conv_kwargs) stages = [] for i in range(4): downsample = stem_stride == 2 or i > 0 stages += [OsaStage(in_ch_list[i], stage_conv_chs[i], stage_out_chs[i], block_per_stage[i], layer_per_block, downsample=downsample, drop_path_rates=stage_dpr[i], **stage_args)] self.num_features = stage_out_chs[i] current_stride *= 2 if downsample else 1 self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.head_hidden_size = self.num_features self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) for (n, m) in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.Linear): nn.init.zeros_(m.bias) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks='^stages\\.(\\d+)' if coarse else '^stages\\.(\\d+).blocks\\.(\\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes, global_pool: Optional[str]=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) return self.stages(x) def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x model_cfgs = dict(vovnet39a=dict(stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 2, 2], residual=False, depthwise=False, attn=''), vovnet57a=dict(stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 4, 3], residual=False, depthwise=False, attn=''), ese_vovnet19b_slim_dw=dict(stem_chs=[64, 64, 64], stage_conv_chs=[64, 80, 96, 112], stage_out_chs=[112, 256, 384, 512], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=True, attn='ese'), ese_vovnet19b_dw=dict(stem_chs=[64, 64, 64], stage_conv_chs=[128, 160, 192, 
224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=True, attn='ese'), ese_vovnet19b_slim=dict(stem_chs=[64, 64, 128], stage_conv_chs=[64, 80, 96, 112], stage_out_chs=[112, 256, 384, 512], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=False, attn='ese'), ese_vovnet19b=dict(stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=False, attn='ese'), ese_vovnet39b=dict(stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 2, 2], residual=True, depthwise=False, attn='ese'), ese_vovnet57b=dict(stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 4, 3], residual=True, depthwise=False, attn='ese'), ese_vovnet99b=dict(stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 3, 9, 3], residual=True, depthwise=False, attn='ese'), eca_vovnet39b=dict(stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 2, 2], residual=True, depthwise=False, attn='eca')) model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b'] def _create_vovnet(variant, pretrained=False, **kwargs): return build_model_with_cfg(VovNet, variant, pretrained, model_cfg=model_cfgs[variant], feature_cfg=dict(flatten_sequential=True), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'vovnet39a.untrained': _cfg(url=''), 'vovnet57a.untrained': _cfg(url=''), 'ese_vovnet19b_slim_dw.untrained': _cfg(url=''), 'ese_vovnet19b_dw.ra_in1k': _cfg(hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'ese_vovnet19b_slim.untrained': _cfg(url=''), 'ese_vovnet39b.ra_in1k': _cfg(hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'ese_vovnet57b.untrained': _cfg(url=''), 'ese_vovnet99b.untrained': _cfg(url=''), 'eca_vovnet39b.untrained': _cfg(url=''), 'ese_vovnet39b_evos.untrained': _cfg(url='')}) @register_model def vovnet39a(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs) @register_model def vovnet57a(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('vovnet57a', pretrained=pretrained, **kwargs) @register_model def ese_vovnet19b_slim_dw(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs) @register_model def ese_vovnet19b_dw(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs) @register_model def ese_vovnet19b_slim(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs) @register_model def ese_vovnet39b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs) @register_model def ese_vovnet57b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet57b', 
pretrained=pretrained, **kwargs) @register_model def ese_vovnet99b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs) @register_model def eca_vovnet39b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs) @register_model def ese_vovnet39b_evos(pretrained=False, **kwargs) -> VovNet: def norm_act_fn(num_features, **nkwargs): return create_norm_act_layer('evonorms0', num_features, jit=False, **nkwargs) return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs) # File: pytorch-image-models-main/timm/models/xception.py """""" import torch.jit import torch.nn as nn import torch.nn.functional as F from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['Xception'] class SeparableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1): super(SeparableConv2d, self).__init__() self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=False) self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=False) def forward(self, x): x = self.conv1(x) x = self.pointwise(x) return x class Block(nn.Module): def __init__(self, in_channels, out_channels, reps, strides=1, start_with_relu=True, grow_first=True): super(Block, self).__init__() if out_channels != in_channels or strides != 1: self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False) self.skipbn = nn.BatchNorm2d(out_channels) else: self.skip = None rep = [] for i in range(reps): if grow_first: inc = in_channels if i == 0 else out_channels outc = out_channels else: inc = in_channels outc = in_channels if i < reps - 1 else out_channels rep.append(nn.ReLU(inplace=True)) rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1)) rep.append(nn.BatchNorm2d(outc)) if not start_with_relu: rep = rep[1:] else: rep[0] = nn.ReLU(inplace=False) if strides != 1: rep.append(nn.MaxPool2d(3, strides, 1)) self.rep = nn.Sequential(*rep) def forward(self, inp): x = self.rep(inp) if self.skip is not None: skip = self.skip(inp) skip = self.skipbn(skip) else: skip = inp x += skip return x class Xception(nn.Module): def __init__(self, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'): super(Xception, self).__init__() self.drop_rate = drop_rate self.global_pool = global_pool self.num_classes = num_classes self.num_features = self.head_hidden_size = 2048 self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False) self.bn1 = nn.BatchNorm2d(32) self.act1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(32, 64, 3, bias=False) self.bn2 = nn.BatchNorm2d(64) self.act2 = nn.ReLU(inplace=True) self.block1 = Block(64, 128, 2, 2, start_with_relu=False) self.block2 = Block(128, 256, 2, 2) self.block3 = Block(256, 728, 2, 2) self.block4 = Block(728, 728, 3, 1) self.block5 = Block(728, 728, 3, 1) self.block6 = Block(728, 728, 3, 1) self.block7 = Block(728, 728, 3, 1) self.block8 = Block(728, 728, 3, 1) self.block9 = Block(728, 728, 3, 1) self.block10 = Block(728, 728, 3, 1) self.block11 = Block(728, 728, 3, 1) self.block12 = Block(728, 1024, 2, 2, grow_first=False) self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) self.bn3 = nn.BatchNorm2d(1536) self.act3 = nn.ReLU(inplace=True) self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1) 
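# (Added commentary, not part of the original source.) SeparableConv2d factors a
# dense KxK convolution into depthwise KxK plus pointwise 1x1. For conv4 above
# (1536 -> 2048, K=3): a dense 3x3 conv would cost 1536 * 2048 * 9 ~= 28.3M weights,
# while the separable pair costs 1536 * 9 + 1536 * 2048 ~= 3.16M, roughly a 9x reduction.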
self.bn4 = nn.BatchNorm2d(self.num_features) self.act4 = nn.ReLU(inplace=True) self.feature_info = [dict(num_chs=64, reduction=2, module='act2'), dict(num_chs=128, reduction=4, module='block2.rep.0'), dict(num_chs=256, reduction=8, module='block3.rep.0'), dict(num_chs=728, reduction=16, module='block12.rep.0'), dict(num_chs=2048, reduction=32, module='act4')] (self.global_pool, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^conv[12]|bn[12]', blocks=[('^block(\\d+)', None), ('^conv[34]|bn[34]', (99,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.fc def reset_classifier(self, num_classes: int, global_pool: str='avg'): self.num_classes = num_classes (self.global_pool, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.conv1(x) x = self.bn1(x) x = self.act1(x) x = self.conv2(x) x = self.bn2(x) x = self.act2(x) x = self.block1(x) x = self.block2(x) x = self.block3(x) x = self.block4(x) x = self.block5(x) x = self.block6(x) x = self.block7(x) x = self.block8(x) x = self.block9(x) x = self.block10(x) x = self.block11(x) x = self.block12(x) x = self.conv3(x) x = self.bn3(x) x = self.act3(x) x = self.conv4(x) x = self.bn4(x) x = self.act4(x) return x def forward_head(self, x, pre_logits: bool=False): x = self.global_pool(x) if self.drop_rate: x = F.dropout(x, self.drop_rate, training=self.training) return x if pre_logits else self.fc(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _xception(variant, pretrained=False, **kwargs): return build_model_with_cfg(Xception, variant, pretrained, feature_cfg=dict(feature_cls='hook'), **kwargs) default_cfgs = generate_default_cfgs({'legacy_xception.tf_in1k': {'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth', 'input_size': (3, 299, 299), 'pool_size': (10, 10), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'num_classes': 1000, 'first_conv': 'conv1', 'classifier': 'fc'}}) @register_model def legacy_xception(pretrained=False, **kwargs) -> Xception: return _xception('legacy_xception', pretrained=pretrained, **kwargs) register_model_deprecations(__name__, {'xception': 'legacy_xception'})
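# Usage sketch for the model registered above (hedged: assumes a standard timm
# install; batch size, class count, and input shapes are illustrative only).
import timm
import torch

model = timm.create_model('legacy_xception', pretrained=False, num_classes=10)
logits = model(torch.randn(2, 3, 299, 299))  # -> (2, 10), per the 299x299 default cfg
# features_only returns the 5 maps declared in feature_info (reductions 2/4/8/16/32),
# extracted via the hook-based feature_cfg passed to build_model_with_cfg above:
backbone = timm.create_model('legacy_xception', features_only=True, pretrained=False)
feature_maps = backbone(torch.randn(2, 3, 299, 299))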
# File: pytorch-image-models-main/timm/models/xception_aligned.py """""" from functools import partial from typing import List, Dict, Type, Optional import torch import torch.nn as nn from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import ClassifierHead, ConvNormAct, DropPath, PadType, create_conv2d, get_norm_act_layer from timm.layers.helpers import to_3tuple from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['XceptionAligned'] class SeparableConv2d(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int=3, stride: int=1, dilation: int=1, padding: PadType='', act_layer: Type[nn.Module]=nn.ReLU, norm_layer: Type[nn.Module]=nn.BatchNorm2d): super(SeparableConv2d, self).__init__() self.kernel_size = kernel_size self.dilation = dilation self.conv_dw = create_conv2d(in_chs, in_chs, kernel_size, stride=stride, padding=padding, dilation=dilation, depthwise=True) self.bn_dw = norm_layer(in_chs) self.act_dw = act_layer(inplace=True) if act_layer is not None else nn.Identity() self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1) self.bn_pw = norm_layer(out_chs) self.act_pw = act_layer(inplace=True) if act_layer is not None else nn.Identity() def forward(self, x): x = self.conv_dw(x) x = self.bn_dw(x) x = self.act_dw(x) x = self.conv_pw(x) x = self.bn_pw(x) x = self.act_pw(x) return x class PreSeparableConv2d(nn.Module): def __init__(self, in_chs: int, out_chs: int, kernel_size: int=3, stride: int=1, dilation: int=1, padding: PadType='', act_layer: Type[nn.Module]=nn.ReLU, norm_layer: Type[nn.Module]=nn.BatchNorm2d, first_act: bool=True): super(PreSeparableConv2d, self).__init__() norm_act_layer = get_norm_act_layer(norm_layer, act_layer=act_layer) self.kernel_size = kernel_size self.dilation = dilation self.norm = norm_act_layer(in_chs, inplace=True) if first_act else nn.Identity() self.conv_dw = create_conv2d(in_chs, in_chs, kernel_size, stride=stride, padding=padding, dilation=dilation, depthwise=True) self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1) def forward(self, x): x = self.norm(x) x = self.conv_dw(x) x = self.conv_pw(x) return x class XceptionModule(nn.Module): def __init__(self, in_chs: int, out_chs: int, stride: int=1, dilation: int=1, pad_type: PadType='', start_with_relu: bool=True, no_skip: bool=False, act_layer: Type[nn.Module]=nn.ReLU, norm_layer: Optional[Type[nn.Module]]=None, drop_path: Optional[nn.Module]=None): super(XceptionModule, self).__init__() out_chs = to_3tuple(out_chs) self.in_channels = in_chs self.out_channels = out_chs[-1] self.no_skip = no_skip if not no_skip and (self.out_channels != self.in_channels or stride != 1): self.shortcut = ConvNormAct(in_chs, self.out_channels, 1, stride=stride, norm_layer=norm_layer, apply_act=False) else: self.shortcut = None separable_act_layer = None if start_with_relu else act_layer self.stack = nn.Sequential() for i in range(3): if start_with_relu: self.stack.add_module(f'act{i + 1}', act_layer(inplace=i > 0)) self.stack.add_module(f'conv{i + 1}', SeparableConv2d(in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type, act_layer=separable_act_layer, norm_layer=norm_layer)) in_chs = out_chs[i] self.drop_path = drop_path def forward(self, x): skip = x x = self.stack(x) if self.shortcut is not None: skip = self.shortcut(skip) if not self.no_skip: if self.drop_path is not None: x = self.drop_path(x) x = x + skip return x class PreXceptionModule(nn.Module): def __init__(self, in_chs: int, out_chs: int, stride: int=1, dilation: int=1, pad_type: PadType='', no_skip: bool=False, act_layer: Type[nn.Module]=nn.ReLU, norm_layer: Optional[Type[nn.Module]]=None, drop_path: Optional[nn.Module]=None): super(PreXceptionModule, self).__init__() out_chs = to_3tuple(out_chs) self.in_channels = in_chs self.out_channels = out_chs[-1] self.no_skip = no_skip if not no_skip and (self.out_channels != self.in_channels or stride != 1): self.shortcut = create_conv2d(in_chs, self.out_channels, 1, stride=stride) else: self.shortcut = nn.Identity() self.norm = get_norm_act_layer(norm_layer, act_layer=act_layer)(in_chs, inplace=True) self.stack = nn.Sequential() for i in range(3): self.stack.add_module(f'conv{i
+ 1}', PreSeparableConv2d(in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type, act_layer=act_layer, norm_layer=norm_layer, first_act=i > 0)) in_chs = out_chs[i] self.drop_path = drop_path def forward(self, x): x = self.norm(x) skip = x x = self.stack(x) if not self.no_skip: if self.drop_path is not None: x = self.drop_path(x) x = x + self.shortcut(skip) return x class XceptionAligned(nn.Module): def __init__(self, block_cfg: List[Dict], num_classes: int=1000, in_chans: int=3, output_stride: int=32, preact: bool=False, act_layer: Type[nn.Module]=nn.ReLU, norm_layer: Type[nn.Module]=nn.BatchNorm2d, drop_rate: float=0.0, drop_path_rate: float=0.0, global_pool: str='avg'): super(XceptionAligned, self).__init__() assert output_stride in (8, 16, 32) self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False layer_args = dict(act_layer=act_layer, norm_layer=norm_layer) self.stem = nn.Sequential(*[ConvNormAct(in_chans, 32, kernel_size=3, stride=2, **layer_args), create_conv2d(32, 64, kernel_size=3, stride=1) if preact else ConvNormAct(32, 64, kernel_size=3, stride=1, **layer_args)]) curr_dilation = 1 curr_stride = 2 self.feature_info = [] self.blocks = nn.Sequential() module_fn = PreXceptionModule if preact else XceptionModule net_num_blocks = len(block_cfg) net_block_idx = 0 for (i, b) in enumerate(block_cfg): block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) b['drop_path'] = DropPath(block_dpr) if block_dpr > 0.0 else None b['dilation'] = curr_dilation if b['stride'] > 1: name = f'blocks.{i}.stack.conv2' if preact else f'blocks.{i}.stack.act3' self.feature_info += [dict(num_chs=to_3tuple(b['out_chs'])[-2], reduction=curr_stride, module=name)] next_stride = curr_stride * b['stride'] if next_stride > output_stride: curr_dilation *= b['stride'] b['stride'] = 1 else: curr_stride = next_stride self.blocks.add_module(str(i), module_fn(**b, **layer_args)) self.num_features = self.blocks[-1].out_channels net_block_idx += 1 self.feature_info += [dict(num_chs=self.num_features, reduction=curr_stride, module='blocks.' 
+ str(len(self.blocks) - 1))] self.act = act_layer(inplace=True) if preact else nn.Identity() self.head_hidden_size = self.num_features self.head = ClassifierHead(in_features=self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^stem', blocks='^blocks\\.(\\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.act(x) return x def forward_head(self, x, pre_logits: bool=False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _xception(variant, pretrained=False, **kwargs): return build_model_with_cfg(XceptionAligned, variant, pretrained, feature_cfg=dict(flatten_sequential=True, feature_cls='hook'), **kwargs) def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (10, 10), 'crop_pct': 0.903, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs} default_cfgs = generate_default_cfgs({'xception65.ra3_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.94), 'xception41.tf_in1k': _cfg(hf_hub_id='timm/'), 'xception65.tf_in1k': _cfg(hf_hub_id='timm/'), 'xception71.tf_in1k': _cfg(hf_hub_id='timm/'), 'xception41p.ra3_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.94), 'xception65p.ra3_in1k': _cfg(hf_hub_id='timm/', crop_pct=0.94)}) @register_model def xception41(pretrained=False, **kwargs) -> XceptionAligned: block_cfg = [dict(in_chs=64, out_chs=128, stride=2), dict(in_chs=128, out_chs=256, stride=2), dict(in_chs=256, out_chs=728, stride=2), *[dict(in_chs=728, out_chs=728, stride=1)] * 8, dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False)] model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1)) return _xception('xception41', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def xception65(pretrained=False, **kwargs) -> XceptionAligned: block_cfg = [dict(in_chs=64, out_chs=128, stride=2), dict(in_chs=128, out_chs=256, stride=2), dict(in_chs=256, out_chs=728, stride=2), *[dict(in_chs=728, out_chs=728, stride=1)] * 16, dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False)] model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1)) return _xception('xception65', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def xception71(pretrained=False, **kwargs) -> XceptionAligned: block_cfg = [dict(in_chs=64, out_chs=128, stride=2), dict(in_chs=128, out_chs=256, stride=1), dict(in_chs=256, out_chs=256, stride=2), dict(in_chs=256, out_chs=728, stride=1), dict(in_chs=728, out_chs=728, stride=2), *[dict(in_chs=728, out_chs=728, stride=1)] * 16, dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), dict(in_chs=1024, 
out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False)] model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1)) return _xception('xception71', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def xception41p(pretrained=False, **kwargs) -> XceptionAligned: block_cfg = [dict(in_chs=64, out_chs=128, stride=2), dict(in_chs=128, out_chs=256, stride=2), dict(in_chs=256, out_chs=728, stride=2), *[dict(in_chs=728, out_chs=728, stride=1)] * 8, dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), dict(in_chs=1024, out_chs=(1536, 1536, 2048), no_skip=True, stride=1)] model_args = dict(block_cfg=block_cfg, preact=True, norm_layer=nn.BatchNorm2d) return _xception('xception41p', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def xception65p(pretrained=False, **kwargs) -> XceptionAligned: block_cfg = [dict(in_chs=64, out_chs=128, stride=2), dict(in_chs=128, out_chs=256, stride=2), dict(in_chs=256, out_chs=728, stride=2), *[dict(in_chs=728, out_chs=728, stride=1)] * 16, dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True)] model_args = dict(block_cfg=block_cfg, preact=True, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1)) return _xception('xception65p', pretrained=pretrained, **dict(model_args, **kwargs)) # File: pytorch-image-models-main/timm/models/xcit.py """""" import math from functools import partial from typing import List, Optional, Tuple, Union import torch import torch.nn as nn from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, to_2tuple, use_fused_attn from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs, register_model_deprecations from .cait import ClassAttn from .vision_transformer import Mlp __all__ = ['Xcit'] @register_notrace_module class PositionalEncodingFourier(nn.Module): def __init__(self, hidden_dim=32, dim=768, temperature=10000): super().__init__() self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) self.scale = 2 * math.pi self.temperature = temperature self.hidden_dim = hidden_dim self.dim = dim self.eps = 1e-06 def forward(self, B: int, H: int, W: int): device = self.token_projection.weight.device dtype = self.token_projection.weight.dtype y_embed = torch.arange(1, H + 1, device=device).to(torch.float32).unsqueeze(1).repeat(1, 1, W) x_embed = torch.arange(1, W + 1, device=device).to(torch.float32).repeat(1, H, 1) y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale dim_t = torch.arange(self.hidden_dim, device=device).to(torch.float32) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3) pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) pos = self.token_projection(pos.to(dtype)) return pos.repeat(B, 1, 1, 1) def conv3x3(in_planes, out_planes, stride=1): return torch.nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=3, 
stride=stride, padding=1, bias=False), nn.BatchNorm2d(out_planes)) class ConvPatchEmbed(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU): super().__init__() img_size = to_2tuple(img_size) num_patches = img_size[1] // patch_size * (img_size[0] // patch_size) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches if patch_size == 16: self.proj = torch.nn.Sequential(conv3x3(in_chans, embed_dim // 8, 2), act_layer(), conv3x3(embed_dim // 8, embed_dim // 4, 2), act_layer(), conv3x3(embed_dim // 4, embed_dim // 2, 2), act_layer(), conv3x3(embed_dim // 2, embed_dim, 2)) elif patch_size == 8: self.proj = torch.nn.Sequential(conv3x3(in_chans, embed_dim // 4, 2), act_layer(), conv3x3(embed_dim // 4, embed_dim // 2, 2), act_layer(), conv3x3(embed_dim // 2, embed_dim, 2)) else: raise ValueError('For convolutional projection, patch size has to be in [8, 16]') def forward(self, x): x = self.proj(x) (Hp, Wp) = (x.shape[2], x.shape[3]) x = x.flatten(2).transpose(1, 2) return (x, (Hp, Wp)) class LPI(nn.Module): def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3): super().__init__() out_features = out_features or in_features padding = kernel_size // 2 self.conv1 = torch.nn.Conv2d(in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features) self.act = act_layer() self.bn = nn.BatchNorm2d(in_features) self.conv2 = torch.nn.Conv2d(in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features) def forward(self, x, H: int, W: int): (B, N, C) = x.shape x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.conv1(x) x = self.act(x) x = self.bn(x) x = self.conv2(x) x = x.reshape(B, C, N).permute(0, 2, 1) return x class ClassAttentionBlock(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1.0, tokens_norm=False): super().__init__() self.norm1 = norm_layer(dim) self.attn = ClassAttn(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) if eta is not None: self.gamma1 = nn.Parameter(eta * torch.ones(dim)) self.gamma2 = nn.Parameter(eta * torch.ones(dim)) else: (self.gamma1, self.gamma2) = (1.0, 1.0) self.tokens_norm = tokens_norm def forward(self, x): x_norm1 = self.norm1(x) x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1) x = x + self.drop_path(self.gamma1 * x_attn) if self.tokens_norm: x = self.norm2(x) else: x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1) x_res = x cls_token = x[:, 0:1] cls_token = self.gamma2 * self.mlp(cls_token) x = torch.cat([cls_token, x[:, 1:]], dim=1) x = x_res + self.drop_path(x) return x
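# The XCA module below computes "cross-covariance" attention: queries and keys are
# L2-normalized along the token axis and attention is taken between channels, so the
# attention map is (num_heads, head_dim, head_dim) regardless of sequence length N.
# A minimal standalone sketch of that math (plain torch; the function name and
# variables are illustrative, not part of this file):
import torch
import torch.nn.functional as F

def xca_attention_sketch(q, k, v, temperature):
    # q, k, v: (B, num_heads, head_dim, N) -- channel-by-token layout, matching
    # the permute(2, 0, 3, 4, 1) used in XCA.forward below
    q = F.normalize(q, dim=-1)
    k = F.normalize(k, dim=-1)
    attn = (q @ k.transpose(-2, -1)) * temperature  # (B, heads, d, d): cost is O(N * d^2)
    attn = attn.softmax(dim=-1)
    return attn @ v  # (B, num_heads, head_dim, N)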
class XCA(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads self.fused_attn = use_fused_attn(experimental=True) self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): (B, N, C) = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1) (q, k, v) = qkv.unbind(0) if self.fused_attn: q = torch.nn.functional.normalize(q, dim=-1) * self.temperature k = torch.nn.functional.normalize(k, dim=-1) x = torch.nn.functional.scaled_dot_product_attention(q, k, v, scale=1.0) else: q = torch.nn.functional.normalize(q, dim=-1) k = torch.nn.functional.normalize(k, dim=-1) attn = q @ k.transpose(-2, -1) * self.temperature attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.permute(0, 3, 1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x @torch.jit.ignore def no_weight_decay(self): return {'temperature'} class XCABlock(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, proj_drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1.0): super().__init__() self.norm1 = norm_layer(dim) self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm3 = norm_layer(dim) self.local_mp = LPI(in_features=dim, act_layer=act_layer) self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.gamma1 = nn.Parameter(eta * torch.ones(dim)) self.gamma3 = nn.Parameter(eta * torch.ones(dim)) self.gamma2 = nn.Parameter(eta * torch.ones(dim)) def forward(self, x, H: int, W: int): x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x))) x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W)) x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) return x class Xcit(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True, drop_rate=0.0, pos_drop_rate=0.0, proj_drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1.0, tokens_norm=False): super().__init__() assert global_pool in ('', 'avg', 'token') img_size = to_2tuple(img_size) assert img_size[0] % patch_size == 0 and img_size[1] % patch_size == 0, '`patch_size` should divide image dimensions evenly' norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-06) act_layer = act_layer or nn.GELU self.num_classes = num_classes self.num_features = self.head_hidden_size = self.embed_dim = embed_dim self.global_pool = global_pool self.grad_checkpointing = False self.patch_embed = ConvPatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer) r = patch_size self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_pos_embed: self.pos_embed = PositionalEncodingFourier(dim=embed_dim) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=pos_drop_rate) self.blocks = nn.ModuleList([XCABlock(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta) for _ in range(depth)]) self.feature_info = [dict(num_chs=embed_dim, reduction=r, module=f'blocks.{i}') for i in range(depth)] self.cls_attn_blocks = nn.ModuleList([ClassAttentionBlock(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=drop_rate, attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm) for _ in
range(cls_attn_layers)]) self.norm = norm_layer(embed_dim) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.cls_token, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem='^cls_token|pos_embed|patch_embed', blocks='^blocks\\.(\\d+)', cls_attn_blocks=[('^cls_attn_blocks\\.(\\d+)', None), ('^norm', (99999,))]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str]=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg', 'token') self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_intermediates(self, x: torch.Tensor, indices: Optional[Union[int, List[int]]]=None, norm: bool=False, stop_early: bool=False, output_fmt: str='NCHW', intermediates_only: bool=False) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' reshape = output_fmt == 'NCHW' intermediates = [] (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) (B, _, height, width) = x.shape (x, (Hp, Wp)) = self.patch_embed(x) if self.pos_embed is not None: pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) x = x + pos_encoding x = self.pos_drop(x) if torch.jit.is_scripting() or not stop_early: blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for (i, blk) in enumerate(blocks): x = blk(x, Hp, Wp) if i in take_indices: intermediates.append(self.norm(x) if norm else x) if reshape: intermediates = [y.reshape(B, Hp, Wp, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] if intermediates_only: return intermediates x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1) for blk in self.cls_attn_blocks: x = blk(x) x = self.norm(x) return (x, intermediates) def prune_intermediate_layers(self, indices: Union[int, List[int]]=1, prune_norm: bool=False, prune_head: bool=True): (take_indices, max_index) = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] if prune_norm: self.norm = nn.Identity() if prune_head: self.cls_attn_blocks = nn.ModuleList() self.reset_classifier(0, '') return take_indices def forward_features(self, x): B = x.shape[0] (x, (Hp, Wp)) = self.patch_embed(x) if self.pos_embed is not None: pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) x = x + pos_encoding x = self.pos_drop(x) for blk in self.blocks: if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint(blk, x, Hp, Wp) else: x = blk(x, Hp, Wp) x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1) for blk in self.cls_attn_blocks: if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint(blk, x) else: x = blk(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool=False): if self.global_pool: x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else 
x[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'model' in state_dict: state_dict = state_dict['model'] use_pos_embed = getattr(model, 'pos_embed', None) is not None pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')] for k in pos_embed_keys: if use_pos_embed: state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k) else: del state_dict[k] if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict(): num_ca_blocks = len(model.cls_attn_blocks) for i in range(num_ca_blocks): qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight') qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1]) for (j, subscript) in enumerate('qkv'): state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j] qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None) if qkv_bias is not None: qkv_bias = qkv_bias.reshape(3, -1) for (j, subscript) in enumerate('qkv'): state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j] return state_dict def _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs): out_indices = kwargs.pop('out_indices', 3) model = build_model_with_cfg(Xcit, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs) return model def _cfg(url='', **kwargs): return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head', **kwargs} default_cfgs = generate_default_cfgs({'xcit_nano_12_p16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'), 'xcit_nano_12_p16_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'), 'xcit_nano_12_p16_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_12_p16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'), 'xcit_tiny_12_p16_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'), 'xcit_tiny_12_p16_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_24_p16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'), 'xcit_tiny_24_p16_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'), 'xcit_tiny_24_p16_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_12_p16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'), 'xcit_small_12_p16_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'), 'xcit_small_12_p16_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', 
input_size=(3, 384, 384)), 'xcit_small_24_p16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'), 'xcit_small_24_p16_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'), 'xcit_small_24_p16_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_medium_24_p16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'), 'xcit_medium_24_p16_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'), 'xcit_medium_24_p16_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_large_24_p16_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'), 'xcit_large_24_p16_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'), 'xcit_large_24_p16_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_nano_12_p8_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'), 'xcit_nano_12_p8_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'), 'xcit_nano_12_p8_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_12_p8_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'), 'xcit_tiny_12_p8_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'), 'xcit_tiny_12_p8_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_24_p8_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'), 'xcit_tiny_24_p8_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'), 'xcit_tiny_24_p8_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_12_p8_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'), 'xcit_small_12_p8_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'), 'xcit_small_12_p8_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_24_p8_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'), 'xcit_small_24_p8_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'), 'xcit_small_24_p8_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_medium_24_p8_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'), 
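# Naming note for these cfgs: each key is '<model>.<pretrained tag>' -- '.fb_in1k'
# marks the original Facebook ImageNet-1k weights, '.fb_dist_in1k' their distilled
# counterparts, and 384-resolution entries override input_size=(3, 384, 384).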
'xcit_medium_24_p8_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'), 'xcit_medium_24_p8_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_large_24_p8_224.fb_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'), 'xcit_large_24_p8_224.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'), 'xcit_large_24_p8_384.fb_dist_in1k': _cfg(hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384))}) @register_model def xcit_nano_12_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_nano_12_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384) model = _create_xcit('xcit_nano_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-05, tokens_norm=True) model = 
_create_xcit('xcit_small_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_medium_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_large_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_nano_12_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_nano_12_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) model = _create_xcit('xcit_nano_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return 
model @register_model def xcit_small_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_small_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_medium_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict(patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-05, tokens_norm=True) model = _create_xcit('xcit_large_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, {'xcit_nano_12_p16_224_dist': 'xcit_nano_12_p16_224.fb_dist_in1k', 'xcit_nano_12_p16_384_dist': 'xcit_nano_12_p16_384.fb_dist_in1k', 'xcit_tiny_12_p16_224_dist': 'xcit_tiny_12_p16_224.fb_dist_in1k', 'xcit_tiny_12_p16_384_dist': 'xcit_tiny_12_p16_384.fb_dist_in1k', 'xcit_tiny_24_p16_224_dist': 'xcit_tiny_24_p16_224.fb_dist_in1k', 'xcit_tiny_24_p16_384_dist': 'xcit_tiny_24_p16_384.fb_dist_in1k', 'xcit_small_12_p16_224_dist': 'xcit_small_12_p16_224.fb_dist_in1k', 'xcit_small_12_p16_384_dist': 'xcit_small_12_p16_384.fb_dist_in1k', 'xcit_small_24_p16_224_dist': 'xcit_small_24_p16_224.fb_dist_in1k', 'xcit_small_24_p16_384_dist': 'xcit_small_24_p16_384.fb_dist_in1k', 'xcit_medium_24_p16_224_dist': 'xcit_medium_24_p16_224.fb_dist_in1k', 'xcit_medium_24_p16_384_dist': 'xcit_medium_24_p16_384.fb_dist_in1k', 'xcit_large_24_p16_224_dist': 'xcit_large_24_p16_224.fb_dist_in1k', 'xcit_large_24_p16_384_dist': 'xcit_large_24_p16_384.fb_dist_in1k', 'xcit_nano_12_p8_224_dist': 'xcit_nano_12_p8_224.fb_dist_in1k', 'xcit_nano_12_p8_384_dist': 'xcit_nano_12_p8_384.fb_dist_in1k', 'xcit_tiny_12_p8_224_dist': 'xcit_tiny_12_p8_224.fb_dist_in1k', 'xcit_tiny_12_p8_384_dist': 'xcit_tiny_12_p8_384.fb_dist_in1k', 'xcit_tiny_24_p8_224_dist': 'xcit_tiny_24_p8_224.fb_dist_in1k', 'xcit_tiny_24_p8_384_dist': 'xcit_tiny_24_p8_384.fb_dist_in1k', 'xcit_small_12_p8_224_dist': 'xcit_small_12_p8_224.fb_dist_in1k', 'xcit_small_12_p8_384_dist': 'xcit_small_12_p8_384.fb_dist_in1k', 'xcit_small_24_p8_224_dist': 'xcit_small_24_p8_224.fb_dist_in1k', 'xcit_small_24_p8_384_dist': 'xcit_small_24_p8_384.fb_dist_in1k', 'xcit_medium_24_p8_224_dist': 'xcit_medium_24_p8_224.fb_dist_in1k', 'xcit_medium_24_p8_384_dist': 'xcit_medium_24_p8_384.fb_dist_in1k', 'xcit_large_24_p8_224_dist': 
'xcit_large_24_p8_224.fb_dist_in1k', 'xcit_large_24_p8_384_dist': 'xcit_large_24_p8_384.fb_dist_in1k'}) # File: pytorch-image-models-main/timm/optim/__init__.py from .adabelief import AdaBelief from .adafactor import Adafactor from .adahessian import Adahessian from .adamp import AdamP from .adamw import AdamW from .adan import Adan from .lamb import Lamb from .lars import Lars from .lookahead import Lookahead from .madgrad import MADGRAD from .nadam import Nadam from .nvnovograd import NvNovoGrad from .radam import RAdam from .rmsprop_tf import RMSpropTF from .sgdp import SGDP from .lion import Lion from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs # File: pytorch-image-models-main/timm/optim/adabelief.py import math import torch from torch.optim.optimizer import Optimizer class AdaBelief(Optimizer): def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False, decoupled_decay=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True): if not 0.0 <= lr: raise ValueError('Invalid learning rate: {}'.format(lr)) if not 0.0 <= eps: raise ValueError('Invalid epsilon value: {}'.format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1])) if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict): for param in params: if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]): param['buffer'] = [[None, None, None] for _ in range(10)] defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, degenerated_to_sgd=degenerated_to_sgd, decoupled_decay=decoupled_decay, rectify=rectify, fixed_decay=fixed_decay, buffer=[[None, None, None] for _ in range(10)]) super(AdaBelief, self).__init__(params, defaults) def __setstate__(self, state): super(AdaBelief, self).__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) @torch.no_grad() def reset(self): for group in self.param_groups: for p in group['params']: state = self.state[p] amsgrad = group['amsgrad'] state['step'] = 0 state['exp_avg'] = torch.zeros_like(p) state['exp_avg_var'] = torch.zeros_like(p) if amsgrad: state['max_exp_avg_var'] = torch.zeros_like(p) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError('AdaBelief does not support sparse gradients, please consider SparseAdam instead') p_fp32 = p if p.dtype in {torch.float16, torch.bfloat16}: p_fp32 = p_fp32.float() amsgrad = group['amsgrad'] (beta1, beta2) = group['betas'] state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p_fp32) state['exp_avg_var'] = torch.zeros_like(p_fp32) if amsgrad: state['max_exp_avg_var'] = torch.zeros_like(p_fp32) if group['decoupled_decay']: if not group['fixed_decay']: p_fp32.mul_(1.0 - group['lr'] * group['weight_decay']) else: p_fp32.mul_(1.0 - group['weight_decay']) elif group['weight_decay'] != 0: grad.add_(p_fp32, alpha=group['weight_decay']) (exp_avg, exp_avg_var) = (state['exp_avg'], state['exp_avg_var']) state['step'] += 1 bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 
- beta2 ** state['step'] exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) grad_residual = grad - exp_avg exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2) if amsgrad: max_exp_avg_var = state['max_exp_avg_var'] torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var) denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) else: denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) if not group['rectify']: step_size = group['lr'] / bias_correction1 p_fp32.addcdiv_(exp_avg, denom, value=-step_size) else: buffered = group['buffer'][int(state['step'] % 10)] if state['step'] == buffered[0]: (num_sma, step_size) = (buffered[1], buffered[2]) else: buffered[0] = state['step'] beta2_t = beta2 ** state['step'] num_sma_max = 2 / (1 - beta2) - 1 num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) buffered[1] = num_sma if num_sma >= 5: step_size = math.sqrt((1 - beta2_t) * (num_sma - 4) / (num_sma_max - 4) * (num_sma - 2) / num_sma * num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) elif group['degenerated_to_sgd']: step_size = 1.0 / (1 - beta1 ** state['step']) else: step_size = -1 buffered[2] = step_size if num_sma >= 5: denom = exp_avg_var.sqrt().add_(group['eps']) p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr']) elif step_size > 0: p_fp32.add_(exp_avg, alpha=-step_size * group['lr']) if p.dtype in {torch.float16, torch.bfloat16}: p.copy_(p_fp32) return loss # File: pytorch-image-models-main/timm/optim/adafactor.py """""" import torch import math class Adafactor(torch.optim.Optimizer): def __init__(self, params, lr=None, eps=1e-30, eps_scale=0.001, clip_threshold=1.0, decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False): relative_step = not lr if warmup_init and (not relative_step): raise ValueError('warmup_init requires relative_step=True') beta1 = None if betas is None else betas[0] defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init) super(Adafactor, self).__init__(params, defaults) @staticmethod def _get_lr(param_group, param_state): if param_group['relative_step']: min_step = 1e-06 * param_state['step'] if param_group['warmup_init'] else 0.01 lr_t = min(min_step, 1.0 / math.sqrt(param_state['step'])) param_scale = 1.0 if param_group['scale_parameter']: param_scale = max(param_group['eps_scale'], param_state['RMS']) param_group['lr'] = lr_t * param_scale return param_group['lr'] @staticmethod def _get_options(param_group, param_shape): factored = len(param_shape) >= 2 use_first_moment = param_group['beta1'] is not None return (factored, use_first_moment) @staticmethod def _rms(tensor): return tensor.norm(2) / tensor.numel() ** 0.5 def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() return torch.mul(r_factor, c_factor) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError('Adafactor does not support 
sparse gradients.') state = self.state[p] (factored, use_first_moment) = self._get_options(group, grad.shape) if len(state) == 0: state['step'] = 0 if use_first_moment: state['exp_avg'] = torch.zeros_like(grad) if factored: state['exp_avg_sq_row'] = torch.zeros(grad.shape[:-1]).to(grad) state['exp_avg_sq_col'] = torch.zeros(grad.shape[:-2] + grad.shape[-1:]).to(grad) else: state['exp_avg_sq'] = torch.zeros_like(grad) state['RMS'] = 0 else: if use_first_moment: state['exp_avg'] = state['exp_avg'].to(grad) if factored: state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad) state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad) else: state['exp_avg_sq'] = state['exp_avg_sq'].to(grad) p_fp32 = p if p.dtype in {torch.float16, torch.bfloat16}: p_fp32 = p_fp32.float() state['step'] += 1 state['RMS'] = self._rms(p_fp32) lr_t = self._get_lr(group, state) beta2t = 1.0 - math.pow(state['step'], group['decay_rate']) update = grad ** 2 + group['eps'] if factored: exp_avg_sq_row = state['exp_avg_sq_row'] exp_avg_sq_col = state['exp_avg_sq_col'] exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t) update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) update.mul_(grad) else: exp_avg_sq = state['exp_avg_sq'] exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) update = exp_avg_sq.rsqrt().mul_(grad) update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0)) update.mul_(lr_t) if use_first_moment: exp_avg = state['exp_avg'] exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) update = exp_avg if group['weight_decay'] != 0: p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t) p_fp32.add_(-update) if p.dtype in {torch.float16, torch.bfloat16}: p.copy_(p_fp32) return loss # File: pytorch-image-models-main/timm/optim/adahessian.py """""" import torch class Adahessian(torch.optim.Optimizer): def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0, hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False): if not 0.0 <= lr: raise ValueError(f'Invalid learning rate: {lr}') if not 0.0 <= eps: raise ValueError(f'Invalid epsilon value: {eps}') if not 0.0 <= betas[0] < 1.0: raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}') if not 0.0 <= betas[1] < 1.0: raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}') if not 0.0 <= hessian_power <= 1.0: raise ValueError(f'Invalid Hessian power value: {hessian_power}') self.n_samples = n_samples self.update_each = update_each self.avg_conv_kernel = avg_conv_kernel self.seed = 2147483647 self.generator = torch.Generator().manual_seed(self.seed) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power) super(Adahessian, self).__init__(params, defaults) for p in self.get_params(): p.hess = 0.0 self.state[p]['hessian step'] = 0 @property def is_second_order(self): return True def get_params(self): return (p for group in self.param_groups for p in group['params'] if p.requires_grad) def zero_hessian(self): for p in self.get_params(): if not isinstance(p.hess, float) and self.state[p]['hessian step'] % self.update_each == 0: p.hess.zero_() @torch.no_grad() def set_hessian(self): params = [] for p in filter(lambda p: p.grad is not None, self.get_params()): if self.state[p]['hessian step'] % self.update_each == 0: params.append(p) self.state[p]['hessian step'] += 1 if len(params) == 0: return if self.generator.device != 
params[0].device: self.generator = torch.Generator(params[0].device).manual_seed(self.seed) grads = [p.grad for p in params] for i in range(self.n_samples): zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params] h_zs = torch.autograd.grad(grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1) for (h_z, z, p) in zip(h_zs, zs, params): p.hess += h_z * z / self.n_samples @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: loss = closure() self.zero_hessian() self.set_hessian() for group in self.param_groups: for p in group['params']: if p.grad is None or p.hess is None: continue if self.avg_conv_kernel and p.dim() == 4: p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone() p.mul_(1 - group['lr'] * group['weight_decay']) state = self.state[p] if len(state) == 1: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p) state['exp_hessian_diag_sq'] = torch.zeros_like(p) (exp_avg, exp_hessian_diag_sq) = (state['exp_avg'], state['exp_hessian_diag_sq']) (beta1, beta2) = group['betas'] state['step'] += 1 exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1) exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2) bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] k = group['hessian_power'] denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps']) step_size = group['lr'] / bias_correction1 p.addcdiv_(exp_avg, denom, value=-step_size) return loss # File: pytorch-image-models-main/timm/optim/adamp.py """""" import torch import torch.nn.functional as F from torch.optim.optimizer import Optimizer import math def _channel_view(x) -> torch.Tensor: return x.reshape(x.size(0), -1) def _layer_view(x) -> torch.Tensor: return x.reshape(1, -1) def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float): wd = 1.0 expand_size = (-1,) + (1,) * (len(p.shape) - 1) for view_func in [_channel_view, _layer_view]: param_view = view_func(p) grad_view = view_func(grad) cosine_sim = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_() if cosine_sim.max() < delta / math.sqrt(param_view.size(1)): p_n = p / param_view.norm(p=2, dim=1).add_(eps).reshape(expand_size) perturb -= p_n * view_func(p_n * perturb).sum(dim=1).reshape(expand_size) wd = wd_ratio return (perturb, wd) return (perturb, wd) class AdamP(Optimizer): def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False): defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, delta=delta, wd_ratio=wd_ratio, nesterov=nesterov) super(AdamP, self).__init__(params, defaults) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad (beta1, beta2) = group['betas'] nesterov = group['nesterov'] state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p) state['exp_avg_sq'] = torch.zeros_like(p) (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq']) state['step'] += 1 bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) 
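# The lines below are AdamP's departure from plain Adam: the usual update direction
# `perturb` is computed from exp_avg/denom, but for parameters with more than one
# dimension the `projection` helper above tests the cosine similarity between
# gradient and weight against delta / sqrt(dim); when the parameter looks
# scale-invariant it removes the radial component of the update (the part that only
# grows the weight norm) and shrinks weight decay by `wd_ratio`.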
step_size = group['lr'] / bias_correction1 if nesterov: perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom else: perturb = exp_avg / denom wd_ratio = 1.0 if len(p.shape) > 1: (perturb, wd_ratio) = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps']) if group['weight_decay'] > 0: p.mul_(1.0 - group['lr'] * group['weight_decay'] * wd_ratio) p.add_(perturb, alpha=-step_size) return loss # File: pytorch-image-models-main/timm/optim/adamw.py """""" import math import torch from torch.optim.optimizer import Optimizer class AdamW(Optimizer): def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False): if not 0.0 <= lr: raise ValueError('Invalid learning rate: {}'.format(lr)) if not 0.0 <= eps: raise ValueError('Invalid epsilon value: {}'.format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1])) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad) super(AdamW, self).__init__(params, defaults) def __setstate__(self, state): super(AdamW, self).__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue p.data.mul_(1 - group['lr'] * group['weight_decay']) grad = p.grad if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') amsgrad = group['amsgrad'] state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p) state['exp_avg_sq'] = torch.zeros_like(p) if amsgrad: state['max_exp_avg_sq'] = torch.zeros_like(p) (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq']) if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] (beta1, beta2) = group['betas'] state['step'] += 1 bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if amsgrad: torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) else: denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) step_size = group['lr'] / bias_correction1 p.addcdiv_(exp_avg, denom, value=-step_size) return loss # File: pytorch-image-models-main/timm/optim/adan.py """""" import math import torch from torch.optim import Optimizer class Adan(Optimizer): def __init__(self, params, lr=0.001, betas=(0.98, 0.92, 0.99), eps=1e-08, weight_decay=0.0, no_prox=False): if not 0.0 <= lr: raise ValueError('Invalid learning rate: {}'.format(lr)) if not 0.0 <= eps: raise ValueError('Invalid epsilon value: {}'.format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1])) if not 0.0 <= betas[2] < 1.0: raise ValueError('Invalid beta parameter at index 2: {}'.format(betas[2])) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, no_prox=no_prox) super(Adan, self).__init__(params, defaults) @torch.no_grad() def restart_opt(self): for group in 
self.param_groups: group['step'] = 0 for p in group['params']: if p.requires_grad: state = self.state[p] state['exp_avg'] = torch.zeros_like(p) state['exp_avg_sq'] = torch.zeros_like(p) state['exp_avg_diff'] = torch.zeros_like(p) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: (beta1, beta2, beta3) = group['betas'] if 'step' in group: group['step'] += 1 else: group['step'] = 1 bias_correction1 = 1.0 - beta1 ** group['step'] bias_correction2 = 1.0 - beta2 ** group['step'] bias_correction3 = 1.0 - beta3 ** group['step'] for p in group['params']: if p.grad is None: continue grad = p.grad state = self.state[p] if len(state) == 0: state['exp_avg'] = torch.zeros_like(p) state['exp_avg_diff'] = torch.zeros_like(p) state['exp_avg_sq'] = torch.zeros_like(p) state['pre_grad'] = grad.clone() (exp_avg, exp_avg_sq, exp_avg_diff) = (state['exp_avg'], state['exp_avg_sq'], state['exp_avg_diff']) grad_diff = grad - state['pre_grad'] exp_avg.lerp_(grad, 1.0 - beta1) exp_avg_diff.lerp_(grad_diff, 1.0 - beta2) update = grad + beta2 * grad_diff exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1.0 - beta3) denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction3)).add_(group['eps']) update = (exp_avg / bias_correction1 + beta2 * exp_avg_diff / bias_correction2).div_(denom) if group['no_prox']: p.data.mul_(1 - group['lr'] * group['weight_decay']) p.add_(update, alpha=-group['lr']) else: p.add_(update, alpha=-group['lr']) p.data.div_(1 + group['lr'] * group['weight_decay']) state['pre_grad'].copy_(grad) return loss # File: pytorch-image-models-main/timm/optim/lamb.py """""" import math import torch from torch.optim import Optimizer class Lamb(Optimizer): def __init__(self, params, lr=0.001, bias_correction=True, betas=(0.9, 0.999), eps=1e-06, weight_decay=0.01, grad_averaging=True, max_grad_norm=1.0, trust_clip=False, always_adapt=False): defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, grad_averaging=grad_averaging, max_grad_norm=max_grad_norm, trust_clip=trust_clip, always_adapt=always_adapt) super().__init__(params, defaults) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() device = self.param_groups[0]['params'][0].device one_tensor = torch.tensor(1.0, device=device) global_grad_norm = torch.zeros(1, device=device) for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.is_sparse: raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.') global_grad_norm.add_(grad.pow(2).sum()) global_grad_norm = torch.sqrt(global_grad_norm) max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device) clip_global_grad_norm = torch.where(global_grad_norm > max_grad_norm, global_grad_norm / max_grad_norm, one_tensor) for group in self.param_groups: bias_correction = 1 if group['bias_correction'] else 0 (beta1, beta2) = group['betas'] grad_averaging = 1 if group['grad_averaging'] else 0 beta3 = 1 - beta1 if grad_averaging else 1.0 if 'step' in group: group['step'] += 1 else: group['step'] = 1 if bias_correction: bias_correction1 = 1 - beta1 ** group['step'] bias_correction2 = 1 - beta2 ** group['step'] else: (bias_correction1, bias_correction2) = (1.0, 1.0) for p in group['params']: if p.grad is None: continue grad = p.grad.div_(clip_global_grad_norm) state = self.state[p] if len(state) ==
0: state['exp_avg'] = torch.zeros_like(p) state['exp_avg_sq'] = torch.zeros_like(p) (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq']) exp_avg.mul_(beta1).add_(grad, alpha=beta3) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) update = (exp_avg / bias_correction1).div_(denom) weight_decay = group['weight_decay'] if weight_decay != 0: update.add_(p, alpha=weight_decay) if weight_decay != 0 or group['always_adapt']: w_norm = p.norm(2.0) g_norm = update.norm(2.0) trust_ratio = torch.where(w_norm > 0, torch.where(g_norm > 0, w_norm / g_norm, one_tensor), one_tensor) if group['trust_clip']: trust_ratio = torch.minimum(trust_ratio, one_tensor) update.mul_(trust_ratio) p.add_(update, alpha=-group['lr']) return loss # File: pytorch-image-models-main/timm/optim/lars.py """""" import torch from torch.optim.optimizer import Optimizer class Lars(Optimizer): def __init__(self, params, lr=1.0, momentum=0, dampening=0, weight_decay=0, nesterov=False, trust_coeff=0.001, eps=1e-08, trust_clip=False, always_adapt=False): if lr < 0.0: raise ValueError(f'Invalid learning rate: {lr}') if momentum < 0.0: raise ValueError(f'Invalid momentum value: {momentum}') if weight_decay < 0.0: raise ValueError(f'Invalid weight_decay value: {weight_decay}') if nesterov and (momentum <= 0 or dampening != 0): raise ValueError('Nesterov momentum requires a momentum and zero dampening') defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, trust_coeff=trust_coeff, eps=eps, trust_clip=trust_clip, always_adapt=always_adapt) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('nesterov', False) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() device = self.param_groups[0]['params'][0].device one_tensor = torch.tensor(1.0, device=device) for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] trust_coeff = group['trust_coeff'] eps = group['eps'] for p in group['params']: if p.grad is None: continue grad = p.grad if weight_decay != 0 or group['always_adapt']: w_norm = p.norm(2.0) g_norm = grad.norm(2.0) trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) trust_ratio = torch.where(w_norm > 0, torch.where(g_norm > 0, trust_ratio, one_tensor), one_tensor) if group['trust_clip']: trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor) grad.add_(p, alpha=weight_decay) grad.mul_(trust_ratio) if momentum != 0: param_state = self.state[p] if 'momentum_buffer' not in param_state: buf = param_state['momentum_buffer'] = torch.clone(grad).detach() else: buf = param_state['momentum_buffer'] buf.mul_(momentum).add_(grad, alpha=1.0 - dampening) if nesterov: grad = grad.add(buf, alpha=momentum) else: grad = buf p.add_(grad, alpha=-group['lr']) return loss # File: pytorch-image-models-main/timm/optim/lion.py """""" from typing import List import torch from torch.optim.optimizer import Optimizer class Lion(Optimizer): def __init__(self, params, lr=0.0001, betas=(0.9, 0.99), weight_decay=0.0, maximize=False, foreach=None): if not 0.0 <= lr: raise ValueError('Invalid learning rate: {}'.format(lr)) if not 0.0 <= betas[0] < 1.0: raise ValueError('Invalid beta parameter at index 0: 
{}'.format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1])) defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay, foreach=foreach, maximize=maximize) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('maximize', False) group.setdefault('foreach', None) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] (beta1, beta2) = group['betas'] for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('Lion does not support sparse gradients') grads.append(p.grad) state = self.state[p] if len(state) == 0: state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) lion(params_with_grad, grads, exp_avgs, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], maximize=group['maximize'], foreach=group['foreach']) return loss def lion(params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], maximize: bool=False, foreach: bool=None, *, beta1: float, beta2: float, lr: float, weight_decay: float): if foreach is None: foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and (not torch.jit.is_scripting()): func = _multi_tensor_lion else: func = _single_tensor_lion func(params, grads, exp_avgs, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, maximize=maximize) def _single_tensor_lion(params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, maximize: bool): for (i, param) in enumerate(params): grad = grads[i] if not maximize else -grads[i] exp_avg = exp_avgs[i] if torch.is_complex(param): grad = torch.view_as_real(grad) exp_avg = torch.view_as_real(exp_avg) param = torch.view_as_real(param) param.mul_(1 - lr * weight_decay) update = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1) param.add_(torch.sign(update), alpha=-lr) exp_avg.lerp_(grad, 1 - beta2) def _multi_tensor_lion(params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, maximize: bool): if len(params) == 0: return if maximize: grads = torch._foreach_neg(tuple(grads)) grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads] exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs] params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params] torch._foreach_mul_(params, 1 - lr * weight_decay) updates = torch._foreach_mul(exp_avgs, beta1) torch._foreach_add_(updates, grads, alpha=1 - beta1) updates = [u.sign() for u in updates] torch._foreach_add_(params, updates, alpha=-lr) torch._foreach_mul_(exp_avgs, beta2) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta2) # File: pytorch-image-models-main/timm/optim/lookahead.py """""" from collections import OrderedDict from typing import Callable, Dict import torch from torch.optim.optimizer import Optimizer from collections import defaultdict class Lookahead(Optimizer): def __init__(self, base_optimizer, alpha=0.5, k=6): self._optimizer_step_pre_hooks: Dict[int, Callable] = OrderedDict() 
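# NOTE: annotation added for clarity; not part of the original source.
# Lookahead deliberately never calls Optimizer.__init__; it aliases
# param_groups, defaults and state from the wrapped optimizer instead. Recent
# torch releases expect these private hook dicts on every Optimizer instance,
# so both the pre- and post-step hook dicts are created here by hand.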
self._optimizer_step_post_hooks: Dict[int, Callable] = OrderedDict() if not 0.0 <= alpha <= 1.0: raise ValueError(f'Invalid slow update rate: {alpha}') if not 1 <= k: raise ValueError(f'Invalid lookahead steps: {k}') defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0) self._base_optimizer = base_optimizer self.param_groups = base_optimizer.param_groups self.defaults = base_optimizer.defaults self.defaults.update(defaults) self.state = defaultdict(dict) for (name, default) in defaults.items(): for group in self._base_optimizer.param_groups: group.setdefault(name, default) @torch.no_grad() def update_slow(self, group): for fast_p in group['params']: if fast_p.grad is None: continue param_state = self._base_optimizer.state[fast_p] if 'lookahead_slow_buff' not in param_state: param_state['lookahead_slow_buff'] = torch.empty_like(fast_p) param_state['lookahead_slow_buff'].copy_(fast_p) slow = param_state['lookahead_slow_buff'] slow.add_(fast_p - slow, alpha=group['lookahead_alpha']) fast_p.copy_(slow) def sync_lookahead(self): for group in self._base_optimizer.param_groups: self.update_slow(group) @torch.no_grad() def step(self, closure=None): loss = self._base_optimizer.step(closure) for group in self._base_optimizer.param_groups: group['lookahead_step'] += 1 if group['lookahead_step'] % group['lookahead_k'] == 0: self.update_slow(group) return loss def state_dict(self): return self._base_optimizer.state_dict() def load_state_dict(self, state_dict): self._base_optimizer.load_state_dict(state_dict) self.param_groups = self._base_optimizer.param_groups # File: pytorch-image-models-main/timm/optim/madgrad.py """""" import math from typing import TYPE_CHECKING, Any, Callable, Optional import torch import torch.optim if TYPE_CHECKING: from torch.optim.optimizer import _params_t else: _params_t = Any class MADGRAD(torch.optim.Optimizer): def __init__(self, params: _params_t, lr: float=0.01, momentum: float=0.9, weight_decay: float=0, eps: float=1e-06, decoupled_decay: bool=False): if momentum < 0 or momentum >= 1: raise ValueError(f'Momentum {momentum} must be in the range [0,1]') if lr <= 0: raise ValueError(f'Learning rate {lr} must be positive') if weight_decay < 0: raise ValueError(f'Weight decay {weight_decay} must be non-negative') if eps < 0: raise ValueError(f'Eps must be non-negative') defaults = dict(lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay) super().__init__(params, defaults) @property def supports_memory_efficient_fp16(self) -> bool: return False @property def supports_flat_params(self) -> bool: return True @torch.no_grad() def step(self, closure: Optional[Callable[[], float]]=None) -> Optional[float]: loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: eps = group['eps'] lr = group['lr'] + eps weight_decay = group['weight_decay'] momentum = group['momentum'] ck = 1 - momentum for p in group['params']: if p.grad is None: continue grad = p.grad if momentum != 0.0 and grad.is_sparse: raise RuntimeError('momentum != 0 is not compatible with sparse gradients') state = self.state[p] if len(state) == 0: state['step'] = 0 state['grad_sum_sq'] = torch.zeros_like(p) state['s'] = torch.zeros_like(p) if momentum != 0: state['x0'] = torch.clone(p).detach() state['step'] += 1 grad_sum_sq = state['grad_sum_sq'] s = state['s'] lamb = lr * math.sqrt(state['step']) if weight_decay != 0: if group['decoupled_decay']: p.mul_(1.0 - group['lr'] * weight_decay) else: if 
grad.is_sparse: raise RuntimeError('weight_decay option is not compatible with sparse gradients') grad.add_(p, alpha=weight_decay) if grad.is_sparse: grad = grad.coalesce() grad_val = grad._values() p_masked = p.sparse_mask(grad) grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad) s_masked = s.sparse_mask(grad) rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps) x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1) grad_sq = grad * grad grad_sum_sq.add_(grad_sq, alpha=lamb) grad_sum_sq_masked.add_(grad_sq, alpha=lamb) rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps) s.add_(grad, alpha=lamb) s_masked._values().add_(grad_val, alpha=lamb) p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1) p_masked._values().add_(p_kp1_masked_vals, alpha=-1) p.add_(p_masked, alpha=-1) else: if momentum == 0: rms = grad_sum_sq.pow(1 / 3).add_(eps) x0 = p.addcdiv(s, rms, value=1) else: x0 = state['x0'] grad_sum_sq.addcmul_(grad, grad, value=lamb) rms = grad_sum_sq.pow(1 / 3).add_(eps) s.add_(grad, alpha=lamb) if momentum == 0: p.copy_(x0.addcdiv(s, rms, value=-1)) else: z = x0.addcdiv(s, rms, value=-1) p.mul_(1 - ck).add_(z, alpha=ck) return loss # File: pytorch-image-models-main/timm/optim/nadam.py import math import torch from torch.optim.optimizer import Optimizer class Nadam(Optimizer): def __init__(self, params, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, schedule_decay=0.004): if not 0.0 <= lr: raise ValueError('Invalid learning rate: {}'.format(lr)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, schedule_decay=schedule_decay) super(Nadam, self).__init__(params, defaults) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad state = self.state[p] if len(state) == 0: state['step'] = 0 state['m_schedule'] = 1.0 state['exp_avg'] = torch.zeros_like(p) state['exp_avg_sq'] = torch.zeros_like(p) m_schedule = state['m_schedule'] schedule_decay = group['schedule_decay'] (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq']) (beta1, beta2) = group['betas'] eps = group['eps'] state['step'] += 1 t = state['step'] bias_correction2 = 1 - beta2 ** t if group['weight_decay'] != 0: grad = grad.add(p, alpha=group['weight_decay']) momentum_cache_t = beta1 * (1.0 - 0.5 * 0.96 ** (t * schedule_decay)) momentum_cache_t_1 = beta1 * (1.0 - 0.5 * 0.96 ** ((t + 1) * schedule_decay)) m_schedule_new = m_schedule * momentum_cache_t m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1 state['m_schedule'] = m_schedule_new exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2) denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) p.addcdiv_(grad, denom, value=-group['lr'] * (1.0 - momentum_cache_t) / (1.0 - m_schedule_new)) p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1.0 - m_schedule_next)) return loss # File: pytorch-image-models-main/timm/optim/nadamw.py """""" import math from typing import List, Optional import torch from torch import Tensor class NAdamW(torch.optim.Optimizer): def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, maximize: bool=False, foreach: Optional[bool]=None, capturable: bool=False): if not 0.0 <= lr: raise ValueError(f'Invalid learning rate: 
{lr}') if not 0.0 <= eps: raise ValueError(f'Invalid epsilon value: {eps}') if not 0.0 <= betas[0] < 1.0: raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}') if not 0.0 <= betas[1] < 1.0: raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}') if not 0.0 <= weight_decay: raise ValueError(f'Invalid weight_decay value: {weight_decay}') defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, foreach=foreach, maximize=maximize, capturable=capturable) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) state_values = list(self.state.values()) step_is_tensor = len(state_values) != 0 and torch.is_tensor(state_values[0]['step']) if not step_is_tensor: for s in state_values: s['step'] = torch.tensor(float(s['step'])) @torch.no_grad() def step(self, closure=None): self._cuda_graph_capture_health_check() loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] exp_avg_sqs = [] state_steps = [] (beta1, beta2) = group['betas'] for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('NAdamW does not support sparse gradients') grads.append(p.grad) state = self.state[p] if len(state) == 0: state['step'] = torch.tensor(0.0) state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) exp_avg_sqs.append(state['exp_avg_sq']) state_steps.append(state['step']) nadamw(params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'], maximize=group['maximize'], capturable=group['capturable']) return loss def nadamw(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], foreach: Optional[bool]=None, capturable: bool=False, *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool) -> None: if not all((isinstance(t, torch.Tensor) for t in state_steps)): raise RuntimeError('API has changed, `state_steps` argument must contain a list of' + ' singleton tensors') if foreach is None: foreach = True if foreach and (not torch.jit.is_scripting()): func = _multi_tensor_nadamw else: func = _single_tensor_nadamw func(params, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, eps=eps, maximize=maximize, capturable=capturable) def _single_tensor_nadamw(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool, capturable: bool): for (i, param) in enumerate(params): grad = grads[i] if not maximize else -grads[i] exp_avg = exp_avgs[i] exp_avg_sq = exp_avg_sqs[i] step_t = state_steps[i] step_t += 1 param.mul_(1.0 - lr * weight_decay) exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if capturable: step = step_t bias_correction1 = 1 - torch.pow(beta1, step) bias_correction2 = 1 - torch.pow(beta2, step) step_size = lr / bias_correction1 step_size_neg = step_size.neg() bias_correction2_sqrt = bias_correction2.sqrt() exp_avg = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1) denom = (exp_avg_sq.sqrt() / 
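# NOTE: annotation added for clarity; not part of the original source. In the
# capturable branch every scalar (step count, bias corrections, step size)
# stays on device as a tensor so the whole update can be captured in a CUDA
# graph. The negated step size is folded into this denominator, which is why
# the param.addcdiv_ that follows carries no explicit value= argument.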
(bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg) param.addcdiv_(exp_avg, denom) else: step = step_t.item() bias_correction1 = 1 - beta1 ** step bias_correction2 = 1 - beta2 ** step step_size = lr / bias_correction1 bias_correction2_sqrt = math.sqrt(bias_correction2) exp_avg = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1) denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) param.addcdiv_(exp_avg, denom, value=-step_size) def _multi_tensor_nadamw(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool, capturable: bool): if len(params) == 0: return if capturable: assert all((p.is_cuda and step.is_cuda for (p, step) in zip(params, state_steps))), 'If capturable=True, params and state_steps must be CUDA tensors.' if maximize: grads = torch._foreach_neg(tuple(grads)) grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads] exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs] exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avg_sqs] params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params] torch._foreach_add_(state_steps, 1) torch._foreach_mul_(params, 1 - lr * weight_decay) torch._foreach_mul_(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) torch._foreach_mul_(exp_avg_sqs, beta2) torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2) if capturable: bias_correction1 = [torch.pow(beta1, step) for step in state_steps] bias_correction2 = [torch.pow(beta2, step) for step in state_steps] torch._foreach_sub_(bias_correction1, 1) torch._foreach_sub_(bias_correction2, 1) torch._foreach_neg_(bias_correction1) torch._foreach_neg_(bias_correction2) step_size = torch._foreach_div(bias_correction1, lr) torch._foreach_reciprocal_(step_size) torch._foreach_neg_(step_size) bias_correction2_sqrt = torch._foreach_sqrt(bias_correction2) exp_avgs = torch._foreach_mul(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) torch._foreach_div_(exp_avg_sq_sqrt, torch._foreach_mul(bias_correction2_sqrt, step_size)) eps_over_step_size = torch._foreach_div(step_size, eps) torch._foreach_reciprocal_(eps_over_step_size) denom = torch._foreach_add(exp_avg_sq_sqrt, eps_over_step_size) torch._foreach_addcdiv_(params, exp_avgs, denom) else: bias_correction1 = [1 - beta1 ** step.item() for step in state_steps] bias_correction2 = [1 - beta2 ** step.item() for step in state_steps] step_size = [lr / bc * -1 for bc in bias_correction1] bias_correction2_sqrt = [math.sqrt(bc) for bc in bias_correction2] exp_avgs = torch._foreach_mul(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) denom = torch._foreach_add(exp_avg_sq_sqrt, eps) torch._foreach_addcdiv_(params, exp_avgs, denom, step_size) # File: pytorch-image-models-main/timm/optim/nvnovograd.py """""" import torch from torch.optim.optimizer import Optimizer import math class NvNovoGrad(Optimizer): def __init__(self, params, lr=0.001, betas=(0.95, 0.98), eps=1e-08, weight_decay=0, grad_averaging=False, amsgrad=False): if not 0.0 <= lr: raise ValueError('Invalid learning rate: {}'.format(lr)) if not 0.0 <= eps: raise ValueError('Invalid epsilon value: {}'.format(eps)) if not 0.0 
<= betas[0] < 1.0: raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1])) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, grad_averaging=grad_averaging, amsgrad=amsgrad) super(NvNovoGrad, self).__init__(params, defaults) def __setstate__(self, state): super(NvNovoGrad, self).__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.is_sparse: raise RuntimeError('Sparse gradients are not supported.') amsgrad = group['amsgrad'] state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p) state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) if amsgrad: state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq']) if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] (beta1, beta2) = group['betas'] state['step'] += 1 norm = torch.sum(torch.pow(grad, 2)) if exp_avg_sq == 0: exp_avg_sq.copy_(norm) else: exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2) if amsgrad: torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) denom = max_exp_avg_sq.sqrt().add_(group['eps']) else: denom = exp_avg_sq.sqrt().add_(group['eps']) grad.div_(denom) if group['weight_decay'] != 0: grad.add_(p, alpha=group['weight_decay']) if group['grad_averaging']: grad.mul_(1 - beta1) exp_avg.mul_(beta1).add_(grad) p.add_(exp_avg, alpha=-group['lr']) return loss # File: pytorch-image-models-main/timm/optim/optim_factory.py """""" import logging from itertools import islice from typing import Optional, Callable, Tuple import torch import torch.nn as nn import torch.optim as optim from timm.models import group_parameters from .adabelief import AdaBelief from .adafactor import Adafactor from .adahessian import Adahessian from .adamp import AdamP from .adan import Adan from .lamb import Lamb from .lars import Lars from .lion import Lion from .lookahead import Lookahead from .madgrad import MADGRAD from .nadam import Nadam from .nadamw import NAdamW from .nvnovograd import NvNovoGrad from .radam import RAdam from .rmsprop_tf import RMSpropTF from .sgdp import SGDP from .sgdw import SGDW _logger = logging.getLogger(__name__) _DEFAULT_FOREACH = {'lion'} def param_groups_weight_decay(model: nn.Module, weight_decay=1e-05, no_weight_decay_list=()): no_weight_decay_list = set(no_weight_decay_list) decay = [] no_decay = [] for (name, param) in model.named_parameters(): if not param.requires_grad: continue if param.ndim <= 1 or name.endswith('.bias') or name in no_weight_decay_list: no_decay.append(param) else: decay.append(param) return [{'params': no_decay, 'weight_decay': 0.0}, {'params': decay, 'weight_decay': weight_decay}] def _group(it, size): it = iter(it) return iter(lambda : tuple(islice(it, size)), ()) def _layer_map(model, layers_per_group=12, num_groups=None): def _in_head(n, hp): if not hp: return True elif isinstance(hp, (tuple, list)): return any([n.startswith(hpi) for hpi in hp]) else: return n.startswith(hp) head_prefix = getattr(model, 'pretrained_cfg', {}).get('classifier', None) names_trunk = [] names_head = [] for (n, _) in model.named_parameters(): names_head.append(n) if _in_head(n, 
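# NOTE: annotation added for clarity; not part of the original source.
# _layer_map assigns every parameter name an integer group id: trunk names are
# chunked into groups of layers_per_group (or derived from num_groups), while
# names matching the pretrained_cfg classifier prefix go to the final head
# group. param_groups_layer_decay then scales each group's lr by
# layer_decay ** (num_layers - 1 - group_id), so earlier layers train slower.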
head_prefix) else names_trunk.append(n) num_trunk_layers = len(names_trunk) if num_groups is not None: layers_per_group = -(num_trunk_layers // -num_groups) names_trunk = list(_group(names_trunk, layers_per_group)) num_trunk_groups = len(names_trunk) layer_map = {n: i for (i, l) in enumerate(names_trunk) for n in l} layer_map.update({n: num_trunk_groups for n in names_head}) return layer_map def param_groups_layer_decay(model: nn.Module, weight_decay: float=0.05, no_weight_decay_list: Tuple[str]=(), layer_decay: float=0.75, end_layer_decay: Optional[float]=None, verbose: bool=False): no_weight_decay_list = set(no_weight_decay_list) param_group_names = {} param_groups = {} if hasattr(model, 'group_matcher'): layer_map = group_parameters(model, model.group_matcher(coarse=False), reverse=True) else: layer_map = _layer_map(model) num_layers = max(layer_map.values()) + 1 layer_max = num_layers - 1 layer_scales = list((layer_decay ** (layer_max - i) for i in range(num_layers))) for (name, param) in model.named_parameters(): if not param.requires_grad: continue if param.ndim == 1 or name in no_weight_decay_list: g_decay = 'no_decay' this_decay = 0.0 else: g_decay = 'decay' this_decay = weight_decay layer_id = layer_map.get(name, layer_max) group_name = 'layer_%d_%s' % (layer_id, g_decay) if group_name not in param_groups: this_scale = layer_scales[layer_id] param_group_names[group_name] = {'lr_scale': this_scale, 'weight_decay': this_decay, 'param_names': []} param_groups[group_name] = {'lr_scale': this_scale, 'weight_decay': this_decay, 'params': []} param_group_names[group_name]['param_names'].append(name) param_groups[group_name]['params'].append(param) if verbose: import json _logger.info('parameter groups: \n%s' % json.dumps(param_group_names, indent=2)) return list(param_groups.values()) def optimizer_kwargs(cfg): kwargs = dict(opt=cfg.opt, lr=cfg.lr, weight_decay=cfg.weight_decay, momentum=cfg.momentum) if getattr(cfg, 'opt_eps', None) is not None: kwargs['eps'] = cfg.opt_eps if getattr(cfg, 'opt_betas', None) is not None: kwargs['betas'] = cfg.opt_betas if getattr(cfg, 'layer_decay', None) is not None: kwargs['layer_decay'] = cfg.layer_decay if getattr(cfg, 'opt_args', None) is not None: kwargs.update(cfg.opt_args) if getattr(cfg, 'opt_foreach', None) is not None: kwargs['foreach'] = cfg.opt_foreach return kwargs def create_optimizer(args, model, filter_bias_and_bn=True): return create_optimizer_v2(model, **optimizer_kwargs(cfg=args), filter_bias_and_bn=filter_bias_and_bn) def create_optimizer_v2(model_or_params, opt: str='sgd', lr: Optional[float]=None, weight_decay: float=0.0, momentum: float=0.9, foreach: Optional[bool]=None, filter_bias_and_bn: bool=True, layer_decay: Optional[float]=None, param_group_fn: Optional[Callable]=None, **kwargs): if isinstance(model_or_params, nn.Module): no_weight_decay = {} if hasattr(model_or_params, 'no_weight_decay'): no_weight_decay = model_or_params.no_weight_decay() if param_group_fn: parameters = param_group_fn(model_or_params) elif layer_decay is not None: parameters = param_groups_layer_decay(model_or_params, weight_decay=weight_decay, layer_decay=layer_decay, no_weight_decay_list=no_weight_decay) weight_decay = 0.0 elif weight_decay and filter_bias_and_bn: parameters = param_groups_weight_decay(model_or_params, weight_decay, no_weight_decay) weight_decay = 0.0 else: parameters = model_or_params.parameters() else: parameters = model_or_params opt_lower = opt.lower() opt_split = opt_lower.split('_') opt_lower = opt_split[-1] if 
opt_lower.startswith('fused'): try: from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD has_apex = True except ImportError: has_apex = False assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' if opt_lower.startswith('bnb'): try: import bitsandbytes as bnb has_bnb = True except ImportError: has_bnb = False assert has_bnb and torch.cuda.is_available(), 'bitsandbytes and CUDA required for bnb optimizers' opt_args = dict(weight_decay=weight_decay, **kwargs) if lr is not None: opt_args.setdefault('lr', lr) if foreach is None: if opt in _DEFAULT_FOREACH: opt_args.setdefault('foreach', True) else: opt_args['foreach'] = foreach if opt_lower == 'sgd' or opt_lower == 'nesterov': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'momentum': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args) elif opt_lower == 'sgdp': optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'sgdw' or opt_lower == 'nesterovw': opt_args.pop('eps', None) optimizer = SGDW(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'momentumw': opt_args.pop('eps', None) optimizer = SGDW(parameters, momentum=momentum, nesterov=False, **opt_args) elif opt_lower == 'adam': optimizer = optim.Adam(parameters, **opt_args) elif opt_lower == 'adamw': optimizer = optim.AdamW(parameters, **opt_args) elif opt_lower == 'adamp': optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) elif opt_lower == 'nadam': try: optimizer = optim.Nadam(parameters, **opt_args) except AttributeError: optimizer = Nadam(parameters, **opt_args) elif opt_lower == 'nadamw': optimizer = NAdamW(parameters, **opt_args) elif opt_lower == 'radam': optimizer = RAdam(parameters, **opt_args) elif opt_lower == 'adamax': optimizer = optim.Adamax(parameters, **opt_args) elif opt_lower == 'adabelief': optimizer = AdaBelief(parameters, rectify=False, **opt_args) elif opt_lower == 'radabelief': optimizer = AdaBelief(parameters, rectify=True, **opt_args) elif opt_lower == 'adadelta': optimizer = optim.Adadelta(parameters, **opt_args) elif opt_lower == 'adagrad': opt_args.setdefault('eps', 1e-08) optimizer = optim.Adagrad(parameters, **opt_args) elif opt_lower == 'adafactor': optimizer = Adafactor(parameters, **opt_args) elif opt_lower == 'adanp': optimizer = Adan(parameters, no_prox=False, **opt_args) elif opt_lower == 'adanw': optimizer = Adan(parameters, no_prox=True, **opt_args) elif opt_lower == 'lamb': optimizer = Lamb(parameters, **opt_args) elif opt_lower == 'lambc': optimizer = Lamb(parameters, trust_clip=True, **opt_args) elif opt_lower == 'larc': optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args) elif opt_lower == 'lars': optimizer = Lars(parameters, momentum=momentum, **opt_args) elif opt_lower == 'nlarc': optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args) elif opt_lower == 'nlars': optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'madgrad': optimizer = MADGRAD(parameters, momentum=momentum, **opt_args) elif opt_lower == 'madgradw': optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args) elif opt_lower == 'novograd' or opt_lower == 'nvnovograd': optimizer = NvNovoGrad(parameters, **opt_args) elif opt_lower == 'rmsprop': optimizer = optim.RMSprop(parameters, 
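# NOTE: annotation added for clarity; not part of the original source. timm
# passes alpha=0.9 (TF-style smoothing) for plain 'rmsprop' rather than
# torch's 0.99 default; the 'rmsproptf' variant below additionally adds eps
# inside the sqrt and initializes square_avg to ones, matching TF1 RMSProp.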
alpha=0.9, momentum=momentum, **opt_args) elif opt_lower == 'rmsproptf': optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args) elif opt_lower == 'lion': opt_args.pop('eps', None) optimizer = Lion(parameters, **opt_args) elif opt_lower == 'adahessian': optimizer = Adahessian(parameters, **opt_args) elif opt_lower == 'fusedsgd': opt_args.pop('eps', None) optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'fusedmomentum': opt_args.pop('eps', None) optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args) elif opt_lower == 'fusedadam': optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args) elif opt_lower == 'fusedadamw': optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args) elif opt_lower == 'fusedlamb': optimizer = FusedLAMB(parameters, **opt_args) elif opt_lower == 'fusednovograd': opt_args.setdefault('betas', (0.95, 0.98)) optimizer = FusedNovoGrad(parameters, **opt_args) elif opt_lower == 'bnbsgd': opt_args.pop('eps', None) optimizer = bnb.optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'bnbsgd8bit': opt_args.pop('eps', None) optimizer = bnb.optim.SGD8bit(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'bnbmomentum': opt_args.pop('eps', None) optimizer = bnb.optim.SGD(parameters, momentum=momentum, **opt_args) elif opt_lower == 'bnbmomentum8bit': opt_args.pop('eps', None) optimizer = bnb.optim.SGD8bit(parameters, momentum=momentum, **opt_args) elif opt_lower == 'bnbadam': optimizer = bnb.optim.Adam(parameters, **opt_args) elif opt_lower == 'bnbadam8bit': optimizer = bnb.optim.Adam8bit(parameters, **opt_args) elif opt_lower == 'bnbadamw': optimizer = bnb.optim.AdamW(parameters, **opt_args) elif opt_lower == 'bnbadamw8bit': optimizer = bnb.optim.AdamW8bit(parameters, **opt_args) elif opt_lower == 'bnblamb': optimizer = bnb.optim.LAMB(parameters, **opt_args) elif opt_lower == 'bnblamb8bit': optimizer = bnb.optim.LAMB8bit(parameters, **opt_args) elif opt_lower == 'bnblars': optimizer = bnb.optim.LARS(parameters, **opt_args) elif opt_lower == 'bnblarsb8bit': optimizer = bnb.optim.LAMB8bit(parameters, **opt_args) elif opt_lower == 'bnblion': optimizer = bnb.optim.Lion(parameters, **opt_args) elif opt_lower == 'bnblion8bit': optimizer = bnb.optim.Lion8bit(parameters, **opt_args) else: assert False and 'Invalid optimizer' raise ValueError if len(opt_split) > 1: if opt_split[0] == 'lookahead': optimizer = Lookahead(optimizer) return optimizer # File: pytorch-image-models-main/timm/optim/radam.py """""" import math import torch from torch.optim.optimizer import Optimizer class RAdam(Optimizer): def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0): defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)]) super(RAdam, self).__init__(params, defaults) def __setstate__(self, state): super(RAdam, self).__setstate__(state) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.float() if grad.is_sparse: raise RuntimeError('RAdam does not support sparse gradients') p_fp32 = p.float() state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p_fp32) state['exp_avg_sq'] = torch.zeros_like(p_fp32) else: state['exp_avg'] = 
state['exp_avg'].type_as(p_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32) (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq']) (beta1, beta2) = group['betas'] exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) state['step'] += 1 buffered = group['buffer'][int(state['step'] % 10)] if state['step'] == buffered[0]: (num_sma, step_size) = (buffered[1], buffered[2]) else: buffered[0] = state['step'] beta2_t = beta2 ** state['step'] num_sma_max = 2 / (1 - beta2) - 1 num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) buffered[1] = num_sma if num_sma >= 5: step_size = group['lr'] * math.sqrt((1 - beta2_t) * (num_sma - 4) / (num_sma_max - 4) * (num_sma - 2) / num_sma * num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) else: step_size = group['lr'] / (1 - beta1 ** state['step']) buffered[2] = step_size if group['weight_decay'] != 0: p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr']) if num_sma >= 5: denom = exp_avg_sq.sqrt().add_(group['eps']) p_fp32.addcdiv_(exp_avg, denom, value=-step_size) else: p_fp32.add_(exp_avg, alpha=-step_size) p.copy_(p_fp32) return loss # File: pytorch-image-models-main/timm/optim/rmsprop_tf.py """""" import torch from torch.optim import Optimizer class RMSpropTF(Optimizer): def __init__(self, params, lr=0.01, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0.0, centered=False, decoupled_decay=False, lr_in_momentum=True): if not 0.0 <= lr: raise ValueError('Invalid learning rate: {}'.format(lr)) if not 0.0 <= eps: raise ValueError('Invalid epsilon value: {}'.format(eps)) if not 0.0 <= momentum: raise ValueError('Invalid momentum value: {}'.format(momentum)) if not 0.0 <= weight_decay: raise ValueError('Invalid weight_decay value: {}'.format(weight_decay)) if not 0.0 <= alpha: raise ValueError('Invalid alpha value: {}'.format(alpha)) defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay, decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum) super(RMSpropTF, self).__init__(params, defaults) def __setstate__(self, state): super(RMSpropTF, self).__setstate__(state) for group in self.param_groups: group.setdefault('momentum', 0) group.setdefault('centered', False) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.is_sparse: raise RuntimeError('RMSprop does not support sparse gradients') state = self.state[p] if len(state) == 0: state['step'] = 0 state['square_avg'] = torch.ones_like(p) if group['momentum'] > 0: state['momentum_buffer'] = torch.zeros_like(p) if group['centered']: state['grad_avg'] = torch.zeros_like(p) square_avg = state['square_avg'] one_minus_alpha = 1.0 - group['alpha'] state['step'] += 1 if group['weight_decay'] != 0: if group['decoupled_decay']: p.mul_(1.0 - group['lr'] * group['weight_decay']) else: grad = grad.add(p, alpha=group['weight_decay']) square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha) if group['centered']: grad_avg = state['grad_avg'] grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha) avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_() else: avg = square_avg.add(group['eps']).sqrt_() if group['momentum'] > 0: buf = state['momentum_buffer'] if group['lr_in_momentum']: buf.mul_(group['momentum']).addcdiv_(grad, avg, 
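# NOTE: annotation added for clarity; not part of the original source. With
# lr_in_momentum=True (the default) the learning rate is folded into the
# momentum buffer via value=lr and the buffer is applied as p.add_(-buf),
# mirroring TF1 RMSProp; otherwise the buffer accumulates raw updates and lr
# is applied only when the parameter itself is updated.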
value=group['lr']) p.add_(-buf) else: buf.mul_(group['momentum']).addcdiv_(grad, avg) p.add_(buf, alpha=-group['lr']) else: p.addcdiv_(grad, avg, value=-group['lr']) return loss # File: pytorch-image-models-main/timm/optim/sgdp.py """""" import torch import torch.nn.functional as F from torch.optim.optimizer import Optimizer, required import math from .adamp import projection class SGDP(Optimizer): def __init__(self, params, lr=required, momentum=0, dampening=0, weight_decay=0, nesterov=False, eps=1e-08, delta=0.1, wd_ratio=0.1): defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio) super(SGDP, self).__init__(params, defaults) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] for p in group['params']: if p.grad is None: continue grad = p.grad state = self.state[p] if len(state) == 0: state['momentum'] = torch.zeros_like(p) buf = state['momentum'] buf.mul_(momentum).add_(grad, alpha=1.0 - dampening) if nesterov: d_p = grad + momentum * buf else: d_p = buf wd_ratio = 1.0 if len(p.shape) > 1: (d_p, wd_ratio) = projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps']) if weight_decay != 0: p.mul_(1.0 - group['lr'] * group['weight_decay'] * wd_ratio / (1 - momentum)) p.add_(d_p, alpha=-group['lr']) return loss # File: pytorch-image-models-main/timm/optim/sgdw.py from functools import update_wrapper, wraps import torch from torch import Tensor from torch.optim.optimizer import Optimizer try: from torch.optim.optimizer import _use_grad_for_differentiable, _default_to_fused_or_foreach has_recent_pt = True except ImportError: has_recent_pt = False from typing import List, Optional __all__ = ['SGDW', 'sgdw'] class SGDW(Optimizer): def __init__(self, params, lr=0.001, momentum=0, dampening=0, weight_decay=0, nesterov=False, *, maximize: bool=False, foreach: Optional[bool]=None, differentiable: bool=False): if lr < 0.0: raise ValueError(f'Invalid learning rate: {lr}') if momentum < 0.0: raise ValueError(f'Invalid momentum value: {momentum}') if weight_decay < 0.0: raise ValueError(f'Invalid weight_decay value: {weight_decay}') defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, maximize=maximize, foreach=foreach, differentiable=differentiable) if nesterov and (momentum <= 0 or dampening != 0): raise ValueError('Nesterov momentum requires a momentum and zero dampening') super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('nesterov', False) group.setdefault('maximize', False) group.setdefault('foreach', None) group.setdefault('differentiable', False) def _init_group(self, group, params_with_grad, d_p_list, momentum_buffer_list): has_sparse_grad = False for p in group['params']: if p.grad is not None: params_with_grad.append(p) d_p_list.append(p.grad) if p.grad.is_sparse: has_sparse_grad = True state = self.state[p] if 'momentum_buffer' not in state: momentum_buffer_list.append(None) else: momentum_buffer_list.append(state['momentum_buffer']) return has_sparse_grad @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: 
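# NOTE: annotation added for clarity; not part of the original source. SGDW
# follows torch's functional-SGD structure: gather params, grads and momentum
# buffers per group, dispatch to a single-tensor or foreach kernel, then write
# any newly created momentum buffers back into optimizer state. The decoupled
# 'W' decay happens inside the kernels as param.mul_(1 - lr * weight_decay)
# instead of folding weight_decay * param into the gradient.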
params_with_grad = [] d_p_list = [] momentum_buffer_list = [] has_sparse_grad = self._init_group(group, params_with_grad, d_p_list, momentum_buffer_list) sgdw(params_with_grad, d_p_list, momentum_buffer_list, weight_decay=group['weight_decay'], momentum=group['momentum'], lr=group['lr'], dampening=group['dampening'], nesterov=group['nesterov'], maximize=group['maximize'], has_sparse_grad=has_sparse_grad, foreach=group['foreach']) for (p, momentum_buffer) in zip(params_with_grad, momentum_buffer_list): state = self.state[p] state['momentum_buffer'] = momentum_buffer return loss def sgdw(params: List[Tensor], d_p_list: List[Tensor], momentum_buffer_list: List[Optional[Tensor]], has_sparse_grad: bool=None, foreach: Optional[bool]=None, *, weight_decay: float, momentum: float, lr: float, dampening: float, nesterov: bool, maximize: bool): if has_recent_pt and hasattr(Optimizer, '_group_tensors_by_device_and_dtype'): if foreach is None: if not torch.jit.is_scripting(): (_, foreach) = _default_to_fused_or_foreach(params, differentiable=False, use_fused=False) else: foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') else: foreach = False if foreach and (not torch.jit.is_scripting()): func = _multi_tensor_sgdw else: func = _single_tensor_sgdw func(params, d_p_list, momentum_buffer_list, weight_decay=weight_decay, momentum=momentum, lr=lr, dampening=dampening, nesterov=nesterov, has_sparse_grad=has_sparse_grad, maximize=maximize) def _single_tensor_sgdw(params: List[Tensor], d_p_list: List[Tensor], momentum_buffer_list: List[Optional[Tensor]], *, weight_decay: float, momentum: float, lr: float, dampening: float, nesterov: bool, maximize: bool, has_sparse_grad: bool): for (i, param) in enumerate(params): d_p = d_p_list[i] if not maximize else -d_p_list[i] param.mul_(1.0 - lr * weight_decay) if momentum != 0: buf = momentum_buffer_list[i] if buf is None: buf = torch.clone(d_p).detach() momentum_buffer_list[i] = buf else: buf.mul_(momentum).add_(d_p, alpha=1 - dampening) if nesterov: d_p = d_p.add(buf, alpha=momentum) else: d_p = buf param.add_(d_p, alpha=-lr) def _multi_tensor_sgdw(params: List[Tensor], grads: List[Tensor], momentum_buffer_list: List[Optional[Tensor]], *, weight_decay: float, momentum: float, lr: float, dampening: float, nesterov: bool, maximize: bool, has_sparse_grad: bool): if len(params) == 0: return grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, momentum_buffer_list], with_indices=True) for ((device_params, device_grads, device_momentum_buffer_list), indices) in grouped_tensors.values(): device_has_sparse_grad = has_sparse_grad and any((grad.is_sparse for grad in device_grads)) if maximize: device_grads = torch._foreach_neg(device_grads) torch._foreach_mul_(params, 1.0 - lr * weight_decay) if momentum != 0: bufs = [] all_states_with_momentum_buffer = True for i in range(len(device_momentum_buffer_list)): if device_momentum_buffer_list[i] is None: all_states_with_momentum_buffer = False break else: bufs.append(device_momentum_buffer_list[i]) if all_states_with_momentum_buffer: torch._foreach_mul_(bufs, momentum) torch._foreach_add_(bufs, device_grads, alpha=1 - dampening) else: bufs = [] for i in range(len(device_momentum_buffer_list)): if device_momentum_buffer_list[i] is None: buf = device_momentum_buffer_list[i] = momentum_buffer_list[indices[i]] = torch.clone(device_grads[i]).detach() else: buf = device_momentum_buffer_list[i] 
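# NOTE: annotation added for clarity; not part of the original source. On the
# first step a tensor's momentum buffer is bootstrapped as a detached clone of
# its gradient (written back through indices so the caller's list is updated);
# on later steps the existing buffer is updated in place below as
# buf = momentum * buf + (1 - dampening) * grad.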
buf.mul_(momentum).add_(device_grads[i], alpha=1 - dampening) bufs.append(buf) if nesterov: torch._foreach_add_(device_grads, bufs, alpha=momentum) else: device_grads = bufs if not device_has_sparse_grad: torch._foreach_add_(device_params, device_grads, alpha=-lr) else: for i in range(len(device_params)): device_params[i].add_(device_grads[i], alpha=-lr) # File: pytorch-image-models-main/timm/scheduler/cosine_lr.py """""" import logging import math import numpy as np import torch from typing import List from .scheduler import Scheduler _logger = logging.getLogger(__name__) class CosineLRScheduler(Scheduler): def __init__(self, optimizer: torch.optim.Optimizer, t_initial: int, lr_min: float=0.0, cycle_mul: float=1.0, cycle_decay: float=1.0, cycle_limit: int=1, warmup_t=0, warmup_lr_init=0, warmup_prefix=False, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, k_decay=1.0, initialize=True) -> None: super().__init__(optimizer, param_group_field='lr', t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize) assert t_initial > 0 assert lr_min >= 0 if t_initial == 1 and cycle_mul == 1 and (cycle_decay == 1): _logger.warning('Cosine annealing scheduler will have no effect on the learning rate since t_initial = t_mul = eta_mul = 1.') self.t_initial = t_initial self.lr_min = lr_min self.cycle_mul = cycle_mul self.cycle_decay = cycle_decay self.cycle_limit = cycle_limit self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix self.k_decay = k_decay if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t if self.cycle_mul != 1: i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) t_i = self.cycle_mul ** i * self.t_initial t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial else: i = t // self.t_initial t_i = self.t_initial t_curr = t - self.t_initial * i gamma = self.cycle_decay ** i lr_max_values = [v * gamma for v in self.base_values] k = self.k_decay if i < self.cycle_limit: lrs = [self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k)) for lr_max in lr_max_values] else: lrs = [self.lr_min for _ in self.base_values] return lrs def get_cycle_length(self, cycles=0): cycles = max(1, cycles or self.cycle_limit) if self.cycle_mul == 1.0: return self.t_initial * cycles else: return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) # File: pytorch-image-models-main/timm/scheduler/multistep_lr.py """""" import torch import bisect from timm.scheduler.scheduler import Scheduler from typing import List class MultiStepLRScheduler(Scheduler): def __init__(self, optimizer: torch.optim.Optimizer, decay_t: List[int], decay_rate: float=1.0, warmup_t=0, warmup_lr_init=0, warmup_prefix=True, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True) -> None: super().__init__(optimizer, param_group_field='lr', t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize) self.decay_t = 
decay_t self.decay_rate = decay_rate self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def get_curr_decay_steps(self, t): return bisect.bisect_right(self.decay_t, t + 1) def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t lrs = [v * self.decay_rate ** self.get_curr_decay_steps(t) for v in self.base_values] return lrs # File: pytorch-image-models-main/timm/scheduler/plateau_lr.py """""" import torch from typing import List from .scheduler import Scheduler class PlateauLRScheduler(Scheduler): def __init__(self, optimizer, decay_rate=0.1, patience_t=10, verbose=True, threshold=0.0001, cooldown_t=0, warmup_t=0, warmup_lr_init=0, lr_min=0, mode='max', noise_range_t=None, noise_type='normal', noise_pct=0.67, noise_std=1.0, noise_seed=None, initialize=True): super().__init__(optimizer, 'lr', noise_range_t=noise_range_t, noise_type=noise_type, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize) self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=patience_t, factor=decay_rate, verbose=verbose, threshold=threshold, cooldown=cooldown_t, mode=mode, min_lr=lr_min) self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] self.restore_lr = None def state_dict(self): return {'best': self.lr_scheduler.best, 'last_epoch': self.lr_scheduler.last_epoch} def load_state_dict(self, state_dict): self.lr_scheduler.best = state_dict['best'] if 'last_epoch' in state_dict: self.lr_scheduler.last_epoch = state_dict['last_epoch'] def step(self, epoch, metric=None): if epoch <= self.warmup_t: lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps] super().update_groups(lrs) else: if self.restore_lr is not None: for (i, param_group) in enumerate(self.optimizer.param_groups): param_group['lr'] = self.restore_lr[i] self.restore_lr = None self.lr_scheduler.step(metric, epoch) if self._is_apply_noise(epoch): self._apply_noise(epoch) def step_update(self, num_updates: int, metric: float=None): return None def _apply_noise(self, epoch): noise = self._calculate_noise(epoch) restore_lr = [] for (i, param_group) in enumerate(self.optimizer.param_groups): old_lr = float(param_group['lr']) restore_lr.append(old_lr) new_lr = old_lr + old_lr * noise param_group['lr'] = new_lr self.restore_lr = restore_lr def _get_lr(self, t: int) -> List[float]: assert False, 'should not be called as step is overridden' # File: pytorch-image-models-main/timm/scheduler/poly_lr.py """""" import math import logging from typing import List import torch from .scheduler import Scheduler _logger = logging.getLogger(__name__) class PolyLRScheduler(Scheduler): def __init__(self, optimizer: torch.optim.Optimizer, t_initial: int, power: float=0.5, lr_min: float=0.0, cycle_mul: float=1.0, cycle_decay: float=1.0, cycle_limit: int=1, warmup_t=0, warmup_lr_init=0, warmup_prefix=False, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, k_decay=1.0, initialize=True) -> None: 
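# NOTE: annotation added for clarity; not part of the original source.
# PolyLRScheduler reuses the cosine scheduler's warmup/cycle plumbing but
# decays within each cycle as
#   lr(t) = lr_min + (lr_max - lr_min) * (1 - t_curr**k / t_i**k) ** power
# with k = k_decay; power=1.0 gives linear decay and the default power=0.5 a
# square-root-shaped profile.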
super().__init__(optimizer, param_group_field='lr', t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize) assert t_initial > 0 assert lr_min >= 0 if t_initial == 1 and cycle_mul == 1 and (cycle_decay == 1): _logger.warning('Poly LR scheduler will have no effect on the learning rate since t_initial = t_mul = eta_mul = 1.') self.t_initial = t_initial self.power = power self.lr_min = lr_min self.cycle_mul = cycle_mul self.cycle_decay = cycle_decay self.cycle_limit = cycle_limit self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix self.k_decay = k_decay if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t if self.cycle_mul != 1: i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) t_i = self.cycle_mul ** i * self.t_initial t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial else: i = t // self.t_initial t_i = self.t_initial t_curr = t - self.t_initial * i gamma = self.cycle_decay ** i lr_max_values = [v * gamma for v in self.base_values] k = self.k_decay if i < self.cycle_limit: lrs = [self.lr_min + (lr_max - self.lr_min) * (1 - t_curr ** k / t_i ** k) ** self.power for lr_max in lr_max_values] else: lrs = [self.lr_min for _ in self.base_values] return lrs def get_cycle_length(self, cycles=0): cycles = max(1, cycles or self.cycle_limit) if self.cycle_mul == 1.0: return self.t_initial * cycles else: return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) # File: pytorch-image-models-main/timm/scheduler/scheduler.py import abc from abc import ABC from typing import Any, Dict, List, Optional import torch class Scheduler(ABC): def __init__(self, optimizer: torch.optim.Optimizer, param_group_field: str, t_in_epochs: bool=True, noise_range_t=None, noise_type='normal', noise_pct=0.67, noise_std=1.0, noise_seed=None, initialize: bool=True) -> None: self.optimizer = optimizer self.param_group_field = param_group_field self._initial_param_group_field = f'initial_{param_group_field}' if initialize: for (i, group) in enumerate(self.optimizer.param_groups): if param_group_field not in group: raise KeyError(f'{param_group_field} missing from param_groups[{i}]') group.setdefault(self._initial_param_group_field, group[param_group_field]) else: for (i, group) in enumerate(self.optimizer.param_groups): if self._initial_param_group_field not in group: raise KeyError(f'{self._initial_param_group_field} missing from param_groups[{i}]') self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] self.metric = None self.t_in_epochs = t_in_epochs self.noise_range_t = noise_range_t self.noise_pct = noise_pct self.noise_type = noise_type self.noise_std = noise_std self.noise_seed = noise_seed if noise_seed is not None else 42 self.update_groups(self.base_values) def state_dict(self) -> Dict[str, Any]: return {key: value for (key, value) in self.__dict__.items() if key != 'optimizer'} def load_state_dict(self, state_dict: Dict[str, Any]) -> None: self.__dict__.update(state_dict) @abc.abstractmethod def _get_lr(self, t:
int) -> List[float]: pass def _get_values(self, t: int, on_epoch: bool=True) -> Optional[List[float]]: proceed = on_epoch and self.t_in_epochs or (not on_epoch and (not self.t_in_epochs)) if not proceed: return None return self._get_lr(t) def step(self, epoch: int, metric: float=None) -> None: self.metric = metric values = self._get_values(epoch, on_epoch=True) if values is not None: values = self._add_noise(values, epoch) self.update_groups(values) def step_update(self, num_updates: int, metric: float=None): self.metric = metric values = self._get_values(num_updates, on_epoch=False) if values is not None: values = self._add_noise(values, num_updates) self.update_groups(values) def update_groups(self, values): if not isinstance(values, (list, tuple)): values = [values] * len(self.optimizer.param_groups) for (param_group, value) in zip(self.optimizer.param_groups, values): if 'lr_scale' in param_group: param_group[self.param_group_field] = value * param_group['lr_scale'] else: param_group[self.param_group_field] = value def _add_noise(self, lrs, t): if self._is_apply_noise(t): noise = self._calculate_noise(t) lrs = [v + v * noise for v in lrs] return lrs def _is_apply_noise(self, t) -> bool: apply_noise = False if self.noise_range_t is not None: if isinstance(self.noise_range_t, (list, tuple)): apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] else: apply_noise = t >= self.noise_range_t return apply_noise def _calculate_noise(self, t) -> float: g = torch.Generator() g.manual_seed(self.noise_seed + t) if self.noise_type == 'normal': while True: noise = torch.randn(1, generator=g).item() if abs(noise) < self.noise_pct: return noise else: noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct return noise # File: pytorch-image-models-main/timm/scheduler/scheduler_factory.py """""" from typing import List, Optional, Union from torch.optim import Optimizer from .cosine_lr import CosineLRScheduler from .multistep_lr import MultiStepLRScheduler from .plateau_lr import PlateauLRScheduler from .poly_lr import PolyLRScheduler from .step_lr import StepLRScheduler from .tanh_lr import TanhLRScheduler def scheduler_kwargs(cfg, decreasing_metric: Optional[bool]=None): eval_metric = getattr(cfg, 'eval_metric', 'top1') if decreasing_metric is not None: plateau_mode = 'min' if decreasing_metric else 'max' else: plateau_mode = 'min' if 'loss' in eval_metric else 'max' kwargs = dict(sched=cfg.sched, num_epochs=getattr(cfg, 'epochs', 100), decay_epochs=getattr(cfg, 'decay_epochs', 30), decay_milestones=getattr(cfg, 'decay_milestones', [30, 60]), warmup_epochs=getattr(cfg, 'warmup_epochs', 5), cooldown_epochs=getattr(cfg, 'cooldown_epochs', 0), patience_epochs=getattr(cfg, 'patience_epochs', 10), decay_rate=getattr(cfg, 'decay_rate', 0.1), min_lr=getattr(cfg, 'min_lr', 0.0), warmup_lr=getattr(cfg, 'warmup_lr', 1e-05), warmup_prefix=getattr(cfg, 'warmup_prefix', False), noise=getattr(cfg, 'lr_noise', None), noise_pct=getattr(cfg, 'lr_noise_pct', 0.67), noise_std=getattr(cfg, 'lr_noise_std', 1.0), noise_seed=getattr(cfg, 'seed', 42), cycle_mul=getattr(cfg, 'lr_cycle_mul', 1.0), cycle_decay=getattr(cfg, 'lr_cycle_decay', 0.1), cycle_limit=getattr(cfg, 'lr_cycle_limit', 1), k_decay=getattr(cfg, 'lr_k_decay', 1.0), plateau_mode=plateau_mode, step_on_epochs=not getattr(cfg, 'sched_on_updates', False)) return kwargs def create_scheduler(args, optimizer: Optimizer, updates_per_epoch: int=0): return create_scheduler_v2(optimizer=optimizer, **scheduler_kwargs(args), 
updates_per_epoch=updates_per_epoch) def create_scheduler_v2(optimizer: Optimizer, sched: str='cosine', num_epochs: int=300, decay_epochs: int=90, decay_milestones: List[int]=(90, 180, 270), cooldown_epochs: int=0, patience_epochs: int=10, decay_rate: float=0.1, min_lr: float=0, warmup_lr: float=1e-05, warmup_epochs: int=0, warmup_prefix: bool=False, noise: Union[float, List[float]]=None, noise_pct: float=0.67, noise_std: float=1.0, noise_seed: int=42, cycle_mul: float=1.0, cycle_decay: float=0.1, cycle_limit: int=1, k_decay: float=1.0, plateau_mode: str='max', step_on_epochs: bool=True, updates_per_epoch: int=0): t_initial = num_epochs warmup_t = warmup_epochs decay_t = decay_epochs cooldown_t = cooldown_epochs if not step_on_epochs: assert updates_per_epoch > 0, 'updates_per_epoch must be set to number of dataloader batches' t_initial = t_initial * updates_per_epoch warmup_t = warmup_t * updates_per_epoch decay_t = decay_t * updates_per_epoch decay_milestones = [d * updates_per_epoch for d in decay_milestones] cooldown_t = cooldown_t * updates_per_epoch warmup_args = dict(warmup_lr_init=warmup_lr, warmup_t=warmup_t, warmup_prefix=warmup_prefix) if noise is not None: if isinstance(noise, (list, tuple)): noise_range = [n * t_initial for n in noise] if len(noise_range) == 1: noise_range = noise_range[0] else: noise_range = noise * t_initial else: noise_range = None noise_args = dict(noise_range_t=noise_range, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed) cycle_args = dict(cycle_mul=cycle_mul, cycle_decay=cycle_decay, cycle_limit=cycle_limit) lr_scheduler = None if sched == 'cosine': lr_scheduler = CosineLRScheduler(optimizer, t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, **cycle_args, **warmup_args, **noise_args, k_decay=k_decay) elif sched == 'tanh': lr_scheduler = TanhLRScheduler(optimizer, t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, **cycle_args, **warmup_args, **noise_args) elif sched == 'step': lr_scheduler = StepLRScheduler(optimizer, decay_t=decay_t, decay_rate=decay_rate, t_in_epochs=step_on_epochs, **warmup_args, **noise_args) elif sched == 'multistep': lr_scheduler = MultiStepLRScheduler(optimizer, decay_t=decay_milestones, decay_rate=decay_rate, t_in_epochs=step_on_epochs, **warmup_args, **noise_args) elif sched == 'plateau': assert step_on_epochs, 'Plateau LR only supports step per epoch.' 
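# Note: PlateauLRScheduler wraps torch.optim.lr_scheduler.ReduceLROnPlateau (see plateau_lr.py above)
# and accepts no warmup_prefix argument, hence the pop below.
# Usage sketch for this factory, with illustrative values and an assumed existing `optimizer`:
#   scheduler, num_epochs = create_scheduler_v2(optimizer, sched='cosine', num_epochs=100, warmup_epochs=5, min_lr=1e-6)
#   for epoch in range(num_epochs):
#       ...  # train one epoch, compute eval_loss
#       scheduler.step(epoch + 1, metric=eval_loss)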
warmup_args.pop('warmup_prefix', False) lr_scheduler = PlateauLRScheduler(optimizer, decay_rate=decay_rate, patience_t=patience_epochs, cooldown_t=0, **warmup_args, lr_min=min_lr, mode=plateau_mode, **noise_args) elif sched == 'poly': lr_scheduler = PolyLRScheduler(optimizer, power=decay_rate, t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, k_decay=k_decay, **cycle_args, **warmup_args, **noise_args) if hasattr(lr_scheduler, 'get_cycle_length'): t_with_cycles_and_cooldown = lr_scheduler.get_cycle_length() + cooldown_t if step_on_epochs: num_epochs = t_with_cycles_and_cooldown else: num_epochs = t_with_cycles_and_cooldown // updates_per_epoch return (lr_scheduler, num_epochs) # File: pytorch-image-models-main/timm/scheduler/step_lr.py """""" import math import torch from typing import List from .scheduler import Scheduler class StepLRScheduler(Scheduler): def __init__(self, optimizer: torch.optim.Optimizer, decay_t: float, decay_rate: float=1.0, warmup_t=0, warmup_lr_init=0, warmup_prefix=True, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True) -> None: super().__init__(optimizer, param_group_field='lr', t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize) self.decay_t = decay_t self.decay_rate = decay_rate self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t lrs = [v * self.decay_rate ** (t // self.decay_t) for v in self.base_values] return lrs # File: pytorch-image-models-main/timm/scheduler/tanh_lr.py """""" import logging import math import numpy as np import torch from typing import List from .scheduler import Scheduler _logger = logging.getLogger(__name__) class TanhLRScheduler(Scheduler): def __init__(self, optimizer: torch.optim.Optimizer, t_initial: int, lb: float=-7.0, ub: float=3.0, lr_min: float=0.0, cycle_mul: float=1.0, cycle_decay: float=1.0, cycle_limit: int=1, warmup_t=0, warmup_lr_init=0, warmup_prefix=False, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True) -> None: super().__init__(optimizer, param_group_field='lr', t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize) assert t_initial > 0 assert lr_min >= 0 assert lb < ub assert cycle_limit >= 0 assert warmup_t >= 0 assert warmup_lr_init >= 0 self.lb = lb self.ub = ub self.t_initial = t_initial self.lr_min = lr_min self.cycle_mul = cycle_mul self.cycle_decay = cycle_decay self.cycle_limit = cycle_limit self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix if self.warmup_t: t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t) self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = 
t - self.warmup_t if self.cycle_mul != 1: i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) t_i = self.cycle_mul ** i * self.t_initial t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial else: i = t // self.t_initial t_i = self.t_initial t_curr = t - self.t_initial * i if i < self.cycle_limit: gamma = self.cycle_decay ** i lr_max_values = [v * gamma for v in self.base_values] tr = t_curr / t_i lrs = [self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 - math.tanh(self.lb * (1.0 - tr) + self.ub * tr)) for lr_max in lr_max_values] else: lrs = [self.lr_min for _ in self.base_values] return lrs def get_cycle_length(self, cycles=0): cycles = max(1, cycles or self.cycle_limit) if self.cycle_mul == 1.0: return self.t_initial * cycles else: return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) # File: pytorch-image-models-main/train.py """""" import argparse import importlib import json import logging import os import time from collections import OrderedDict from contextlib import suppress from datetime import datetime from functools import partial import torch import torch.nn as nn import torchvision.utils import yaml from torch.nn.parallel import DistributedDataParallel as NativeDDP from timm import utils from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset from timm.layers import convert_splitbn_model, convert_sync_batchnorm, set_fast_norm from timm.loss import JsdCrossEntropy, SoftTargetCrossEntropy, BinaryCrossEntropy, LabelSmoothingCrossEntropy from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint, model_parameters from timm.optim import create_optimizer_v2, optimizer_kwargs from timm.scheduler import create_scheduler_v2, scheduler_kwargs from timm.utils import ApexScaler, NativeScaler try: from apex import amp from apex.parallel import DistributedDataParallel as ApexDDP from apex.parallel import convert_syncbn_model has_apex = True except ImportError: has_apex = False has_native_amp = False try: if getattr(torch.cuda.amp, 'autocast') is not None: has_native_amp = True except AttributeError: pass try: import wandb has_wandb = True except ImportError: has_wandb = False try: from functorch.compile import memory_efficient_fusion has_functorch = True except ImportError as e: has_functorch = False has_compile = hasattr(torch, 'compile') _logger = logging.getLogger('train') config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False) parser.add_argument('-c', '--config', default='', type=str, metavar='FILE', help='YAML config file specifying default arguments') parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') group = parser.add_argument_group('Dataset parameters') parser.add_argument('data', nargs='?', metavar='DIR', const=None, help='path to dataset (positional is *deprecated*, use --data-dir)') parser.add_argument('--data-dir', metavar='DIR', help='path to dataset (root dir)') parser.add_argument('--dataset', metavar='NAME', default='', help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)') group.add_argument('--train-split', metavar='NAME', default='train', help='dataset train split (default: train)') group.add_argument('--val-split', metavar='NAME', default='validation', help='dataset validation split (default: validation)') parser.add_argument('--train-num-samples', default=None, type=int, metavar='N',
help='Manually specify num samples in train split, for IterableDatasets.') parser.add_argument('--val-num-samples', default=None, type=int, metavar='N', help='Manually specify num samples in validation split, for IterableDatasets.') group.add_argument('--dataset-download', action='store_true', default=False, help='Allow download of dataset for torch/ and tfds/ datasets that support it.') group.add_argument('--class-map', default='', type=str, metavar='FILENAME', help='path to class to idx mapping file (default: "")') group.add_argument('--input-img-mode', default=None, type=str, help='Dataset image conversion mode for input images.') group.add_argument('--input-key', default=None, type=str, help='Dataset key for input images.') group.add_argument('--target-key', default=None, type=str, help='Dataset key for target labels.') group = parser.add_argument_group('Model parameters') group.add_argument('--model', default='resnet50', type=str, metavar='MODEL', help='Name of model to train (default: "resnet50")') group.add_argument('--pretrained', action='store_true', default=False, help='Start with pretrained version of specified network (if avail)') group.add_argument('--pretrained-path', default=None, type=str, help='Load this checkpoint as if they were the pretrained weights (with adaptation).') group.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH', help='Load this checkpoint into model after initialization (default: none)') group.add_argument('--resume', default='', type=str, metavar='PATH', help='Resume full model and optimizer state from checkpoint (default: none)') group.add_argument('--no-resume-opt', action='store_true', default=False, help='prevent resume of optimizer state when resuming model') group.add_argument('--num-classes', type=int, default=None, metavar='N', help='number of label classes (Model default if None)') group.add_argument('--gp', default=None, type=str, metavar='POOL', help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') group.add_argument('--img-size', type=int, default=None, metavar='N', help='Image size (default: None => model default)') group.add_argument('--in-chans', type=int, default=None, metavar='N', help='Image input channels (default: None => 3)') group.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') group.add_argument('--crop-pct', default=None, type=float, metavar='N', help='Input image center crop percent (for validation only)') group.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset') group.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of dataset') group.add_argument('--interpolation', default='', type=str, metavar='NAME', help='Image resize interpolation type (overrides model)') group.add_argument('-b', '--batch-size', type=int, default=128, metavar='N', help='Input batch size for training (default: 128)') group.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N', help='Validation batch size override (default: None)') group.add_argument('--channels-last', action='store_true', default=False, help='Use channels_last memory layout') group.add_argument('--fuser', default='', type=str, help="Select jit fuser. 
One of ('', 'te', 'old', 'nvfuser')") group.add_argument('--grad-accum-steps', type=int, default=1, metavar='N', help='The number of steps to accumulate gradients (default: 1)') group.add_argument('--grad-checkpointing', action='store_true', default=False, help='Enable gradient checkpointing through model blocks/stages') group.add_argument('--fast-norm', default=False, action='store_true', help='enable experimental fast-norm') group.add_argument('--model-kwargs', nargs='*', default={}, action=utils.ParseKwargs) group.add_argument('--head-init-scale', default=None, type=float, help='Head initialization scale') group.add_argument('--head-init-bias', default=None, type=float, help='Head initialization bias value') scripting_group = group.add_mutually_exclusive_group() scripting_group.add_argument('--torchscript', dest='torchscript', action='store_true', help='torch.jit.script the full model') scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor', help='Enable compilation w/ specified backend (default: inductor).') group = parser.add_argument_group('Device parameters') group.add_argument('--device', default='cuda', type=str, help='Device (accelerator) to use.') group.add_argument('--amp', action='store_true', default=False, help='use NVIDIA Apex AMP or Native AMP for mixed precision training') group.add_argument('--amp-dtype', default='float16', type=str, help='lower precision AMP dtype (default: float16)') group.add_argument('--amp-impl', default='native', type=str, help='AMP impl to use, "native" or "apex" (default: native)') group.add_argument('--no-ddp-bb', action='store_true', default=False, help='Force broadcast buffers for native DDP to off.') group.add_argument('--synchronize-step', action='store_true', default=False, help='torch.cuda.synchronize() end of each step') group.add_argument('--local_rank', default=0, type=int) parser.add_argument('--device-modules', default=None, type=str, nargs='+', help='Python imports for device backend modules.') group = parser.add_argument_group('Optimizer parameters') group.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', help='Optimizer (default: "sgd")') group.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: None, use opt default)') group.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') group.add_argument('--momentum', type=float, default=0.9, metavar='M', help='Optimizer momentum (default: 0.9)') group.add_argument('--weight-decay', type=float, default=2e-05, help='weight decay (default: 2e-5)') group.add_argument('--clip-grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') group.add_argument('--clip-mode', type=str, default='norm', help='Gradient clipping mode. 
One of ("norm", "value", "agc")') group.add_argument('--layer-decay', type=float, default=None, help='layer-wise learning rate decay (default: None)') group.add_argument('--opt-kwargs', nargs='*', default={}, action=utils.ParseKwargs) group = parser.add_argument_group('Learning rate schedule parameters') group.add_argument('--sched', type=str, default='cosine', metavar='SCHEDULER', help='LR scheduler (default: "cosine"') group.add_argument('--sched-on-updates', action='store_true', default=False, help='Apply LR scheduler step on update instead of epoch end.') group.add_argument('--lr', type=float, default=None, metavar='LR', help='learning rate, overrides lr-base if set (default: None)') group.add_argument('--lr-base', type=float, default=0.1, metavar='LR', help='base learning rate: lr = lr_base * global_batch_size / base_size') group.add_argument('--lr-base-size', type=int, default=256, metavar='DIV', help='base learning rate batch size (divisor, default: 256).') group.add_argument('--lr-base-scale', type=str, default='', metavar='SCALE', help='base learning rate vs batch_size scaling ("linear", "sqrt", based on opt if empty)') group.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', help='learning rate noise on/off epoch percentages') group.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', help='learning rate noise limit percent (default: 0.67)') group.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', help='learning rate noise std-dev (default: 1.0)') group.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT', help='learning rate cycle len multiplier (default: 1.0)') group.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT', help='amount to decay each learning rate cycle (default: 0.5)') group.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N', help='learning rate cycle limit, cycles enabled if > 1') group.add_argument('--lr-k-decay', type=float, default=1.0, help='learning rate k-decay for cosine/poly (default: 1.0)') group.add_argument('--warmup-lr', type=float, default=1e-05, metavar='LR', help='warmup learning rate (default: 1e-5)') group.add_argument('--min-lr', type=float, default=0, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (default: 0)') group.add_argument('--epochs', type=int, default=300, metavar='N', help='number of epochs to train (default: 300)') group.add_argument('--epoch-repeats', type=float, default=0.0, metavar='N', help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).') group.add_argument('--start-epoch', default=None, type=int, metavar='N', help='manual epoch number (useful on restarts)') group.add_argument('--decay-milestones', default=[90, 180, 270], type=int, nargs='+', metavar='MILESTONES', help='list of decay epoch indices for multistep lr. 
must be increasing') group.add_argument('--decay-epochs', type=float, default=90, metavar='N', help='epoch interval to decay LR') group.add_argument('--warmup-epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports') group.add_argument('--warmup-prefix', action='store_true', default=False, help='Exclude warmup period from decay schedule.') group.add_argument('--cooldown-epochs', type=int, default=0, metavar='N', help='epochs to cooldown LR at min_lr, after cyclic schedule ends') group.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10)') group.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)') group = parser.add_argument_group('Augmentation and regularization parameters') group.add_argument('--no-aug', action='store_true', default=False, help='Disable all training augmentation, override other train aug args') group.add_argument('--train-crop-mode', type=str, default=None, help='Crop-mode in train') group.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT', help='Random resize scale (default: 0.08 1.0)') group.add_argument('--ratio', type=float, nargs='+', default=[3.0 / 4.0, 4.0 / 3.0], metavar='RATIO', help='Random resize aspect ratio (default: 0.75 1.33)') group.add_argument('--hflip', type=float, default=0.5, help='Horizontal flip training aug probability') group.add_argument('--vflip', type=float, default=0.0, help='Vertical flip training aug probability') group.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)') group.add_argument('--color-jitter-prob', type=float, default=None, metavar='PCT', help='Probability of applying any color jitter.') group.add_argument('--grayscale-prob', type=float, default=None, metavar='PCT', help='Probability of applying random grayscale conversion.') group.add_argument('--gaussian-blur-prob', type=float, default=None, metavar='PCT', help='Probability of applying gaussian blur.') group.add_argument('--aa', type=str, default=None, metavar='NAME', help='Use AutoAugment policy. "v0" or "original". (default: None)') group.add_argument('--aug-repeats', type=float, default=0, help='Number of augmentation repetitions (distributed training only) (default: 0)') group.add_argument('--aug-splits', type=int, default=0, help='Number of augmentation splits (default: 0, valid: 0 or >=2)') group.add_argument('--jsd-loss', action='store_true', default=False, help='Enable Jensen-Shannon Divergence + CE loss.
Use with `--aug-splits`.') group.add_argument('--bce-loss', action='store_true', default=False, help='Enable BCE loss w/ Mixup/CutMix use.') group.add_argument('--bce-sum', action='store_true', default=False, help='Sum over classes when using BCE loss.') group.add_argument('--bce-target-thresh', type=float, default=None, help='Threshold for binarizing softened BCE targets (default: None, disabled).') group.add_argument('--bce-pos-weight', type=float, default=None, help='Positive weighting for BCE loss.') group.add_argument('--reprob', type=float, default=0.0, metavar='PCT', help='Random erase prob (default: 0.)') group.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') group.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') group.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') group.add_argument('--mixup', type=float, default=0.0, help='mixup alpha, mixup enabled if > 0. (default: 0.)') group.add_argument('--cutmix', type=float, default=0.0, help='cutmix alpha, cutmix enabled if > 0. (default: 0.)') group.add_argument('--cutmix-minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') group.add_argument('--mixup-prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') group.add_argument('--mixup-switch-prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') group.add_argument('--mixup-mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') group.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N', help='Turn off mixup after this epoch, disabled if 0 (default: 0)') group.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') group.add_argument('--train-interpolation', type=str, default='random', help='Training interpolation (random, bilinear, bicubic default: "random")') group.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') group.add_argument('--drop-connect', type=float, default=None, metavar='PCT', help='Drop connect rate, DEPRECATED, use drop-path (default: None)') group.add_argument('--drop-path', type=float, default=None, metavar='PCT', help='Drop path rate (default: None)') group.add_argument('--drop-block', type=float, default=None, metavar='PCT', help='Drop block rate (default: None)') group = parser.add_argument_group('Batch norm parameters', 'Only works with gen_efficientnet based models currently.') group.add_argument('--bn-momentum', type=float, default=None, help='BatchNorm momentum override (if not None)') group.add_argument('--bn-eps', type=float, default=None, help='BatchNorm epsilon override (if not None)') group.add_argument('--sync-bn', action='store_true', help='Enable NVIDIA Apex or Torch synchronized BatchNorm.') group.add_argument('--dist-bn', type=str, default='reduce', help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")') group.add_argument('--split-bn', action='store_true', help='Enable separate BN layers per augmentation split.') group = parser.add_argument_group('Model exponential moving average parameters') group.add_argument('--model-ema', action='store_true', default=False, help='Enable tracking moving average of model weights.') 
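# EMA sketch mirroring how main() below wires these flags into timm.utils.ModelEmaV3
# (decay value illustrative):
#   model_ema = utils.ModelEmaV3(model, decay=0.9998, use_warmup=True)
#   model_ema.update(model, step=num_updates)  # once per optimizer update, see train_one_epoch()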
group.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.') group.add_argument('--model-ema-decay', type=float, default=0.9998, help='Decay factor for model weights moving average (default: 0.9998)') group.add_argument('--model-ema-warmup', action='store_true', help='Enable warmup for model EMA decay.') group = parser.add_argument_group('Miscellaneous parameters') group.add_argument('--seed', type=int, default=42, metavar='S', help='random seed (default: 42)') group.add_argument('--worker-seeding', type=str, default='all', help='worker seed mode (default: all)') group.add_argument('--log-interval', type=int, default=50, metavar='N', help='how many batches to wait before logging training status') group.add_argument('--recovery-interval', type=int, default=0, metavar='N', help='how many batches to wait before writing recovery checkpoint') group.add_argument('--checkpoint-hist', type=int, default=10, metavar='N', help='number of checkpoints to keep (default: 10)') group.add_argument('-j', '--workers', type=int, default=4, metavar='N', help='how many training processes to use (default: 4)') group.add_argument('--save-images', action='store_true', default=False, help='save images of input batches every log interval for debugging') group.add_argument('--pin-mem', action='store_true', default=False, help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') group.add_argument('--no-prefetcher', action='store_true', default=False, help='disable fast prefetcher') group.add_argument('--output', default='', type=str, metavar='PATH', help='path to output folder (default: none, current dir)') group.add_argument('--experiment', default='', type=str, metavar='NAME', help='name of train experiment, name of sub-folder for output') group.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC', help='Best metric (default: "top1")') group.add_argument('--tta', type=int, default=0, metavar='N', help='Test/inference time augmentation (oversampling) factor.
0=None (default: 0)') group.add_argument('--use-multi-epochs-loader', action='store_true', default=False, help='use the multi-epochs-loader to save time at the beginning of every epoch') group.add_argument('--log-wandb', action='store_true', default=False, help='log training and validation metrics to wandb') def _parse_args(): (args_config, remaining) = config_parser.parse_known_args() if args_config.config: with open(args_config.config, 'r') as f: cfg = yaml.safe_load(f) parser.set_defaults(**cfg) args = parser.parse_args(remaining) args_text = yaml.safe_dump(args.__dict__, default_flow_style=False) return (args, args_text) def main(): utils.setup_default_logging() (args, args_text) = _parse_args() if args.device_modules: for module in args.device_modules: importlib.import_module(module) if torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True args.prefetcher = not args.no_prefetcher args.grad_accum_steps = max(1, args.grad_accum_steps) device = utils.init_distributed_device(args) if args.distributed: _logger.info(f'Training in distributed mode with multiple processes, 1 device per process. Process {args.rank}, total {args.world_size}, device {args.device}.') else: _logger.info(f'Training with a single process on 1 device ({args.device}).') assert args.rank >= 0 use_amp = None amp_dtype = torch.float16 if args.amp: if args.amp_impl == 'apex': assert has_apex, 'AMP impl specified as APEX but APEX is not installed.' use_amp = 'apex' assert args.amp_dtype == 'float16' else: assert has_native_amp, 'Please update PyTorch to a version with native AMP (or use APEX).' use_amp = 'native' assert args.amp_dtype in ('float16', 'bfloat16') if args.amp_dtype == 'bfloat16': amp_dtype = torch.bfloat16 utils.random_seed(args.seed, args.rank) if args.fuser: utils.set_jit_fuser(args.fuser) if args.fast_norm: set_fast_norm() in_chans = 3 if args.in_chans is not None: in_chans = args.in_chans elif args.input_size is not None: in_chans = args.input_size[0] factory_kwargs = {} if args.pretrained_path: factory_kwargs['pretrained_cfg_overlay'] = dict(file=args.pretrained_path, num_classes=-1) model = create_model(args.model, pretrained=args.pretrained, in_chans=in_chans, num_classes=args.num_classes, drop_rate=args.drop, drop_path_rate=args.drop_path, drop_block_rate=args.drop_block, global_pool=args.gp, bn_momentum=args.bn_momentum, bn_eps=args.bn_eps, scriptable=args.torchscript, checkpoint_path=args.initial_checkpoint, **factory_kwargs, **args.model_kwargs) if args.head_init_scale is not None: with torch.no_grad(): model.get_classifier().weight.mul_(args.head_init_scale) model.get_classifier().bias.mul_(args.head_init_scale) if args.head_init_bias is not None: nn.init.constant_(model.get_classifier().bias, args.head_init_bias) if args.num_classes is None: assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
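# Fall back to the model's own class count when --num-classes is not given; e.g. a
# hypothetical create_model('resnet50') resolves model.num_classes to 1000 from its pretrained config.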
args.num_classes = model.num_classes if args.grad_checkpointing: model.set_grad_checkpointing(enable=True) if utils.is_primary(args): _logger.info(f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}') data_config = resolve_data_config(vars(args), model=model, verbose=utils.is_primary(args)) num_aug_splits = 0 if args.aug_splits > 0: assert args.aug_splits > 1, 'A split of 1 makes no sense' num_aug_splits = args.aug_splits if args.split_bn: assert num_aug_splits > 1 or args.resplit model = convert_splitbn_model(model, max(num_aug_splits, 2)) model.to(device=device) if args.channels_last: model.to(memory_format=torch.channels_last) if args.distributed and args.sync_bn: args.dist_bn = '' assert not args.split_bn if has_apex and use_amp == 'apex': model = convert_syncbn_model(model) else: model = convert_sync_batchnorm(model) if utils.is_primary(args): _logger.info('Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.') if args.torchscript: assert not args.torchcompile assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model' assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model' model = torch.jit.script(model) if not args.lr: global_batch_size = args.batch_size * args.world_size * args.grad_accum_steps batch_ratio = global_batch_size / args.lr_base_size if not args.lr_base_scale: on = args.opt.lower() args.lr_base_scale = 'sqrt' if any([o in on for o in ('ada', 'lamb')]) else 'linear' if args.lr_base_scale == 'sqrt': batch_ratio = batch_ratio ** 0.5 args.lr = args.lr_base * batch_ratio if utils.is_primary(args): _logger.info(f'Learning rate ({args.lr}) calculated from base learning rate ({args.lr_base}) and effective global batch size ({global_batch_size}) with {args.lr_base_scale} scaling.') optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args), **args.opt_kwargs) amp_autocast = suppress loss_scaler = None if use_amp == 'apex': assert device.type == 'cuda' (model, optimizer) = amp.initialize(model, optimizer, opt_level='O1') loss_scaler = ApexScaler() if utils.is_primary(args): _logger.info('Using NVIDIA APEX AMP. Training in mixed precision.') elif use_amp == 'native': try: amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype) except (AttributeError, TypeError): assert device.type == 'cuda' amp_autocast = torch.cuda.amp.autocast if device.type == 'cuda' and amp_dtype == torch.float16: loss_scaler = NativeScaler() if utils.is_primary(args): _logger.info('Using native Torch AMP. Training in mixed precision.') elif utils.is_primary(args): _logger.info('AMP not enabled. 
Training in float32.') resume_epoch = None if args.resume: resume_epoch = resume_checkpoint(model, args.resume, optimizer=None if args.no_resume_opt else optimizer, loss_scaler=None if args.no_resume_opt else loss_scaler, log_info=utils.is_primary(args)) model_ema = None if args.model_ema: model_ema = utils.ModelEmaV3(model, decay=args.model_ema_decay, use_warmup=args.model_ema_warmup, device='cpu' if args.model_ema_force_cpu else None) if args.resume: load_checkpoint(model_ema.module, args.resume, use_ema=True) if args.torchcompile: model_ema = torch.compile(model_ema, backend=args.torchcompile) if args.distributed: if has_apex and use_amp == 'apex': if utils.is_primary(args): _logger.info('Using NVIDIA APEX DistributedDataParallel.') model = ApexDDP(model, delay_allreduce=True) else: if utils.is_primary(args): _logger.info('Using native Torch DistributedDataParallel.') model = NativeDDP(model, device_ids=[device], broadcast_buffers=not args.no_ddp_bb) if args.torchcompile: assert has_compile, 'A version of torch w/ torch.compile() is required for --compile, possibly a nightly.' model = torch.compile(model, backend=args.torchcompile) if args.data and (not args.data_dir): args.data_dir = args.data if args.input_img_mode is None: input_img_mode = 'RGB' if data_config['input_size'][0] == 3 else 'L' else: input_img_mode = args.input_img_mode dataset_train = create_dataset(args.dataset, root=args.data_dir, split=args.train_split, is_training=True, class_map=args.class_map, download=args.dataset_download, batch_size=args.batch_size, seed=args.seed, repeats=args.epoch_repeats, input_img_mode=input_img_mode, input_key=args.input_key, target_key=args.target_key, num_samples=args.train_num_samples) if args.val_split: dataset_eval = create_dataset(args.dataset, root=args.data_dir, split=args.val_split, is_training=False, class_map=args.class_map, download=args.dataset_download, batch_size=args.batch_size, input_img_mode=input_img_mode, input_key=args.input_key, target_key=args.target_key, num_samples=args.val_num_samples) collate_fn = None mixup_fn = None mixup_active = args.mixup > 0 or args.cutmix > 0.0 or args.cutmix_minmax is not None if mixup_active: mixup_args = dict(mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.num_classes) if args.prefetcher: assert not num_aug_splits collate_fn = FastCollateMixup(**mixup_args) else: mixup_fn = Mixup(**mixup_args) if num_aug_splits > 1: dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits) train_interpolation = args.train_interpolation if args.no_aug or not train_interpolation: train_interpolation = data_config['interpolation'] loader_train = create_loader(dataset_train, input_size=data_config['input_size'], batch_size=args.batch_size, is_training=True, no_aug=args.no_aug, re_prob=args.reprob, re_mode=args.remode, re_count=args.recount, re_split=args.resplit, train_crop_mode=args.train_crop_mode, scale=args.scale, ratio=args.ratio, hflip=args.hflip, vflip=args.vflip, color_jitter=args.color_jitter, color_jitter_prob=args.color_jitter_prob, grayscale_prob=args.grayscale_prob, gaussian_blur_prob=args.gaussian_blur_prob, auto_augment=args.aa, num_aug_repeats=args.aug_repeats, num_aug_splits=num_aug_splits, interpolation=train_interpolation, mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, distributed=args.distributed, collate_fn=collate_fn, 
pin_memory=args.pin_mem, device=device, use_prefetcher=args.prefetcher, use_multi_epochs_loader=args.use_multi_epochs_loader, worker_seeding=args.worker_seeding) loader_eval = None if args.val_split: eval_workers = args.workers if args.distributed and ('tfds' in args.dataset or 'wds' in args.dataset): eval_workers = min(2, args.workers) loader_eval = create_loader(dataset_eval, input_size=data_config['input_size'], batch_size=args.validation_batch_size or args.batch_size, is_training=False, interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=eval_workers, distributed=args.distributed, crop_pct=data_config['crop_pct'], pin_memory=args.pin_mem, device=device, use_prefetcher=args.prefetcher) if args.jsd_loss: assert num_aug_splits > 1 train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing) elif mixup_active: if args.bce_loss: train_loss_fn = BinaryCrossEntropy(target_threshold=args.bce_target_thresh, sum_classes=args.bce_sum, pos_weight=args.bce_pos_weight) else: train_loss_fn = SoftTargetCrossEntropy() elif args.smoothing: if args.bce_loss: train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing, target_threshold=args.bce_target_thresh, sum_classes=args.bce_sum, pos_weight=args.bce_pos_weight) else: train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing) else: train_loss_fn = nn.CrossEntropyLoss() train_loss_fn = train_loss_fn.to(device=device) validate_loss_fn = nn.CrossEntropyLoss().to(device=device) eval_metric = args.eval_metric if loader_eval is not None else 'loss' decreasing_metric = eval_metric == 'loss' best_metric = None best_epoch = None saver = None output_dir = None if utils.is_primary(args): if args.experiment: exp_name = args.experiment else: exp_name = '-'.join([datetime.now().strftime('%Y%m%d-%H%M%S'), safe_model_name(args.model), str(data_config['input_size'][-1])]) output_dir = utils.get_outdir(args.output if args.output else './output/train', exp_name) saver = utils.CheckpointSaver(model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler, checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing_metric, max_history=args.checkpoint_hist) with open(os.path.join(output_dir, 'args.yaml'), 'w') as f: f.write(args_text) if utils.is_primary(args) and args.log_wandb: if has_wandb: wandb.init(project=args.experiment, config=args) else: _logger.warning("You've requested to log metrics to wandb but package not found. Metrics not being logged to wandb, try `pip install wandb`") updates_per_epoch = (len(loader_train) + args.grad_accum_steps - 1) // args.grad_accum_steps (lr_scheduler, num_epochs) = create_scheduler_v2(optimizer, **scheduler_kwargs(args, decreasing_metric=decreasing_metric), updates_per_epoch=updates_per_epoch) start_epoch = 0 if args.start_epoch is not None: start_epoch = args.start_epoch elif resume_epoch is not None: start_epoch = resume_epoch if lr_scheduler is not None and start_epoch > 0: if args.sched_on_updates: lr_scheduler.step_update(start_epoch * updates_per_epoch) else: lr_scheduler.step(start_epoch) if utils.is_primary(args): _logger.info(f"Scheduled epochs: {num_epochs}. 
LR stepped per {('epoch' if lr_scheduler.t_in_epochs else 'update')}.") results = [] try: for epoch in range(start_epoch, num_epochs): if hasattr(dataset_train, 'set_epoch'): dataset_train.set_epoch(epoch) elif args.distributed and hasattr(loader_train.sampler, 'set_epoch'): loader_train.sampler.set_epoch(epoch) train_metrics = train_one_epoch(epoch, model, loader_train, optimizer, train_loss_fn, args, lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir, amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn, num_updates_total=num_epochs * updates_per_epoch) if args.distributed and args.dist_bn in ('broadcast', 'reduce'): if utils.is_primary(args): _logger.info('Distributing BatchNorm running means and vars') utils.distribute_bn(model, args.world_size, args.dist_bn == 'reduce') if loader_eval is not None: eval_metrics = validate(model, loader_eval, validate_loss_fn, args, device=device, amp_autocast=amp_autocast) if model_ema is not None and (not args.model_ema_force_cpu): if args.distributed and args.dist_bn in ('broadcast', 'reduce'): utils.distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce') ema_eval_metrics = validate(model_ema, loader_eval, validate_loss_fn, args, device=device, amp_autocast=amp_autocast, log_suffix=' (EMA)') eval_metrics = ema_eval_metrics else: eval_metrics = None if output_dir is not None: lrs = [param_group['lr'] for param_group in optimizer.param_groups] utils.update_summary(epoch, train_metrics, eval_metrics, filename=os.path.join(output_dir, 'summary.csv'), lr=sum(lrs) / len(lrs), write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb) if eval_metrics is not None: latest_metric = eval_metrics[eval_metric] else: latest_metric = train_metrics[eval_metric] if saver is not None: (best_metric, best_epoch) = saver.save_checkpoint(epoch, metric=latest_metric) if lr_scheduler is not None: lr_scheduler.step(epoch + 1, latest_metric) results.append({'epoch': epoch, 'train': train_metrics, 'validation': eval_metrics}) except KeyboardInterrupt: pass results = {'all': results} if best_metric is not None: results['best'] = results['all'][best_epoch - start_epoch] _logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch)) print(f'--result\n{json.dumps(results, indent=4)}') def train_one_epoch(epoch, model, loader, optimizer, loss_fn, args, device=torch.device('cuda'), lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, loss_scaler=None, model_ema=None, mixup_fn=None, num_updates_total=None): if args.mixup_off_epoch and epoch >= args.mixup_off_epoch: if args.prefetcher and loader.mixup_enabled: loader.mixup_enabled = False elif mixup_fn is not None: mixup_fn.mixup_enabled = False second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order has_no_sync = hasattr(model, 'no_sync') update_time_m = utils.AverageMeter() data_time_m = utils.AverageMeter() losses_m = utils.AverageMeter() model.train() accum_steps = args.grad_accum_steps last_accum_steps = len(loader) % accum_steps updates_per_epoch = (len(loader) + accum_steps - 1) // accum_steps num_updates = epoch * updates_per_epoch last_batch_idx = len(loader) - 1 last_batch_idx_to_accum = len(loader) - last_accum_steps data_start_time = update_start_time = time.time() optimizer.zero_grad() update_sample_count = 0 for (batch_idx, (input, target)) in enumerate(loader): last_batch = batch_idx == last_batch_idx need_update = last_batch or (batch_idx + 1) % accum_steps == 0 update_idx = 
batch_idx // accum_steps if batch_idx >= last_batch_idx_to_accum: accum_steps = last_accum_steps if not args.prefetcher: (input, target) = (input.to(device), target.to(device)) if mixup_fn is not None: (input, target) = mixup_fn(input, target) if args.channels_last: input = input.contiguous(memory_format=torch.channels_last) data_time_m.update(accum_steps * (time.time() - data_start_time)) def _forward(): with amp_autocast(): output = model(input) loss = loss_fn(output, target) if accum_steps > 1: loss /= accum_steps return loss def _backward(_loss): if loss_scaler is not None: loss_scaler(_loss, optimizer, clip_grad=args.clip_grad, clip_mode=args.clip_mode, parameters=model_parameters(model, exclude_head='agc' in args.clip_mode), create_graph=second_order, need_update=need_update) else: _loss.backward(create_graph=second_order) if need_update: if args.clip_grad is not None: utils.dispatch_clip_grad(model_parameters(model, exclude_head='agc' in args.clip_mode), value=args.clip_grad, mode=args.clip_mode) optimizer.step() if has_no_sync and (not need_update): with model.no_sync(): loss = _forward() _backward(loss) else: loss = _forward() _backward(loss) if not args.distributed: losses_m.update(loss.item() * accum_steps, input.size(0)) update_sample_count += input.size(0) if not need_update: data_start_time = time.time() continue num_updates += 1 optimizer.zero_grad() if model_ema is not None: model_ema.update(model, step=num_updates) if args.synchronize_step and device.type == 'cuda': torch.cuda.synchronize() time_now = time.time() update_time_m.update(time.time() - update_start_time) update_start_time = time_now if update_idx % args.log_interval == 0: lrl = [param_group['lr'] for param_group in optimizer.param_groups] lr = sum(lrl) / len(lrl) if args.distributed: reduced_loss = utils.reduce_tensor(loss.data, args.world_size) losses_m.update(reduced_loss.item() * accum_steps, input.size(0)) update_sample_count *= args.world_size if utils.is_primary(args): _logger.info(f'Train: {epoch} [{update_idx:>4d}/{updates_per_epoch} ({100.0 * (update_idx + 1) / updates_per_epoch:>3.0f}%)] Loss: {losses_m.val:#.3g} ({losses_m.avg:#.3g}) Time: {update_time_m.val:.3f}s, {update_sample_count / update_time_m.val:>7.2f}/s ({update_time_m.avg:.3f}s, {update_sample_count / update_time_m.avg:>7.2f}/s) LR: {lr:.3e} Data: {data_time_m.val:.3f} ({data_time_m.avg:.3f})') if args.save_images and output_dir: torchvision.utils.save_image(input, os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx), padding=0, normalize=True) if saver is not None and args.recovery_interval and ((update_idx + 1) % args.recovery_interval == 0): saver.save_recovery(epoch, batch_idx=update_idx) if lr_scheduler is not None: lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg) update_sample_count = 0 data_start_time = time.time() if hasattr(optimizer, 'sync_lookahead'): optimizer.sync_lookahead() return OrderedDict([('loss', losses_m.avg)]) def validate(model, loader, loss_fn, args, device=torch.device('cuda'), amp_autocast=suppress, log_suffix=''): batch_time_m = utils.AverageMeter() losses_m = utils.AverageMeter() top1_m = utils.AverageMeter() top5_m = utils.AverageMeter() model.eval() end = time.time() last_idx = len(loader) - 1 with torch.no_grad(): for (batch_idx, (input, target)) in enumerate(loader): last_batch = batch_idx == last_idx if not args.prefetcher: input = input.to(device) target = target.to(device) if args.channels_last: input = input.contiguous(memory_format=torch.channels_last) with amp_autocast(): 
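# amp_autocast is contextlib.suppress (a no-op) unless main() swapped in a torch.autocast
# context for native AMP, so this block runs in float32 by default.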
output = model(input) if isinstance(output, (tuple, list)): output = output[0] reduce_factor = args.tta if reduce_factor > 1: output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2) target = target[0:target.size(0):reduce_factor] loss = loss_fn(output, target) (acc1, acc5) = utils.accuracy(output, target, topk=(1, 5)) if args.distributed: reduced_loss = utils.reduce_tensor(loss.data, args.world_size) acc1 = utils.reduce_tensor(acc1, args.world_size) acc5 = utils.reduce_tensor(acc5, args.world_size) else: reduced_loss = loss.data if device.type == 'cuda': torch.cuda.synchronize() losses_m.update(reduced_loss.item(), input.size(0)) top1_m.update(acc1.item(), output.size(0)) top5_m.update(acc5.item(), output.size(0)) batch_time_m.update(time.time() - end) end = time.time() if utils.is_primary(args) and (last_batch or batch_idx % args.log_interval == 0): log_name = 'Test' + log_suffix _logger.info(f'{log_name}: [{batch_idx:>4d}/{last_idx}] Time: {batch_time_m.val:.3f} ({batch_time_m.avg:.3f}) Loss: {losses_m.val:>7.3f} ({losses_m.avg:>6.3f}) Acc@1: {top1_m.val:>7.3f} ({top1_m.avg:>7.3f}) Acc@5: {top5_m.val:>7.3f} ({top5_m.avg:>7.3f})') metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)]) return metrics if __name__ == '__main__': main() # File: pytorch-image-models-main/validate.py """""" import argparse import csv import glob import json import logging import os import time from collections import OrderedDict from contextlib import suppress from functools import partial import torch import torch.nn as nn import torch.nn.parallel from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet from timm.layers import apply_test_time_pool, set_fast_norm from timm.models import create_model, load_checkpoint, is_model, list_models from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_fuser, decay_batch_step, check_batch_size_retry, ParseKwargs, reparameterize_model try: from apex import amp has_apex = True except ImportError: has_apex = False has_native_amp = False try: if getattr(torch.cuda.amp, 'autocast') is not None: has_native_amp = True except AttributeError: pass try: from functorch.compile import memory_efficient_fusion has_functorch = True except ImportError as e: has_functorch = False has_compile = hasattr(torch, 'compile') _logger = logging.getLogger('validate') parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation') parser.add_argument('data', nargs='?', metavar='DIR', const=None, help='path to dataset (*deprecated*, use --data-dir)') parser.add_argument('--data-dir', metavar='DIR', help='path to dataset (root dir)') parser.add_argument('--dataset', metavar='NAME', default='', help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)') parser.add_argument('--split', metavar='NAME', default='validation', help='dataset split (default: validation)') parser.add_argument('--num-samples', default=None, type=int, metavar='N', help='Manually specify num samples in dataset split, for IterableDatasets.') parser.add_argument('--dataset-download', action='store_true', default=False, help='Allow download of dataset for torch/ and tfds/ datasets that support it.') parser.add_argument('--class-map', default='', type=str, metavar='FILENAME', help='path to class to idx mapping file (default: "")') parser.add_argument('--input-key', default=None, type=str, help='Dataset key for input images.') parser.add_argument('--input-img-mode', default=None,

parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('data', nargs='?', metavar='DIR', const=None,
                    help='path to dataset (*deprecated*, use --data-dir)')
parser.add_argument('--data-dir', metavar='DIR',
                    help='path to dataset (root dir)')
parser.add_argument('--dataset', metavar='NAME', default='',
                    help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation',
                    help='dataset split (default: validation)')
parser.add_argument('--num-samples', default=None, type=int, metavar='N',
                    help='Manually specify num samples in dataset split, for IterableDatasets.')
parser.add_argument('--dataset-download', action='store_true', default=False,
                    help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
                    help='path to class to idx mapping file (default: "")')
parser.add_argument('--input-key', default=None, type=str,
                    help='Dataset key for input images.')
parser.add_argument('--input-img-mode', default=None, type=str,
                    help='Dataset image conversion mode for input images.')
parser.add_argument('--target-key', default=None, type=str,
                    help='Dataset key for target labels.')
parser.add_argument('--model', '-m', metavar='NAME', default='dpn92',
                    help='model architecture (default: dpn92)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N',
                    help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int, metavar='N',
                    help='Input image dimension, uses model default if empty')
parser.add_argument('--in-chans', type=int, default=None, metavar='N',
                    help='Image input channels (default: None => 3)')
parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N N N',
                    help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False,
                    help='force use of train input size, even when test size is specified in pretrained cfg')
parser.add_argument('--crop-pct', default=None, type=float, metavar='N',
                    help='Input image center crop pct')
parser.add_argument('--crop-mode', default=None, type=str, metavar='N',
                    help='Input image crop mode (squash, border, center). Model default if None.')
parser.add_argument('--crop-border-pixels', type=int, default=None,
                    help='Crop pixels from image border.')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
                    help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
                    help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None,
                    help='Number classes in dataset')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
                    help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--log-freq', default=10, type=int, metavar='N',
                    help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--num-gpu', type=int, default=1,
                    help='Number of GPUS to use')
parser.add_argument('--test-pool', dest='test_pool', action='store_true',
                    help='enable test time pool')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
                    help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=False,
                    help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--channels-last', action='store_true', default=False,
                    help='Use channels_last memory layout')
parser.add_argument('--device', default='cuda', type=str,
                    help='Device (accelerator) to use.')
parser.add_argument('--amp', action='store_true', default=False,
                    help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--amp-dtype', default='float16', type=str,
                    help='lower precision AMP dtype (default: float16)')
parser.add_argument('--amp-impl', default='native', type=str,
                    help='AMP impl to use, "native" or "apex" (default: native)')
parser.add_argument('--tf-preprocessing', action='store_true', default=False,
                    help='Use Tensorflow preprocessing pipeline (requires CPU TF installed)')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
                    help='use ema version of weights if present')
parser.add_argument('--fuser', default='', type=str,
                    help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--fast-norm', default=False, action='store_true',
                    help='enable experimental fast-norm')
parser.add_argument('--reparam', default=False, action='store_true',
                    help='Reparameterize model')
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)

scripting_group = parser.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', default=False, action='store_true',
                             help='torch.jit.script the full model')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
                             help='Enable compilation w/ specified backend (default: inductor).')
scripting_group.add_argument('--aot-autograd', default=False, action='store_true',
                             help='Enable AOT Autograd support.')

parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
                    help='Output csv file for validation results (summary)')
parser.add_argument('--results-format', default='csv', type=str,
                    help='Format for results file one of (csv, json) (default: csv).')
parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME',
                    help='Real labels JSON file for imagenet evaluation')
parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME',
                    help='Valid label indices txt file for validation of partial label space')
parser.add_argument('--retry', default=False, action='store_true',
                    help='Enable batch size decay & retry for single model validation')
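
# Expository sketch, not part of the upstream file: the parser above can also
# be driven programmatically, e.g. for a quick smoke test. The model name and
# data path below are placeholders, not recommendations.
def _example_args():
    return parser.parse_args([
        '--model', 'resnet50',              # any model name known to timm
        '--data-dir', '/path/to/imagenet',  # placeholder dataset root
        '--batch-size', '64',
        '--amp',
    ])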

def validate(args):
    # if no checkpoint is given, fall back to validating pretrained weights
    args.pretrained = args.pretrained or not args.checkpoint
    args.prefetcher = not args.no_prefetcher

    if torch.cuda.is_available():
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.benchmark = True

    device = torch.device(args.device)

    # resolve AMP arguments based on PyTorch / Apex availability
    use_amp = None
    amp_autocast = suppress
    if args.amp:
        if args.amp_impl == 'apex':
            assert has_apex, 'AMP impl specified as APEX but APEX is not installed.'
            assert args.amp_dtype == 'float16'
            use_amp = 'apex'
            _logger.info('Validating in mixed precision with NVIDIA APEX AMP.')
        else:
            assert has_native_amp, 'Please update PyTorch to a version with native AMP (or use APEX).'
            assert args.amp_dtype in ('float16', 'bfloat16')
            use_amp = 'native'
            amp_dtype = torch.bfloat16 if args.amp_dtype == 'bfloat16' else torch.float16
            amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
            _logger.info('Validating in mixed precision with native PyTorch AMP.')
    else:
        _logger.info('Validating in float32. AMP not enabled.')

    if args.fuser:
        set_jit_fuser(args.fuser)

    if args.fast_norm:
        set_fast_norm()

    in_chans = 3
    if args.in_chans is not None:
        in_chans = args.in_chans
    elif args.input_size is not None:
        in_chans = args.input_size[0]

    model = create_model(
        args.model,
        pretrained=args.pretrained,
        num_classes=args.num_classes,
        in_chans=in_chans,
        global_pool=args.gp,
        scriptable=args.torchscript,
        **args.model_kwargs,
    )
    if args.num_classes is None:
        assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
        args.num_classes = model.num_classes

    if args.checkpoint:
        load_checkpoint(model, args.checkpoint, args.use_ema)

    if args.reparam:
        model = reparameterize_model(model)

    param_count = sum([m.numel() for m in model.parameters()])
    _logger.info('Model %s created, param count: %d' % (args.model, param_count))
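    # resolve_data_config() merges any CLI overrides (input size, crop pct,
    # mean/std, interpolation) with the model's pretrained config to produce
    # the preprocessing settings used to build the loader below.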
    data_config = resolve_data_config(
        vars(args),
        model=model,
        use_test_size=not args.use_train_size,
        verbose=True,
    )
    test_time_pool = False
    if args.test_pool:
        model, test_time_pool = apply_test_time_pool(model, data_config)

    model = model.to(device)
    if args.channels_last:
        model = model.to(memory_format=torch.channels_last)

    if args.torchscript:
        assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
        model = torch.jit.script(model)
    elif args.torchcompile:
        assert has_compile, 'A version of torch w/ torch.compile() is required for --compile, possibly a nightly.'
        torch._dynamo.reset()
        model = torch.compile(model, backend=args.torchcompile)
    elif args.aot_autograd:
        assert has_functorch, 'functorch is needed for --aot-autograd'
        model = memory_efficient_fusion(model)

    if use_amp == 'apex':
        model = amp.initialize(model, opt_level='O1')

    if args.num_gpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))

    criterion = nn.CrossEntropyLoss().to(device)

    root_dir = args.data or args.data_dir
    if args.input_img_mode is None:
        input_img_mode = 'RGB' if data_config['input_size'][0] == 3 else 'L'
    else:
        input_img_mode = args.input_img_mode
    dataset = create_dataset(
        root=root_dir,
        name=args.dataset,
        split=args.split,
        download=args.dataset_download,
        load_bytes=args.tf_preprocessing,
        class_map=args.class_map,
        num_samples=args.num_samples,
        input_key=args.input_key,
        input_img_mode=input_img_mode,
        target_key=args.target_key,
    )

    if args.valid_labels:
        with open(args.valid_labels, 'r') as f:
            valid_labels = [int(line.rstrip()) for line in f]
    else:
        valid_labels = None

    if args.real_labels:
        real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)
    else:
        real_labels = None

    crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
    loader = create_loader(
        dataset,
        input_size=data_config['input_size'],
        batch_size=args.batch_size,
        use_prefetcher=args.prefetcher,
        interpolation=data_config['interpolation'],
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        crop_pct=crop_pct,
        crop_mode=data_config['crop_mode'],
        crop_border_pixels=args.crop_border_pixels,
        pin_memory=args.pin_mem,
        device=device,
        tf_preprocessing=args.tf_preprocessing,
    )

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    model.eval()
    with torch.no_grad():
        # warmup pass on a dummy batch before timing the real batches
        input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).to(device)
        if args.channels_last:
            input = input.contiguous(memory_format=torch.channels_last)
        with amp_autocast():
            model(input)

        end = time.time()
        for batch_idx, (input, target) in enumerate(loader):
            if args.no_prefetcher:
                target = target.to(device)
                input = input.to(device)
            if args.channels_last:
                input = input.contiguous(memory_format=torch.channels_last)

            # compute output
            with amp_autocast():
                output = model(input)

                if valid_labels is not None:
                    output = output[:, valid_labels]
                loss = criterion(output, target)

            if real_labels is not None:
                real_labels.add_result(output)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1.item(), input.size(0))
            top5.update(acc5.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if batch_idx % args.log_freq == 0:
                _logger.info(
                    'Test: [{0:>4d}/{1}] '
                    'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
                    'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
                    'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
                    'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
                        batch_idx,
                        len(loader),
                        batch_time=batch_time,
                        rate_avg=input.size(0) / batch_time.avg,
                        loss=losses,
                        top1=top1,
                        top5=top5,
                    )
                )

    if real_labels is not None:
        # real labels mode replaces the topk measured on original labels
        top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5)
    else:
        top1a, top5a = top1.avg, top5.avg
    results = OrderedDict(
        model=args.model,
        top1=round(top1a, 4),
        top1_err=round(100 - top1a, 4),
        top5=round(top5a, 4),
        top5_err=round(100 - top5a, 4),
        param_count=round(param_count / 1000000.0, 2),
        img_size=data_config['input_size'][-1],
        crop_pct=crop_pct,
        interpolation=data_config['interpolation'],
    )

    _logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format(
        results['top1'], results['top1_err'], results['top5'], results['top5_err']))

    return results


def _try_run(args, initial_batch_size):
    batch_size = initial_batch_size
    results = OrderedDict()
    error_str = 'Unknown'
    while batch_size:
        args.batch_size = batch_size * args.num_gpu  # scale batch by num-gpu for DataParallel
        try:
            if torch.cuda.is_available() and 'cuda' in args.device:
                torch.cuda.empty_cache()
            results = validate(args)
            return results
        except RuntimeError as e:
            error_str = str(e)
            _logger.error(f'"{error_str}" while running validation.')
            if not check_batch_size_retry(error_str):
                break
        batch_size = decay_batch_step(batch_size)
        _logger.warning(f'Reducing batch size to {batch_size} for retry.')
    results['error'] = error_str
    _logger.error(f'{args.model} failed to validate ({error_str}).')
    return results


_NON_IN1K_FILTERS = ['*_in21k', '*_in22k', '*in12k', '*_dino', '*fcmae', '*seer']


def main():
    setup_default_logging()
    args = parser.parse_args()
    model_cfgs = []
    model_names = []
    if os.path.isdir(args.checkpoint):
        # validate all checkpoints in a path with the same model
        checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')
        checkpoints += glob.glob(args.checkpoint + '/*.pth')
        model_names = list_models(args.model)
        model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]
    else:
        if args.model == 'all':
            # validate all models with pretrained checkpoints
            args.pretrained = True
            model_names = list_models(pretrained=True, exclude_filters=_NON_IN1K_FILTERS)
            model_cfgs = [(n, '') for n in model_names]
        elif not is_model(args.model):
            # model name doesn't exist, try as wildcard filter
            model_names = list_models(args.model, pretrained=True)
            model_cfgs = [(n, '') for n in model_names]

        if not model_cfgs and os.path.isfile(args.model):
            # model arg may also be a file listing model names, one per line
            with open(args.model) as f:
                model_names = [line.rstrip() for line in f]
            model_cfgs = [(n, None) for n in model_names if n]

    if len(model_cfgs):
        _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
        results = []
        try:
            initial_batch_size = args.batch_size
            for m, c in model_cfgs:
                args.model = m
                args.checkpoint = c
                r = _try_run(args, initial_batch_size)
                if 'error' in r:
                    continue
                if args.checkpoint:
                    r['checkpoint'] = args.checkpoint
                results.append(r)
        except KeyboardInterrupt as e:
            pass
        results = sorted(results, key=lambda x: x['top1'], reverse=True)
    elif args.retry:
        results = _try_run(args, args.batch_size)
    else:
        results = validate(args)

    if args.results_file:
        write_results(args.results_file, results, format=args.results_format)

    # output results in JSON to stdout w/ delimiter for runner script
    print(f'--result\n{json.dumps(results, indent=4)}')


def write_results(results_file, results, format='csv'):
    with open(results_file, mode='w') as cf:
        if format == 'json':
            json.dump(results, cf, indent=4)
        else:
            if not isinstance(results, (list, tuple)):
                results = [results]
            if not results:
                return
            dw = csv.DictWriter(cf, fieldnames=results[0].keys())
            dw.writeheader()
            for r in results:
                dw.writerow(r)
            cf.flush()


if __name__ == '__main__':
    main()
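
# Example invocations (expository; model names and paths are placeholders):
#
#   python validate.py /path/to/imagenet --model resnet50 --pretrained --amp
#   python validate.py --data-dir /path/to/imagenet --model vit_base_patch16_224 \
#       --checkpoint ./output/model_best.pth.tar --batch-size 128 --results-file results.csv
#
# With --retry, a failed run (e.g. CUDA OOM) is retried at a decayed batch size
# via decay_batch_step(); --results-file writes summary metrics as csv or json.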