meg (HF staff) committed
Commit 2f47fdc · verified · 1 parent: 3ad86c5

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes.

Files changed (50):
  1. Dockerfile +1 -1
  2. README.md +1 -1
  3. app.py +30 -25
  4. pytorch-image-models/CONTRIBUTING.md +1 -1
  5. pytorch-image-models/README.md +34 -51
  6. pytorch-image-models/UPGRADING.md +3 -3
  7. pytorch-image-models/benchmark.py +2 -2
  8. pytorch-image-models/hfdocs/source/changes.mdx +146 -5
  9. pytorch-image-models/hfdocs/source/models.mdx +3 -3
  10. pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx +2 -2
  11. pytorch-image-models/hfdocs/source/models/advprop.mdx +1 -1
  12. pytorch-image-models/hfdocs/source/models/big-transfer.mdx +1 -1
  13. pytorch-image-models/hfdocs/source/models/csp-darknet.mdx +1 -1
  14. pytorch-image-models/hfdocs/source/models/csp-resnet.mdx +1 -1
  15. pytorch-image-models/hfdocs/source/models/csp-resnext.mdx +1 -1
  16. pytorch-image-models/hfdocs/source/models/densenet.mdx +1 -1
  17. pytorch-image-models/hfdocs/source/models/dla.mdx +1 -1
  18. pytorch-image-models/hfdocs/source/models/dpn.mdx +1 -1
  19. pytorch-image-models/hfdocs/source/models/ecaresnet.mdx +1 -1
  20. pytorch-image-models/hfdocs/source/models/efficientnet-pruned.mdx +2 -2
  21. pytorch-image-models/hfdocs/source/models/efficientnet.mdx +2 -2
  22. pytorch-image-models/hfdocs/source/models/ensemble-adversarial.mdx +1 -1
  23. pytorch-image-models/hfdocs/source/models/ese-vovnet.mdx +1 -1
  24. pytorch-image-models/hfdocs/source/models/fbnet.mdx +1 -1
  25. pytorch-image-models/hfdocs/source/models/gloun-inception-v3.mdx +2 -2
  26. pytorch-image-models/hfdocs/source/models/gloun-resnet.mdx +1 -1
  27. pytorch-image-models/hfdocs/source/models/gloun-resnext.mdx +1 -1
  28. pytorch-image-models/hfdocs/source/models/gloun-senet.mdx +1 -1
  29. pytorch-image-models/hfdocs/source/models/gloun-seresnext.mdx +1 -1
  30. pytorch-image-models/hfdocs/source/models/gloun-xception.mdx +1 -1
  31. pytorch-image-models/hfdocs/source/models/hrnet.mdx +1 -1
  32. pytorch-image-models/hfdocs/source/models/ig-resnext.mdx +1 -1
  33. pytorch-image-models/hfdocs/source/models/inception-resnet-v2.mdx +1 -1
  34. pytorch-image-models/hfdocs/source/models/inception-v3.mdx +2 -2
  35. pytorch-image-models/hfdocs/source/models/inception-v4.mdx +1 -1
  36. pytorch-image-models/hfdocs/source/models/legacy-se-resnet.mdx +1 -1
  37. pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx +1 -1
  38. pytorch-image-models/hfdocs/source/models/legacy-senet.mdx +1 -1
  39. pytorch-image-models/hfdocs/source/models/mixnet.mdx +1 -1
  40. pytorch-image-models/hfdocs/source/models/mnasnet.mdx +1 -1
  41. pytorch-image-models/hfdocs/source/models/mobilenet-v2.mdx +1 -1
  42. pytorch-image-models/hfdocs/source/models/mobilenet-v3.mdx +1 -1
  43. pytorch-image-models/hfdocs/source/models/nasnet.mdx +1 -1
  44. pytorch-image-models/hfdocs/source/models/noisy-student.mdx +1 -1
  45. pytorch-image-models/hfdocs/source/models/pnasnet.mdx +1 -1
  46. pytorch-image-models/hfdocs/source/models/regnetx.mdx +4 -4
  47. pytorch-image-models/hfdocs/source/models/regnety.mdx +4 -4
  48. pytorch-image-models/hfdocs/source/models/res2net.mdx +1 -1
  49. pytorch-image-models/hfdocs/source/models/res2next.mdx +1 -1
  50. pytorch-image-models/hfdocs/source/models/resnest.mdx +2 -2
Dockerfile CHANGED
@@ -16,4 +16,4 @@ COPY --chown=user train.sh pytorch-image-models
 RUN chmod +x pytorch-image-models/train.sh
 
 COPY --chown=user . /app
-CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
+CMD ["python", "app.py"]
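The CMD swap works because the new `app.py` (diffed below) binds its own HTTP server via Gradio, so no external ASGI server is needed. A minimal sketch of the entry-point pattern it relies on (the Markdown text is a placeholder; port 7860 is the HF Spaces convention):

```python
import gradio as gr

# Minimal entry point compatible with CMD ["python", "app.py"]:
# the script itself starts the server instead of being served by uvicorn.
with gr.Blocks() as app:
    gr.Markdown("Placeholder UI")

if __name__ == "__main__":
    # HF Spaces route traffic to 0.0.0.0:7860 by convention.
    app.launch(server_name="0.0.0.0", server_port=7860)
```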
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: ImagenetTraining-imagenet-1k-random-20.0-frac-1over2
+title: ImageNetTraining20.0-frac-1over2
 emoji: 😻
 colorFrom: yellow
 colorTo: blue
app.py CHANGED
@@ -1,5 +1,7 @@
+
 import os
-from fastapi import FastAPI
+import gradio as gr
+
 import wandb
 from huggingface_hub import HfApi
 
@@ -8,40 +10,43 @@ API = HfApi(token=TOKEN)
 wandb_api_key = os.environ.get('wandb_api_key')
 wandb.login(key=wandb_api_key)
 
-EXPERIMENT = "imagenet-1k-random-20.0-frac-1over2"
-# Input dataset
-INPUT = f"datacomp/{EXPERIMENT}"
-# Output for files and Space ID
-OUTPUT = f"datacomp/ImagenetTraining-{EXPERIMENT}"
+random_num = f"20.0"
+subset = f"frac-1over2"
+experiment_name = f"ImageNetTraining20.0-frac-1over2"
+experiment_repo = f"datacomp/ImageNetTraining20.0-frac-1over2"
 
-app = FastAPI()
-
-@app.get("/")
 def start_train():
-    os.system("echo 'Space started!'")
-    os.system("echo pwd")
+    os.system("echo '#### pwd'")
     os.system("pwd")
-    os.system("echo ls")
+    os.system("echo '#### ls'")
     os.system("ls")
-    os.system("echo 'creating dataset for output files if it doesn't exist...'")
+    # Create a place to put the output.
+    os.system("echo 'Creating results output repository in case it does not exist yet...'")
    try:
-        API.create_repo(repo_id=OUTPUT, repo_type="dataset",)
+        API.create_repo(repo_id=f"datacomp/ImageNetTraining20.0-frac-1over2", repo_type="dataset",)
+        os.system(f"echo 'Created results output repository datacomp/ImageNetTraining20.0-frac-1over2'")
    except:
+        os.system("echo 'Already there; skipping.'")
        pass
-    #space_variables = API.get_space_variables(repo_id=SPACE_ID)
-    #if 'STATUS' not in space_variables or space_variables['STATUS'] != 'COMPUTING':
    os.system("echo 'Beginning processing.'")
-    # API.add_space_variable(repo_id=SPACE_ID, key='STATUS', value='COMPUTING')
    # Handles CUDA OOM errors.
    os.system(f"export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True")
-    # Prints more informative CUDA errors (I think? I've forgotten now.)
-    os.system("export CUDA_LAUNCH_BLOCKING=1")
    os.system("echo 'Okay, trying training.'")
-    os.system(f"cd pytorch-image-models; ./train.sh 4 --dataset hfds/{INPUT} --log-wandb --experiment {EXPERIMENT} --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4")
-    os.system("echo ls")
+    os.system(f"cd pytorch-image-models; ./train.sh 4 --dataset hfds/datacomp/imagenet-1k-random-20.0-frac-1over2 --log-wandb --wandb-project ImageNetTraining20.0-frac-1over2 --experiment ImageNetTraining20.0-frac-1over2 --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4")
+    os.system("echo 'Done'.")
    os.system("ls")
+    # Upload output to repository
    os.system("echo 'trying to upload...'")
-    API.upload_large_folder(folder_path="/app", repo_id=OUTPUT, repo_type="dataset",)
-    #API.add_space_variable(repo_id=SPACE_ID, key='STATUS', value='NOT_COMPUTING')
-    #API.pause_space(SPACE_ID)
-    return {"Completed": "!"}
+    API.upload_folder(folder_path="/app", repo_id=f"datacomp/ImageNetTraining20.0-frac-1over2", repo_type="dataset",)
+    API.pause_space(experiment_repo)
+
+def run():
+    with gr.Blocks() as app:
+        gr.Markdown(f"Randomization: 20.0")
+        gr.Markdown(f"Subset: frac-1over2")
+        start = gr.Button("Start")
+        start.click(start_train)
+    app.launch(server_name="0.0.0.0", server_port=7860)
+
+if __name__ == '__main__':
+    run()
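One caveat in the retained lines above: each `os.system` call spawns its own shell, so `os.system("export PYTORCH_CUDA_ALLOC_CONF=...")` cannot affect the later training command. A sketch of the usual fix, with the training flags abbreviated (this is not the committed code, just an alternative pattern):

```python
import os
import subprocess

# Set the variable on the parent process; child processes inherit it.
# An os.system("export ...") dies with the shell that ran it.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# subprocess.run with cwd= also replaces the `cd ...;` shell-string pattern,
# and check=True surfaces a non-zero training exit instead of silently continuing.
subprocess.run(
    ["./train.sh", "4",
     "--dataset", "hfds/datacomp/imagenet-1k-random-20.0-frac-1over2",
     "--model", "seresnet34", "--amp"],  # remaining flags elided
    cwd="pytorch-image-models",
    check=True,
)
```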
pytorch-image-models/CONTRIBUTING.md CHANGED
@@ -10,7 +10,7 @@ Code linting and auto-format (black) are not currently in place but open to cons
 
 A few specific differences from Google style (or black)
 1. Line length is 120 char. Going over is okay in some cases (e.g. I prefer not to break URL across lines).
-2. Hanging indents are always prefered, please avoid aligning arguments with closing brackets or braces.
+2. Hanging indents are always preferred, please avoid aligning arguments with closing brackets or braces.
 
 Example, from Google guide, but this is a NO here:
 ```
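For readers skimming the diff, the rule being typo-fixed above distinguishes these two layouts (function and argument names are illustrative):

```python
# NO: arguments aligned with the opening bracket (Google/black style, rejected here).
result = some_function(argument_one,
                       argument_two)

# YES: hanging indent, closing bracket not aligned with arguments.
result = some_function(
    argument_one,
    argument_two,
)
```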
pytorch-image-models/README.md CHANGED
@@ -12,6 +12,37 @@
 
 ## What's New
 
+## Jan 19, 2025
+* Fix loading of LeViT safetensor weights, remove conversion code which should have been deactivated
+* Add 'SO150M' ViT weights trained with SBB recipes, decent results, but not optimal shape for ImageNet-12k/1k pretrain/ft
+  * `vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k_ft_in1k` - 86.7% top-1
+  * `vit_so150m_patch16_reg4_gap_384.sbb_e250_in12k_ft_in1k` - 87.4% top-1
+  * `vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k`
+* Misc typing, typo, etc. cleanup
+* 1.0.14 release to get above LeViT fix out
+
+## Jan 9, 2025
+* Add support to train and validate in pure `bfloat16` or `float16`
+* `wandb` project name arg added by https://github.com/caojiaolong, use arg.experiment for name
+* Fix old issue w/ checkpoint saving not working on filesystem w/o hard-link support (e.g. FUSE fs mounts)
+* 1.0.13 release
+
+## Jan 6, 2025
+* Add `torch.utils.checkpoint.checkpoint()` wrapper in `timm.models` that defaults `use_reentrant=False`, unless `TIMM_REENTRANT_CKPT=1` is set in env.
+
+## Dec 31, 2024
+* `convnext_nano` 384x384 ImageNet-12k pretrain & fine-tune. https://huggingface.co/models?search=convnext_nano%20r384
+* Add AIM-v2 encoders from https://github.com/apple/ml-aim, see on Hub: https://huggingface.co/models?search=timm%20aimv2
+* Add PaliGemma2 encoders from https://github.com/google-research/big_vision to existing PaliGemma, see on Hub: https://huggingface.co/models?search=timm%20pali2
+* Add missing L/14 DFN2B 39B CLIP ViT, `vit_large_patch14_clip_224.dfn2b_s39b`
+* Fix existing `RmsNorm` layer & fn to match standard formulation, use PT 2.5 impl when possible. Move old impl to `SimpleNorm` layer, it's LN w/o centering or bias. There were only two `timm` models using it, and they have been updated.
+* Allow override of `cache_dir` arg for model creation
+* Pass through `trust_remote_code` for HF datasets wrapper
+* `inception_next_atto` model added by creator
+* Adan optimizer caution, and Lamb decoupled weight decay options
+* Some feature_info metadata fixed by https://github.com/brianhou0208
+* All OpenCLIP and JAX (CLIP, SigLIP, Pali, etc) model weights that used load time remapping were given their own HF Hub instances so that they work with `hf-hub:` based loading, and thus will work with new Transformers `TimmWrapperModel`
+
 ## Nov 28, 2024
 * More optimizers
   * Add MARS optimizer (https://arxiv.org/abs/2411.10438, https://github.com/AGI-Arena/MARS)
@@ -94,7 +125,6 @@ Add a set of new very well trained ResNet & ResNet-V2 18/34 (basic block) weight
 * [mobilenetv3_large_150d.ra4_e3600_r256_in1k](http://hf.co/timm/mobilenetv3_large_150d.ra4_e3600_r256_in1k) - 81.81 @ 320, 80.94 @ 256
 * [mobilenetv3_large_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv3_large_100.ra4_e3600_r224_in1k) - 77.16 @ 256, 76.31 @ 224
 
-
 ### Aug 21, 2024
 * Updated SBB ViT models trained on ImageNet-12k and fine-tuned on ImageNet-1k, challenging quite a number of much larger, slower models
 
@@ -216,7 +246,7 @@ Add a set of new very well trained ResNet & ResNet-V2 18/34 (basic block) weight
 ### May 14, 2024
 * Support loading PaliGemma jax weights into SigLIP ViT models with average pooling.
 * Add Hiera models from Meta (https://github.com/facebookresearch/hiera).
-* Add `normalize=` flag for transorms, return non-normalized torch.Tensor with original dytpe (for `chug`)
+* Add `normalize=` flag for transforms, return non-normalized torch.Tensor with original dtype (for `chug`)
 * Version 1.0.3 release
 
 ### May 11, 2024
@@ -248,7 +278,7 @@ Add a set of new very well trained ResNet & ResNet-V2 18/34 (basic block) weight
 ### April 11, 2024
 * Prepping for a long overdue 1.0 release, things have been stable for a while now.
 * Significant feature that's been missing for a while, `features_only=True` support for ViT models with flat hidden states or non-std module layouts (so far covering `'vit_*', 'twins_*', 'deit*', 'beit*', 'mvitv2*', 'eva*', 'samvit_*', 'flexivit*'`)
-* Above feature support achieved through a new `forward_intermediates()` API that can be used with a feature wrapping module or direclty.
+* Above feature support achieved through a new `forward_intermediates()` API that can be used with a feature wrapping module or directly.
 ```python
 model = timm.create_model('vit_base_patch16_224')
 final_feat, intermediates = model.forward_intermediates(input)
@@ -297,53 +327,6 @@ Add a set of new very well trained ResNet & ResNet-V2 18/34 (basic block) weight
 * Min supported Python version increased to 3.8
 * Release 0.9.16
 
-### Jan 8, 2024
-Datasets & transform refactoring
-* HuggingFace streaming (iterable) dataset support (`--dataset hfids:org/dataset`)
-* Webdataset wrapper tweaks for improved split info fetching, can auto fetch splits from supported HF hub webdataset
-* Tested HF `datasets` and webdataset wrapper streaming from HF hub with recent `timm` ImageNet uploads to https://huggingface.co/timm
-* Make input & target column/field keys consistent across datasets and pass via args
-* Full monochrome support when using e:g: `--input-size 1 224 224` or `--in-chans 1`, sets PIL image conversion appropriately in dataset
-* Improved several alternate crop & resize transforms (ResizeKeepRatio, RandomCropOrPad, etc) for use in PixParse document AI project
-* Add SimCLR style color jitter prob along with grayscale and gaussian blur options to augmentations and args
-* Allow train without validation set (`--val-split ''`) in train script
-* Add `--bce-sum` (sum over class dim) and `--bce-pos-weight` (positive weighting) args for training as they're common BCE loss tweaks I was often hard coding
-
-### Nov 23, 2023
-* Added EfficientViT-Large models, thanks [SeeFun](https://github.com/seefun)
-* Fix Python 3.7 compat, will be dropping support for it soon
-* Other misc fixes
-* Release 0.9.12
-
-### Nov 20, 2023
-* Added significant flexibility for Hugging Face Hub based timm models via `model_args` config entry. `model_args` will be passed as kwargs through to models on creation.
-  * See example at https://huggingface.co/gaunernst/vit_base_patch16_1024_128.audiomae_as2m_ft_as20k/blob/main/config.json
-  * Usage: https://github.com/huggingface/pytorch-image-models/discussions/2035
-* Updated imagenet eval and test set csv files with latest models
-* `vision_transformer.py` typing and doc cleanup by [Laureηt](https://github.com/Laurent2916)
-* 0.9.11 release
-
-### Nov 3, 2023
-* [DFN (Data Filtering Networks)](https://huggingface.co/papers/2309.17425) and [MetaCLIP](https://huggingface.co/papers/2309.16671) ViT weights added
-* DINOv2 'register' ViT model weights added (https://huggingface.co/papers/2309.16588, https://huggingface.co/papers/2304.07193)
-* Add `quickgelu` ViT variants for OpenAI, DFN, MetaCLIP weights that use it (less efficient)
-* Improved typing added to ResNet, MobileNet-v3 thanks to [Aryan](https://github.com/a-r-r-o-w)
-* ImageNet-12k fine-tuned (from LAION-2B CLIP) `convnext_xxlarge`
-* 0.9.9 release
-
-### Oct 20, 2023
-* [SigLIP](https://huggingface.co/papers/2303.15343) image tower weights supported in `vision_transformer.py`.
-  * Great potential for fine-tune and downstream feature use.
-* Experimental 'register' support in vit models as per [Vision Transformers Need Registers](https://huggingface.co/papers/2309.16588)
-* Updated RepViT with new weight release. Thanks [wangao](https://github.com/jameslahm)
-* Add patch resizing support (on pretrained weight load) to Swin models
-* 0.9.8 release pending
-
-### Sep 1, 2023
-* TinyViT added by [SeeFun](https://github.com/seefun)
-* Fix EfficientViT (MIT) to use torch.autocast so it works back to PT 1.10
-* 0.9.7 release
-
 ## Introduction
 
 Py**T**orch **Im**age **M**odels (`timm`) is a collection of image models, layers, utilities, optimizers, schedulers, data-loaders / augmentations, and reference training / validation scripts that aim to pull together a wide variety of SOTA models with ability to reproduce ImageNet training results.
@@ -486,7 +469,7 @@ Included optimizers available via `timm.optim.create_optimizer_v2` factory metho
 * `madgrad` an implementation of MADGRAD adapted from https://github.com/facebookresearch/madgrad - https://arxiv.org/abs/2101.11075
 * `mars` MARS optimizer from https://github.com/AGI-Arena/MARS - https://arxiv.org/abs/2411.10438
 * `nadam` an implementation of Adam w/ Nesterov momentum
-* `nadamw` an impementation of AdamW (Adam w/ decoupled weight-decay) w/ Nesterov momentum. A simplified impl based on https://github.com/mlcommons/algorithmic-efficiency
+* `nadamw` an implementation of AdamW (Adam w/ decoupled weight-decay) w/ Nesterov momentum. A simplified impl based on https://github.com/mlcommons/algorithmic-efficiency
 * `novograd` by [Masashi Kimura](https://github.com/convergence-lab/novograd) - https://arxiv.org/abs/1905.11286
 * `radam` by [Liyuan Liu](https://github.com/LiyuanLucasLiu/RAdam) - https://arxiv.org/abs/1908.03265
 * `rmsprop_tf` adapted from PyTorch RMSProp by myself. Reproduces much improved Tensorflow RMSProp behaviour
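For context, the factory named in the last hunk header is used roughly like this (model choice and hyper-parameters are illustrative, not from the diff):

```python
import timm
from timm.optim import create_optimizer_v2

# Build any timm model, then ask the factory for one of the listed optimizers by name.
model = timm.create_model("resnet50")
optimizer = create_optimizer_v2(
    model,
    opt="nadamw",       # e.g. the nadamw entry whose description is fixed above
    lr=1e-3,
    weight_decay=0.05,
)
```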
pytorch-image-models/UPGRADING.md CHANGED
@@ -1,10 +1,10 @@
 # Upgrading from previous versions
 
-I generally try to maintain code interface and especially model weight compability across many `timm` versions. Sometimes there are exceptions.
+I generally try to maintain code interface and especially model weight compatibility across many `timm` versions. Sometimes there are exceptions.
 
 ## Checkpoint remapping
 
-Pretrained weight remapping is handled by `checkpoint_filter_fn` in a model implementation module. This remaps old pretrained checkpoints to new, and also 3rd party (original) checkpoints to `timm` format if the model was modified when brough into `timm`.
+Pretrained weight remapping is handled by `checkpoint_filter_fn` in a model implementation module. This remaps old pretrained checkpoints to new, and also 3rd party (original) checkpoints to `timm` format if the model was modified when brought into `timm`.
 
 The `checkpoint_filter_fn` is automatically called when loading pretrained weights via `pretrained=True`, but they can be called manually if you call the fn directly with the current model instance and old state dict.
 
@@ -19,6 +19,6 @@ Many changes were made since the 0.6.x stable releases. They were previewed in 0
 * The pretrained_tag is the specific weight variant (different head) for the architecture.
 * Using only `architecture` defaults to the first weights in the default_cfgs for that model architecture.
 * In adding pretrained tags, many model names that existed to differentiate were renamed to use the tag (ex: `vit_base_patch16_224_in21k` -> `vit_base_patch16_224.augreg_in21k`). There are deprecation mappings for these.
-* A number of models had their checkpoints remaped to match architecture changes needed to better support `features_only=True`, there are `checkpoint_filter_fn` methods in any model module that was remapped. These can be passed to `timm.models.load_checkpoint(..., filter_fn=timm.models.swin_transformer_v2.checkpoint_filter_fn)` to remap your existing checkpoint.
+* A number of models had their checkpoints remapped to match architecture changes needed to better support `features_only=True`, there are `checkpoint_filter_fn` methods in any model module that was remapped. These can be passed to `timm.models.load_checkpoint(..., filter_fn=timm.models.swin_transformer_v2.checkpoint_filter_fn)` to remap your existing checkpoint.
 * The Hugging Face Hub (https://huggingface.co/timm) is now the primary source for `timm` weights. Model cards include link to papers, original source, license.
 * Previous 0.6.x can be cloned from [0.6.x](https://github.com/rwightman/pytorch-image-models/tree/0.6.x) branch or installed via pip with version.
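A minimal sketch of the remapping path described above, using the call the doc itself cites (the model name and checkpoint path are placeholders):

```python
import timm
from timm.models import load_checkpoint
from timm.models.swin_transformer_v2 import checkpoint_filter_fn

# Build the current architecture, then remap an old checkpoint into it.
model = timm.create_model("swinv2_tiny_window8_256")  # placeholder model
load_checkpoint(model, "old_swinv2_checkpoint.pth", filter_fn=checkpoint_filter_fn)
```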
pytorch-image-models/benchmark.py CHANGED
@@ -88,8 +88,8 @@ parser.add_argument('-b', '--batch-size', default=256, type=int,
                     metavar='N', help='mini-batch size (default: 256)')
 parser.add_argument('--img-size', default=None, type=int,
                     metavar='N', help='Input image dimension, uses model default if empty')
-parser.add_argument('--input-size', default=None, nargs=3, type=int,
-                    metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
+parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N',
+                    help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
 parser.add_argument('--use-train-size', action='store_true', default=False,
                     help='Run inference at train size, not test-input-size if it exists.')
 parser.add_argument('--num-classes', type=int, default=None,
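The reformatted argument above also changes the metavar: with `nargs=3`, argparse repeats a single metavar once per value, so `metavar='N'` renders as `--input-size N N N` in help output (a plausible motivation for the change; this standalone snippet just demonstrates the behavior):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N',
                    help='Input all image dimensions (d h w, e.g. --input-size 3 224 224)')

args = parser.parse_args(['--input-size', '3', '224', '224'])
print(args.input_size)   # [3, 224, 224]
parser.print_help()      # usage line shows: --input-size N N N
```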
pytorch-image-models/hfdocs/source/changes.mdx CHANGED
@@ -1,5 +1,146 @@
 # Changelog
 
+## Jan 19, 2025
+* Fix loading of LeViT safetensor weights, remove conversion code which should have been deactivated
+* Add 'SO150M' ViT weights trained with SBB recipes, decent results, but not optimal shape for ImageNet-12k/1k pretrain/ft
+  * `vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k_ft_in1k` - 86.7% top-1
+  * `vit_so150m_patch16_reg4_gap_384.sbb_e250_in12k_ft_in1k` - 87.4% top-1
+  * `vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k`
+* Misc typing, typo, etc. cleanup
+* 1.0.14 release to get above LeViT fix out
+
+## Jan 9, 2025
+* Add support to train and validate in pure `bfloat16` or `float16`
+* `wandb` project name arg added by https://github.com/caojiaolong, use arg.experiment for name
+* Fix old issue w/ checkpoint saving not working on filesystem w/o hard-link support (e.g. FUSE fs mounts)
+* 1.0.13 release
+
+## Jan 6, 2025
+* Add `torch.utils.checkpoint.checkpoint()` wrapper in `timm.models` that defaults `use_reentrant=False`, unless `TIMM_REENTRANT_CKPT=1` is set in env.
+
+## Dec 31, 2024
+* `convnext_nano` 384x384 ImageNet-12k pretrain & fine-tune. https://huggingface.co/models?search=convnext_nano%20r384
+* Add AIM-v2 encoders from https://github.com/apple/ml-aim, see on Hub: https://huggingface.co/models?search=timm%20aimv2
+* Add PaliGemma2 encoders from https://github.com/google-research/big_vision to existing PaliGemma, see on Hub: https://huggingface.co/models?search=timm%20pali2
+* Add missing L/14 DFN2B 39B CLIP ViT, `vit_large_patch14_clip_224.dfn2b_s39b`
+* Fix existing `RmsNorm` layer & fn to match standard formulation, use PT 2.5 impl when possible. Move old impl to `SimpleNorm` layer, it's LN w/o centering or bias. There were only two `timm` models using it, and they have been updated.
+* Allow override of `cache_dir` arg for model creation
+* Pass through `trust_remote_code` for HF datasets wrapper
+* `inception_next_atto` model added by creator
+* Adan optimizer caution, and Lamb decoupled weight decay options
+* Some feature_info metadata fixed by https://github.com/brianhou0208
+* All OpenCLIP and JAX (CLIP, SigLIP, Pali, etc) model weights that used load time remapping were given their own HF Hub instances so that they work with `hf-hub:` based loading, and thus will work with new Transformers `TimmWrapperModel`
+
+## Nov 28, 2024
+* More optimizers
+  * Add MARS optimizer (https://arxiv.org/abs/2411.10438, https://github.com/AGI-Arena/MARS)
+  * Add LaProp optimizer (https://arxiv.org/abs/2002.04839, https://github.com/Z-T-WANG/LaProp-Optimizer)
+  * Add masking from 'Cautious Optimizers' (https://arxiv.org/abs/2411.16085, https://github.com/kyleliang919/C-Optim) to Adafactor, Adafactor Big Vision, AdamW (legacy), Adopt, Lamb, LaProp, Lion, NadamW, RMSPropTF, SGDW
+  * Cleanup some docstrings and type annotations re optimizers and factory
+* Add MobileNet-V4 Conv Medium models pretrained on in12k and fine-tuned in1k @ 384x384
+  * https://huggingface.co/timm/mobilenetv4_conv_medium.e250_r384_in12k_ft_in1k
+  * https://huggingface.co/timm/mobilenetv4_conv_medium.e250_r384_in12k
+  * https://huggingface.co/timm/mobilenetv4_conv_medium.e180_ad_r384_in12k
+  * https://huggingface.co/timm/mobilenetv4_conv_medium.e180_r384_in12k
+* Add small cs3darknet, quite good for the speed
+  * https://huggingface.co/timm/cs3darknet_focus_s.ra4_e3600_r256_in1k
+
+## Nov 12, 2024
+* Optimizer factory refactor
+  * New factory works by registering optimizers using an OptimInfo dataclass w/ some key traits
+  * Add `list_optimizers`, `get_optimizer_class`, `get_optimizer_info` to reworked `create_optimizer_v2` fn to explore optimizers, get info or class
+  * deprecate `optim.optim_factory`, move fns to `optim/_optim_factory.py` and `optim/_param_groups.py` and encourage import via `timm.optim`
+* Add Adopt (https://github.com/iShohei220/adopt) optimizer
+* Add 'Big Vision' variant of Adafactor (https://github.com/google-research/big_vision/blob/main/big_vision/optax.py) optimizer
+* Fix original Adafactor to pick better factorization dims for convolutions
+* Tweak LAMB optimizer with some improvements in torch.where functionality since original, refactor clipping a bit
+* dynamic img size support in vit, deit, eva improved to support resize from non-square patch grids, thanks https://github.com/wojtke
+
+## Oct 31, 2024
+Add a set of new very well trained ResNet & ResNet-V2 18/34 (basic block) weights. See https://huggingface.co/blog/rwightman/resnet-trick-or-treat
+
+## Oct 19, 2024
+* Cleanup torch amp usage to avoid cuda specific calls, merge support for Ascend (NPU) devices from [MengqingCao](https://github.com/MengqingCao) that should work now in PyTorch 2.5 w/ new device extension autoloading feature. Tested Intel Arc (XPU) in Pytorch 2.5 too and it (mostly) worked.
+
+## Oct 16, 2024
+* Fix error on importing from deprecated path `timm.models.registry`, increased priority of existing deprecation warnings to be visible
+* Port weights of InternViT-300M (https://huggingface.co/OpenGVLab/InternViT-300M-448px) to `timm` as `vit_intern300m_patch14_448`
+
+### Oct 14, 2024
+* Pre-activation (ResNetV2) version of 18/18d/34/34d ResNet model defs added by request (weights pending)
+* Release 1.0.10
+
+### Oct 11, 2024
+* MambaOut (https://github.com/yuweihao/MambaOut) model & weights added. A cheeky take on SSM vision models w/o the SSM (essentially ConvNeXt w/ gating). A mix of original weights + custom variations & weights.
+
+|model |img_size|top1 |top5 |param_count|
+|---------------------------------------------------------------------------------------------------------------------|--------|------|------|-----------|
+|[mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k)|384 |87.506|98.428|101.66 |
+|[mambaout_base_plus_rw.sw_e150_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_in12k_ft_in1k)|288 |86.912|98.236|101.66 |
+|[mambaout_base_plus_rw.sw_e150_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_in12k_ft_in1k)|224 |86.632|98.156|101.66 |
+|[mambaout_base_tall_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_tall_rw.sw_e500_in1k) |288 |84.974|97.332|86.48 |
+|[mambaout_base_wide_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_wide_rw.sw_e500_in1k) |288 |84.962|97.208|94.45 |
+|[mambaout_base_short_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_short_rw.sw_e500_in1k) |288 |84.832|97.27 |88.83 |
+|[mambaout_base.in1k](http://huggingface.co/timm/mambaout_base.in1k) |288 |84.72 |96.93 |84.81 |
+|[mambaout_small_rw.sw_e450_in1k](http://huggingface.co/timm/mambaout_small_rw.sw_e450_in1k) |288 |84.598|97.098|48.5 |
+|[mambaout_small.in1k](http://huggingface.co/timm/mambaout_small.in1k) |288 |84.5 |96.974|48.49 |
+|[mambaout_base_wide_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_wide_rw.sw_e500_in1k) |224 |84.454|96.864|94.45 |
+|[mambaout_base_tall_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_tall_rw.sw_e500_in1k) |224 |84.434|96.958|86.48 |
+|[mambaout_base_short_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_short_rw.sw_e500_in1k) |224 |84.362|96.952|88.83 |
+|[mambaout_base.in1k](http://huggingface.co/timm/mambaout_base.in1k) |224 |84.168|96.68 |84.81 |
+|[mambaout_small.in1k](http://huggingface.co/timm/mambaout_small.in1k) |224 |84.086|96.63 |48.49 |
+|[mambaout_small_rw.sw_e450_in1k](http://huggingface.co/timm/mambaout_small_rw.sw_e450_in1k) |224 |84.024|96.752|48.5 |
+|[mambaout_tiny.in1k](http://huggingface.co/timm/mambaout_tiny.in1k) |288 |83.448|96.538|26.55 |
+|[mambaout_tiny.in1k](http://huggingface.co/timm/mambaout_tiny.in1k) |224 |82.736|96.1 |26.55 |
+|[mambaout_kobe.in1k](http://huggingface.co/timm/mambaout_kobe.in1k) |288 |81.054|95.718|9.14 |
+|[mambaout_kobe.in1k](http://huggingface.co/timm/mambaout_kobe.in1k) |224 |79.986|94.986|9.14 |
+|[mambaout_femto.in1k](http://huggingface.co/timm/mambaout_femto.in1k) |288 |79.848|95.14 |7.3 |
+|[mambaout_femto.in1k](http://huggingface.co/timm/mambaout_femto.in1k) |224 |78.87 |94.408|7.3 |
+
+* SigLIP SO400M ViT fine-tunes on ImageNet-1k @ 378x378, added 378x378 option for existing SigLIP 384x384 models
+  * [vit_so400m_patch14_siglip_378.webli_ft_in1k](https://huggingface.co/timm/vit_so400m_patch14_siglip_378.webli_ft_in1k) - 89.42 top-1
+  * [vit_so400m_patch14_siglip_gap_378.webli_ft_in1k](https://huggingface.co/timm/vit_so400m_patch14_siglip_gap_378.webli_ft_in1k) - 89.03
+* SigLIP SO400M ViT encoder from recent multi-lingual (i18n) variant, patch16 @ 256x256 (https://huggingface.co/timm/ViT-SO400M-16-SigLIP-i18n-256). OpenCLIP update pending.
+* Add two ConvNeXt 'Zepto' models & weights (one w/ overlapped stem and one w/ patch stem). Uses RMSNorm, smaller than previous 'Atto', 2.2M params.
+  * [convnext_zepto_rms_ols.ra4_e3600_r224_in1k](https://huggingface.co/timm/convnext_zepto_rms_ols.ra4_e3600_r224_in1k) - 73.20 top-1 @ 224
+  * [convnext_zepto_rms.ra4_e3600_r224_in1k](https://huggingface.co/timm/convnext_zepto_rms.ra4_e3600_r224_in1k) - 72.81 @ 224
+
+### Sept 2024
+* Add a suite of tiny test models for improved unit tests and niche low-resource applications (https://huggingface.co/blog/rwightman/timm-tiny-test)
+* Add MobileNetV4-Conv-Small (0.5x) model (https://huggingface.co/posts/rwightman/793053396198664)
+  * [mobilenetv4_conv_small_050.e3000_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small_050.e3000_r224_in1k) - 65.81 top-1 @ 256, 64.76 @ 224
+* Add MobileNetV3-Large variants trained with MNV4 Small recipe
+  * [mobilenetv3_large_150d.ra4_e3600_r256_in1k](http://hf.co/timm/mobilenetv3_large_150d.ra4_e3600_r256_in1k) - 81.81 @ 320, 80.94 @ 256
+  * [mobilenetv3_large_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv3_large_100.ra4_e3600_r224_in1k) - 77.16 @ 256, 76.31 @ 224
+
+### Aug 21, 2024
+* Updated SBB ViT models trained on ImageNet-12k and fine-tuned on ImageNet-1k, challenging quite a number of much larger, slower models
+
+| model | top1 | top5 | param_count | img_size |
+| -------------------------------------------------- | ------ | ------ | ----------- | -------- |
+| [vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k) | 87.438 | 98.256 | 64.11 | 384 |
+| [vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k) | 86.608 | 97.934 | 64.11 | 256 |
+| [vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k) | 86.594 | 98.02 | 60.4 | 384 |
+| [vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k) | 85.734 | 97.61 | 60.4 | 256 |
+* MobileNet-V1 1.25, EfficientNet-B1, & ResNet50-D weights w/ MNV4 baseline challenge recipe
+
+| model | top1 | top5 | param_count | img_size |
+|--------------------------------------------------------------------------------------------------------------------------|--------|--------|-------------|----------|
+| [resnet50d.ra4_e3600_r224_in1k](http://hf.co/timm/resnet50d.ra4_e3600_r224_in1k) | 81.838 | 95.922 | 25.58 | 288 |
+| [efficientnet_b1.ra4_e3600_r240_in1k](http://hf.co/timm/efficientnet_b1.ra4_e3600_r240_in1k) | 81.440 | 95.700 | 7.79 | 288 |
+| [resnet50d.ra4_e3600_r224_in1k](http://hf.co/timm/resnet50d.ra4_e3600_r224_in1k) | 80.952 | 95.384 | 25.58 | 224 |
+| [efficientnet_b1.ra4_e3600_r240_in1k](http://hf.co/timm/efficientnet_b1.ra4_e3600_r240_in1k) | 80.406 | 95.152 | 7.79 | 240 |
+| [mobilenetv1_125.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_125.ra4_e3600_r224_in1k) | 77.600 | 93.804 | 6.27 | 256 |
+| [mobilenetv1_125.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_125.ra4_e3600_r224_in1k) | 76.924 | 93.234 | 6.27 | 224 |
+
+* Add SAM2 (HieraDet) backbone arch & weight loading support
+* Add Hiera Small weights trained w/ abswin pos embed on in12k & fine-tuned on 1k
+
+|model |top1 |top5 |param_count|
+|---------------------------------|------|------|-----------|
+|hiera_small_abswin_256.sbb2_e200_in12k_ft_in1k |84.912|97.260|35.01 |
+|hiera_small_abswin_256.sbb2_pd_e200_in12k_ft_in1k |84.560|97.106|35.01 |
+
 ### Aug 8, 2024
 * Add RDNet ('DenseNets Reloaded', https://arxiv.org/abs/2403.19588), thanks [Donghyun Kim](https://github.com/dhkim0225)
 
@@ -93,7 +234,7 @@
 ### May 14, 2024
 * Support loading PaliGemma jax weights into SigLIP ViT models with average pooling.
 * Add Hiera models from Meta (https://github.com/facebookresearch/hiera).
-* Add `normalize=` flag for transorms, return non-normalized torch.Tensor with original dytpe (for `chug`)
+* Add `normalize=` flag for transforms, return non-normalized torch.Tensor with original dtype (for `chug`)
 * Version 1.0.3 release
 
 ### May 11, 2024
@@ -125,7 +266,7 @@
 ### April 11, 2024
 * Prepping for a long overdue 1.0 release, things have been stable for a while now.
 * Significant feature that's been missing for a while, `features_only=True` support for ViT models with flat hidden states or non-std module layouts (so far covering `'vit_*', 'twins_*', 'deit*', 'beit*', 'mvitv2*', 'eva*', 'samvit_*', 'flexivit*'`)
-* Above feature support achieved through a new `forward_intermediates()` API that can be used with a feature wrapping module or direclty.
+* Above feature support achieved through a new `forward_intermediates()` API that can be used with a feature wrapping module or directly.
 ```python
 model = timm.create_model('vit_base_patch16_224')
 final_feat, intermediates = model.forward_intermediates(input)
@@ -360,7 +501,7 @@ Datasets & transform refactoring
 * 0.8.15dev0
 
 ### Feb 20, 2023
-* Add 320x320 `convnext_large_mlp.clip_laion2b_ft_320` and `convnext_lage_mlp.clip_laion2b_ft_soup_320` CLIP image tower weights for features & fine-tune
+* Add 320x320 `convnext_large_mlp.clip_laion2b_ft_320` and `convnext_large_mlp.clip_laion2b_ft_soup_320` CLIP image tower weights for features & fine-tune
 * 0.8.13dev0 pypi release for latest changes w/ move to huggingface org
 
 ### Feb 16, 2023
@@ -745,7 +886,7 @@ More models, more fixes
 * Add 'group matching' API to all models to allow grouping model parameters for application of 'layer-wise' LR decay, lr scale added to LR scheduler
 * Gradient checkpointing support added to many models
 * `forward_head(x, pre_logits=False)` fn added to all models to allow separate calls of `forward_features` + `forward_head`
-* All vision transformer and vision MLP models update to return non-pooled / non-token selected features from `foward_features`, for consistency with CNN models, token selection or pooling now applied in `forward_head`
+* All vision transformer and vision MLP models update to return non-pooled / non-token selected features from `forward_features`, for consistency with CNN models, token selection or pooling now applied in `forward_head`
 
 ### Feb 2, 2022
 * [Chris Hughes](https://github.com/Chris-hughes10) posted an exhaustive run through of `timm` on his blog yesterday. Well worth a read. [Getting Started with PyTorch Image Models (timm): A Practitioner’s Guide](https://towardsdatascience.com/getting-started-with-pytorch-image-models-timm-a-practitioners-guide-4e77b4bf9055)
@@ -1058,7 +1199,7 @@ More models, more fixes
 * Add 'group matching' API to all models to allow grouping model parameters for application of 'layer-wise' LR decay, lr scale added to LR scheduler
 * Gradient checkpointing support added to many models
 * `forward_head(x, pre_logits=False)` fn added to all models to allow separate calls of `forward_features` + `forward_head`
-* All vision transformer and vision MLP models update to return non-pooled / non-token selected features from `foward_features`, for consistency with CNN models, token selection or pooling now applied in `forward_head`
+* All vision transformer and vision MLP models update to return non-pooled / non-token selected features from `forward_features`, for consistency with CNN models, token selection or pooling now applied in `forward_head`
 
 ### Feb 2, 2022
 * [Chris Hughes](https://github.com/Chris-hughes10) posted an exhaustive run through of `timm` on his blog yesterday. Well worth a read. [Getting Started with PyTorch Image Models (timm): A Practitioner’s Guide](https://towardsdatascience.com/getting-started-with-pytorch-image-models-timm-a-practitioners-guide-4e77b4bf9055)
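A quick sketch of the optimizer-exploration helpers named in the Nov 12, 2024 entry above (return values are indicative; treat the exact shapes as an assumption):

```python
from timm.optim import list_optimizers, get_optimizer_class, get_optimizer_info

print(list_optimizers())             # registered names, e.g. 'adopt', 'lamb', 'laprop', 'mars', ...
info = get_optimizer_info("adopt")   # OptimInfo dataclass describing the optimizer's traits
opt_cls = get_optimizer_class("adopt")
print(info, opt_cls)
```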
pytorch-image-models/hfdocs/source/models.mdx CHANGED
@@ -33,7 +33,7 @@ A more exciting view (with pretty pictures) of the models within `timm` can be f
 ## DLA
 
 * Implementation: [dla.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dla.py)
-* Paper: https://arxiv.org/abs/1707.06484
+* Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484
 * Code: https://github.com/ucbdrive/dla
 
 ## Dual-Path Networks
@@ -78,14 +78,14 @@ A more exciting view (with pretty pictures) of the models within `timm` can be f
 ## NASNet-A
 
 * Implementation: [nasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/nasnet.py)
-* Papers: `Learning Transferable Architectures for Scalable Image Recognition` - https://arxiv.org/abs/1707.07012
+* Paper: `Learning Transferable Architectures for Scalable Image Recognition` - https://arxiv.org/abs/1707.07012
 * Code: https://github.com/Cadene/pretrained-models.pytorch
 * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet
 
 ## PNasNet-5
 
 * Implementation: [pnasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/pnasnet.py)
-* Papers: `Progressive Neural Architecture Search` - https://arxiv.org/abs/1712.00559
+* Paper: `Progressive Neural Architecture Search` - https://arxiv.org/abs/1712.00559
 * Code: https://github.com/Cadene/pretrained-models.pytorch
 * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet
 
pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx CHANGED
@@ -1,6 +1,6 @@
 # Adversarial Inception v3
 
-**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifer](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
+**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
 
 This particular model was trained for study of adversarial examples (adversarial training).
 
@@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
-You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
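For orientation, a minimal sketch of pulling this model card's weights through `timm` (model name as registered in timm; the dummy input follows the Inception v3 default of 299x299):

```python
import timm
import torch

model = timm.create_model("adv_inception_v3", pretrained=True)
model.eval()

x = torch.randn(1, 3, 299, 299)  # dummy batch at the default input resolution
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000])
```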
pytorch-image-models/hfdocs/source/models/advprop.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
-You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/big-transfer.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
-You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/csp-darknet.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
-You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/csp-resnet.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
-You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/csp-resnext.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
-You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/densenet.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
-You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/dla.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
-You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/dpn.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
-You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/ecaresnet.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
-You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/efficientnet-pruned.mdx CHANGED
@@ -1,6 +1,6 @@
 # EfficientNet (Knapsack Pruned)
 
- **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scales network width, depth, and resolution in a principled way.
+ **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way.
 
 The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
 
@@ -79,7 +79,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
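To make the compound scaling rule above concrete, here is a minimal sketch of turning the coefficient \\( \phi \\) into depth/width/resolution multipliers; the `alpha`, `beta`, `gamma` defaults are the B0 base constants from the EfficientNet paper and are illustrative only.

```python
# Minimal sketch of compound scaling: one coefficient phi scales depth,
# width, and input resolution together via fixed base constants.

def compound_scale(phi, alpha=1.2, beta=1.1, gamma=1.15):
    """Return (depth, width, resolution) multipliers for coefficient phi."""
    return alpha ** phi, beta ** phi, gamma ** phi

for phi in range(4):
    d, w, r = compound_scale(phi)
    print(f"phi={phi}: depth x{d:.2f}, width x{w:.2f}, resolution x{r:.2f}")
```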
 
pytorch-image-models/hfdocs/source/models/efficientnet.mdx CHANGED
@@ -1,6 +1,6 @@
 # EfficientNet
 
- **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scales network width, depth, and resolution in a principled way.
+ **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way.
 
 The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
 
@@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
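As a worked check on the \\( 2^N \\) resource claim above: FLOPs grow roughly linearly with depth and quadratically with width and resolution, so scaling by \\( \alpha^{N}, \beta^{N}, \gamma^{N} \\) multiplies FLOPs by about \\( (\alpha\beta^{2}\gamma^{2})^{N} \\), and the paper constrains \\( \alpha \cdot \beta^{2} \cdot \gamma^{2} \approx 2 \\). A quick sketch with the B0 constants (illustrative):

```python
# FLOPs scale ~linearly with depth and ~quadratically with width and
# resolution, so the combined growth factor per unit of N is
# alpha * beta**2 * gamma**2, which the paper pins to roughly 2.
alpha, beta, gamma = 1.2, 1.1, 1.15  # illustrative B0 constants

for n in range(1, 4):
    factor = (alpha * beta**2 * gamma**2) ** n
    print(f"N={n}: FLOPs x{factor:.2f} (target ~{2**n})")
```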
 
pytorch-image-models/hfdocs/source/models/ensemble-adversarial.mdx CHANGED
@@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/ese-vovnet.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/fbnet.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/gloun-inception-v3.mdx CHANGED
@@ -1,6 +1,6 @@
 # (Gluon) Inception v3
 
- **Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifer](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
+ **Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
 
 The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html).
 
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
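Label smoothing, mentioned above, softens the one-hot training targets so the true class gets \\( 1 - \epsilon \\) of the probability mass and the remainder is spread over the other classes. A minimal PyTorch sketch using the built-in `label_smoothing` argument of `CrossEntropyLoss`; the value 0.1 is the commonly used choice and is assumed here:

```python
import torch
import torch.nn as nn

logits = torch.randn(4, 1000)            # batch of 4, 1000 classes
targets = torch.randint(0, 1000, (4,))   # integer class labels

plain = nn.CrossEntropyLoss()
smoothed = nn.CrossEntropyLoss(label_smoothing=0.1)  # eps = 0.1

print("plain CE:   ", plain(logits, targets).item())
print("smoothed CE:", smoothed(logits, targets).item())
```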
 
pytorch-image-models/hfdocs/source/models/gloun-resnet.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/gloun-resnext.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/gloun-senet.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/gloun-seresnext.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/gloun-xception.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/hrnet.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/ig-resnext.mdx CHANGED
@@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/inception-resnet-v2.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/inception-v3.mdx CHANGED
@@ -1,6 +1,6 @@
 # Inception v3
 
- **Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifer](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
+ **Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
 
 ## How do I use this model on an image?
 
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
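The auxiliary classifier is a second, smaller head attached partway down the network: during training its loss is added to the main loss with a small weight so that label gradients reach the earlier layers, and it is discarded at inference. A hedged sketch of the training step, assuming a model whose train-mode forward returns `(main_logits, aux_logits)`; `aux_weight` is an illustrative choice, not a fixed value:

```python
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
aux_weight = 0.4  # illustrative; the exact weight varies by implementation

def training_loss(model, images, targets):
    # Assumed interface: train-mode forward yields main and auxiliary logits.
    main_logits, aux_logits = model(images)
    return criterion(main_logits, targets) + aux_weight * criterion(aux_logits, targets)

# At eval time the auxiliary head is ignored; only the main logits are used.
```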
 
pytorch-image-models/hfdocs/source/models/inception-v4.mdx CHANGED
@@ -72,7 +72,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/legacy-se-resnet.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/legacy-senet.mdx CHANGED
@@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/mixnet.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/mnasnet.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/mobilenet-v2.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/mobilenet-v3.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/nasnet.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/noisy-student.mdx CHANGED
@@ -82,7 +82,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/pnasnet.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/regnetx.mdx CHANGED
@@ -1,10 +1,10 @@
 # RegNetX
 
- **RegNetX** is a convolutional network design space with simple, regular models with parameters: depth \\( d \\), initial width \\( w\_{0} > 0 \\), and slope \\( w\_{a} > 0 \\), and generates a different block width \\( u\_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure):
+ **RegNetX** is a convolutional network design space with simple, regular models with parameters: depth \\( d \\), initial width \\( w_{0} > 0 \\), and slope \\( w_{a} > 0 \\), and generates a different block width \\( u_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure):
 
- \\( \\) u\_{j} = w\_{0} + w\_{a}\cdot{j} \\( \\)
+ \\( u_{j} = w_{0} + w_{a}\cdot{j} \\)
 
- For **RegNetX** we have additional restrictions: we set \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w\_{m} \geq 2 \\) (the width multiplier).
+ For **RegNetX** we have additional restrictions: we set \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w_{m} \geq 2 \\) (the width multiplier).
 
 ## How do I use this model on an image?
 
@@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
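The linear parameterisation above is easy to evaluate directly. A small sketch with illustrative values for \\( d \\), \\( w_{0} \\), and \\( w_{a} \\) (in the full design space these raw widths are then quantized into a handful of stage widths via the width multiplier \\( w_{m} \\)):

```python
# Per-block widths from the RegNet linear rule u_j = w_0 + w_a * j.
d, w0, wa = 16, 48, 36.0  # illustrative depth, initial width, slope

widths = [w0 + wa * j for j in range(d)]
print(widths[:4], "...", widths[-1])  # [48.0, 84.0, 120.0, 156.0] ... 588.0
```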
 
pytorch-image-models/hfdocs/source/models/regnety.mdx CHANGED
@@ -1,10 +1,10 @@
 # RegNetY
 
- **RegNetY** is a convolutional network design space with simple, regular models with parameters: depth \\( d \\), initial width \\( w\_{0} > 0 \\), and slope \\( w\_{a} > 0 \\), and generates a different block width \\( u\_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure):
+ **RegNetY** is a convolutional network design space with simple, regular models with parameters: depth \\( d \\), initial width \\( w_{0} > 0 \\), and slope \\( w_{a} > 0 \\), and generates a different block width \\( u_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure):
 
- \\( \\) u\_{j} = w\_{0} + w\_{a}\cdot{j} \\( \\)
+ \\( u_{j} = w_{0} + w_{a}\cdot{j} \\)
 
- For **RegNetX** authors have additional restrictions: we set \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w\_{m} \geq 2 \\) (the width multiplier).
+ For **RegNetX**, the authors have additional restrictions: they set \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w_{m} \geq 2 \\) (the width multiplier).
 
 For **RegNetY** authors make one change, which is to include [Squeeze-and-Excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block).
 
@@ -79,7 +79,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
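The Squeeze-and-Excitation block that distinguishes RegNetY can be sketched in a few lines of PyTorch: global-average-pool the feature map, squeeze it through a small bottleneck, and rescale the channels with sigmoid gates. This is a generic SE block, not timm's exact implementation:

```python
import torch
import torch.nn as nn

class SEBlock(nn.Module):
    """Generic Squeeze-and-Excitation block (illustrative)."""
    def __init__(self, channels, reduction=16):
        super().__init__()
        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1)
        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1)

    def forward(self, x):
        s = x.mean(dim=(2, 3), keepdim=True)                  # squeeze
        s = torch.sigmoid(self.fc2(torch.relu(self.fc1(s))))  # excite
        return x * s                                          # rescale channels

x = torch.randn(1, 64, 28, 28)
print(SEBlock(64)(x).shape)  # torch.Size([1, 64, 28, 28])
```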
 
pytorch-image-models/hfdocs/source/models/res2net.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/res2next.mdx CHANGED
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
 
pytorch-image-models/hfdocs/source/models/resnest.mdx CHANGED
@@ -1,6 +1,6 @@
 # ResNeSt
 
- A **ResNeSt** is a variant on a [ResNet](https://paperswithcode.com/method/resnet), which instead stacks [Split-Attention blocks](https://paperswithcode.com/method/split-attention). The cardinal group representations are then concatenated along the channel dimension: \\( V = \text{Concat} \\){\\( V^{1},V^{2},\cdots{V}^{K} \\)}. As in standard residual blocks, the final output \\( Y \\) of otheur Split-Attention block is produced using a shortcut connection: \\( Y=V+X \\), if the input and output feature-map share the same shape. For blocks with a stride, an appropriate transformation \\( \mathcal{T} \\) is applied to the shortcut connection to align the output shapes: \\( Y=V+\mathcal{T}(X) \\). For example, \\( \mathcal{T} \\) can be strided convolution or combined convolution-with-pooling.
+ A **ResNeSt** is a variant on a [ResNet](https://paperswithcode.com/method/resnet), which instead stacks [Split-Attention blocks](https://paperswithcode.com/method/split-attention). The cardinal group representations are then concatenated along the channel dimension: \\( V = \text{Concat} \{ V^{1},V^{2},\cdots,{V}^{K} \} \\). As in standard residual blocks, the final output \\( Y \\) of our Split-Attention block is produced using a shortcut connection: \\( Y=V+X \\), if the input and output feature-map share the same shape. For blocks with a stride, an appropriate transformation \\( \mathcal{T} \\) is applied to the shortcut connection to align the output shapes: \\( Y=V+\mathcal{T}(X) \\). For example, \\( \mathcal{T} \\) can be strided convolution or combined convolution-with-pooling.
 
 ## How do I use this model on an image?
 
@@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py)
 
 ## How do I train this model?
 
- You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
+ You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
 
 ## Citation
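The shortcut transformation \\( \mathcal{T} \\) above only needs to match the shapes of \\( X \\) and \\( V \\). A hedged sketch of the two options named in the text; channel counts and stride are illustrative, and the pooled variant mirrors ResNet-D-style downsampling:

```python
import torch.nn as nn

def shortcut_transform(c_in, c_out, stride, pool=True):
    """Illustrative T(X): align the shortcut's shape with the block output."""
    if pool:
        # Combined convolution-with-pooling: average pooling absorbs the
        # stride, then a 1x1 conv matches the channel count.
        return nn.Sequential(
            nn.AvgPool2d(kernel_size=stride, stride=stride),
            nn.Conv2d(c_in, c_out, kernel_size=1, bias=False),
        )
    # Plain strided 1x1 convolution.
    return nn.Conv2d(c_in, c_out, kernel_size=1, stride=stride, bias=False)
```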