Image Classification · timm · PyTorch · Safetensors
rwightman committed
Commit 93de95e (1 parent: a3c88ae)

Update model config and README

Files changed (2):
  1. README.md (+21, -17)
  2. model.safetensors (+3, -0)
README.md CHANGED
@@ -2,7 +2,7 @@
 tags:
 - image-classification
 - timm
-library_tag: timm
+library_name: timm
 license: apache-2.0
 datasets:
 - imagenet-1k
@@ -12,7 +12,7 @@ datasets:
 An official MaxViT image classification model. Trained in tensorflow on ImageNet-1k by paper authors.
 Ported from official Tensorflow implementation (https://github.com/google-research/maxvit) to PyTorch by Ross Wightman.
 
-### Model Variants in [maxxvit.py](https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/maxxvit.py)
+### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py)
 
 MaxxViT covers a number of related model architectures that share a common structure including:
 - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages.
@@ -42,8 +42,9 @@ from urllib.request import urlopen
 from PIL import Image
 import timm
 
-img = Image.open(
-    urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))
+img = Image.open(urlopen(
+    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
+))
 
 model = timm.create_model('maxvit_large_tf_224.in1k', pretrained=True)
 model = model.eval()
@@ -63,8 +64,9 @@ from urllib.request import urlopen
 from PIL import Image
 import timm
 
-img = Image.open(
-    urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))
+img = Image.open(urlopen(
+    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
+))
 
 model = timm.create_model(
     'maxvit_large_tf_224.in1k',
@@ -81,12 +83,13 @@ output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batc
 
 for o in output:
     # print shape of each feature map in output
-    # e.g.:
-    #  torch.Size([1, 128, 192, 192])
-    #  torch.Size([1, 128, 96, 96])
-    #  torch.Size([1, 256, 48, 48])
-    #  torch.Size([1, 512, 24, 24])
-    #  torch.Size([1, 1024, 12, 12])
+    # e.g.:
+    #  torch.Size([1, 128, 112, 112])
+    #  torch.Size([1, 128, 56, 56])
+    #  torch.Size([1, 256, 28, 28])
+    #  torch.Size([1, 512, 14, 14])
+    #  torch.Size([1, 1024, 7, 7])
+
     print(o.shape)
 ```
 
@@ -96,8 +99,9 @@ from urllib.request import urlopen
 from PIL import Image
 import timm
 
-img = Image.open(
-    urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))
+img = Image.open(urlopen(
+    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
+))
 
 model = timm.create_model(
     'maxvit_large_tf_224.in1k',
@@ -115,10 +119,10 @@ output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_featu
 # or equivalently (without needing to set num_classes=0)
 
 output = model.forward_features(transforms(img).unsqueeze(0))
-# output is unpooled (ie.e a (batch_size, num_features, H, W) tensor
+# output is unpooled, a (1, 1024, 7, 7) shaped tensor
 
 output = model.forward_head(output, pre_logits=True)
-# output is (batch_size, num_features) tensor
+# output is a (1, num_features) shaped tensor
 ```
 
 ## Model Comparison
@@ -226,7 +230,7 @@ output = model.forward_head(output, pre_logits=True)
   publisher = {GitHub},
   journal = {GitHub repository},
   doi = {10.5281/zenodo.4414861},
-  howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
+  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
 }
 ```
 ```bibtex
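The corrected feature-map shapes in the hunk above assume the model's default 224 x 224 input. A minimal sketch to reproduce them (assuming `timm` and `torch` are installed; a random tensor stands in for the example image, so `pretrained=False` is enough just to check shapes):

```python
import torch
import timm

# build the backbone so it returns intermediate feature maps instead of logits
model = timm.create_model(
    'maxvit_large_tf_224.in1k',
    pretrained=False,   # set True to also download the weights
    features_only=True,
)
model = model.eval()

# dummy batch of one image at the model's 224x224 training resolution
x = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    features = model(x)

for o in features:
    # expected per the updated README:
    #  (1, 128, 112, 112), (1, 128, 56, 56), (1, 256, 28, 28),
    #  (1, 512, 14, 14), (1, 1024, 7, 7)
    print(o.shape)
```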
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0676c8fc607a05e6311f7738e02d5bd35dcb3c6bc9ee9f9b61bef051f0c3596
+size 848062988
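The committed `model.safetensors` is a Git LFS pointer (spec version, SHA-256 of the weight blob, and its size in bytes, roughly 848 MB); the weights themselves are stored in LFS. A minimal sketch of inspecting such a file once it has been downloaded, assuming the `safetensors` package is installed and the file is in the working directory:

```python
from safetensors.torch import load_file

# read the downloaded weight file into an ordinary dict of name -> tensor
state_dict = load_file('model.safetensors')

# spot-check a few parameter names, shapes, and dtypes
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape), tensor.dtype)

print(f'{len(state_dict)} tensors total')
```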