import torch
from torch import nn
from torch.hub import load_state_dict_from_url
from torchvision.models import efficientnet_b2, EfficientNet_B2_Weights
from torchvision.models._api import WeightsEnum


def get_state_dict(self, *args, **kwargs):
    # Workaround for a torchvision weight-download issue: drop the
    # "check_hash" kwarg (if present) before delegating to torch.hub.
    kwargs.pop("check_hash", None)
    return load_state_dict_from_url(self.url, *args, **kwargs)


def create_effnetb2_model(num_classes: int = 3, seed: int = 42):
    """Creates an EfficientNetB2 feature extractor model and transforms.

    Args:
        num_classes (int, optional): number of classes in the classifier head.
            Defaults to 3.
        seed (int, optional): random seed value. Defaults to 42.

    Returns:
        model (torch.nn.Module): EffNetB2 feature extractor model.
        transforms (torchvision.transforms): EffNetB2 image transforms.
    """
    # Patch WeightsEnum so pretrained weights download without the hash check,
    # then create the EffNetB2 pretrained weights, transforms and model
    WeightsEnum.get_state_dict = get_state_dict
    weights = EfficientNet_B2_Weights.DEFAULT
    transforms = weights.transforms()
    model = efficientnet_b2(weights=weights)

    # Freeze all layers in the base model
    for param in model.parameters():
        param.requires_grad = False

    # Change the classifier head, seeding for reproducibility
    torch.manual_seed(seed)
    model.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=True),
        nn.Linear(in_features=1408, out_features=num_classes),
    )

    return model, transforms
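

# Minimal usage sketch (illustrative addition, not part of the original module):
# build the model and run a dummy forward pass to confirm the classifier head
# outputs `num_classes` logits. The 288x288 input size matches EffNetB2's
# default transforms; any spatial size works thanks to adaptive pooling.
if __name__ == "__main__":
    model, transforms = create_effnetb2_model(num_classes=3, seed=42)

    dummy_batch = torch.randn(1, 3, 288, 288)

    model.eval()
    with torch.inference_mode():
        logits = model(dummy_batch)

    print(f"Logits shape: {logits.shape}")  # expected: torch.Size([1, 3])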