# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import logging
import re
import warnings
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Any, Optional, Union
import torch
from accelerate.hooks import AlignDevicesHook
from accelerate.utils import named_module_tensors, offload_state_dict
from torch import nn
from transformers import PreTrainedModel
from transformers.pytorch_utils import Conv1D
from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND
from ..config import PeftConfig
from ..utils import ModulesToSaveWrapper, _get_submodules
logger = logging.getLogger(__name__)
@contextmanager
def onload_layer(layer):
r"""
    A context manager for a module containing one or more tuners and a base layer, any of which may be offloaded to
    the CPU or disk. It moves the module's sub-modules to the execution device before the wrapped action is performed;
    afterwards, the base layer's state dict is re-assigned (if that layer was offloaded to disk) and finally the
    parameters are offloaded again.
    If the module has no offloaded sub-modules, this function does nothing.
    Args:
        layer (`torch.nn.Module`):
            The layer whose tuners are to be merged.
"""
offloaded_modules = []
for name, module in layer.named_modules():
if name in ["", "base_layer"]:
continue
if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload:
module._hf_hook.pre_forward(module)
offloaded_modules.append(module)
base_layer_offload = False
if hasattr(layer, "base_layer") and (
hasattr(layer.base_layer, "_hf_hook")
and isinstance(layer.base_layer._hf_hook, AlignDevicesHook)
and layer.base_layer._hf_hook.offload
):
if torch.device("meta") in layer.base_layer._hf_hook.original_devices.values():
# retrieve the name of the original disk-offload directory
offload_folder = layer.base_layer._hf_hook.weights_map.dataset.save_folder
layer.base_layer._hf_hook.pre_forward(layer.base_layer)
base_layer_offload = True
yield
for module in offloaded_modules:
module._hf_hook.post_forward(module, torch.tensor([]))
if base_layer_offload:
# re-make weights map (must be on cpu to send params to the disk via memmap if disk offload)
layer.base_layer._hf_hook.weights_map = {
name: param.to("cpu") for name, param in named_module_tensors(layer.base_layer)
}
# offload weights map to disk if original device is the disk
if torch.device("meta") in layer.base_layer._hf_hook.original_devices.values():
# rewrite directory with merged weights
offload_state_dict(offload_folder, layer.base_layer._hf_hook.weights_map)
layer.base_layer._hf_hook.post_forward(layer.base_layer, torch.tensor([]))
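# Hedged usage sketch (illustrative, not part of the original module): `merge_adapter` below wraps each tuner layer
# in this context manager so that CPU- or disk-offloaded weights are materialized on the execution device for the
# duration of the merge:
#
#     with onload_layer(lora_layer):  # `lora_layer` stands for any BaseTunerLayer instance
#         lora_layer.merge()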
class BaseTuner(nn.Module, ABC):
r"""
A base tuner model that provides the common methods and attributes for all tuners that are injectable into a
torch.nn.Module
    To add a new Tuner class, one needs to override the following methods:
- **_prepare_adapter_config**:
A private method to eventually prepare the adapter config, for example in case the field `target_modules` is
missing.
- **_create_and_replace**:
A private method to create and replace the target module with the adapter module.
- **_check_target_module_exists**:
A private helper method to check if the passed module's key name matches any of the target modules in the
adapter_config.
    The easiest way to get started is to check what is done in the `peft.tuners.lora.LoraModel` class.
Attributes:
model (`torch.nn.Module`):
The model to which the adapter tuner layers will be attached.
forward (`Callable`):
The forward method of the model.
        peft_config (`Union[PeftConfig, dict[str, PeftConfig]]`):
            The adapter configuration object; it should be a dictionary of `str` to `PeftConfig` objects. One can
            also pass a single `PeftConfig` object, in which case a dictionary `{adapter_name: peft_config}` is
            created internally.
config (`dict[str, Any]`):
The model configuration object, it should be a dictionary of `str` to `Any` objects.
        targeted_module_names (`list[str]`):
            The list of module names that were actually adapted. Can be useful to inspect if you want to quickly
            double-check that the `config.target_modules` were specified correctly.
"""
def __init__(self, model, peft_config: Union[PeftConfig, dict[str, PeftConfig]], adapter_name: str) -> None:
super().__init__()
self.model = model
self.targeted_module_names: list[str] = []
# For advanced developers, if you want to attach multiple adapters to your
# model, just add a `peft_config` dict attribute to your model.
if not hasattr(self, "peft_config"):
self.peft_config = {adapter_name: peft_config} if isinstance(peft_config, PeftConfig) else peft_config
else:
logger.info(
"Already found a `peft_config` attribute in the model. This will lead to having multiple adapters"
" in the model. Make sure to know what you are doing!"
)
if isinstance(peft_config, PeftConfig):
self.peft_config[adapter_name] = peft_config
else:
# user is adding a dict of PeftConfigs
self.peft_config.update(peft_config)
self.active_adapter = adapter_name
self.inject_adapter(self.model, adapter_name)
# Copy the peft_config in the injected model.
self.model.peft_config = self.peft_config
@property
def active_adapters(self) -> list[str]:
if isinstance(self.active_adapter, str):
return [self.active_adapter]
# is already a list of str
return self.active_adapter
def forward(self, *args: Any, **kwargs: Any):
return self.model.forward(*args, **kwargs)
@abstractmethod
def _prepare_adapter_config(self, peft_config: PeftConfig, model_config: dict) -> PeftConfig:
r"""
        A private method to prepare the adapter config if needed. For transformers-based models, if
`peft_config.target_modules` is None, we can automatically infer the target modules from the
`TRANSFORMERS_MODELS_TO_XXX_TARGET_MODULES_MAPPING`. This method can be further refactored in the future to
automatically infer it for all tuner models.
Check out `peft.tuner.lora.LoraModel._prepare_adapter_config` for an example.
Args:
peft_config (`PeftConfig`):
The adapter config.
model_config (`dict`):
                The transformers model config; it should contain the `model_type` key.
"""
...
def _prepare_model(self, peft_config: PeftConfig, model: nn.Module):
r"""
        A private method to modify the model structure before the adapter is applied.
See `peft.tuner.lora.LoraModel._prepare_model` for an example.
Args:
peft_config (`PeftConfig`):
The prepared adapter config.
model (`nn.Module`):
The model that is going to be adapted.
"""
pass
@abstractmethod
def _check_target_module_exists(peft_config: PeftConfig, key: str) -> bool:
r"""
A helper private method to check if the passed module's key name matches any of the target modules in the
`peft_config.target_modules` list. If it does, return `True`, else return `False`.
Args:
peft_config (`PeftConfig`):
The adapter config.
key (`str`):
The module's key name.
"""
...
@abstractmethod
def _create_and_replace(
self,
peft_config: PeftConfig,
adapter_name: str,
target: nn.Module,
target_name: str,
parent: nn.Module,
current_key: str,
) -> None:
r"""
        In-place replacement of the target module with the adapter layer. This method needs to be overridden by all
        the tuner classes.
Check `peft.tuners.lora.LoraModel._create_and_replace` for an example.
Args:
peft_config (`PeftConfig`):
The adapter config.
adapter_name (`str`):
The adapter name.
target (`nn.Module`):
The target module.
target_name (`str`):
The target module's name.
parent (`nn.Module`):
The parent module.
current_key (`str`):
The key of the current target being adapted.
"""
...
@abstractmethod
def _mark_only_adapters_as_trainable(self, model: nn.Module):
r"""
        A helper method to mark only the adapter layers as trainable (i.e., set `requires_grad=False` on all other
        parameters). This needs to be overridden for all tuner classes to match the correct key names.
Check `peft.tuners.lora.LoraModel._mark_only_adapters_as_trainable` for an example.
"""
...
def _check_new_adapter_config(self, config: PeftConfig) -> None:
"""
A helper method to check the config when a new adapter is being added.
Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
"""
pass
def _check_merge_allowed(self):
"""Helper method to check whether the adapter can be merged.
Raise a ValueError if it is not possible to merge the adapter with the given configuration.
"""
pass
def inject_adapter(self, model: nn.Module, adapter_name: str):
r"""
Creates adapter layers and replaces the target modules with the adapter layers. This method is called under the
hood by `peft.mapping.get_peft_model` if a non-prompt tuning adapter class is passed.
The corresponding PEFT config is directly retrieved from the `peft_config` attribute of the BaseTuner class.
Args:
model (`nn.Module`):
The model to be tuned.
adapter_name (`str`):
The adapter name.
"""
peft_config = self.peft_config[adapter_name]
# Note: If possible, all checks should be performed *at the start of this method*.
# This way, we can raise early if something goes wrong, without leaving the model
# in a bad (half-initialized) state.
self._check_new_adapter_config(peft_config)
_check_for_modules_to_save = getattr(peft_config, "modules_to_save", None) is not None
_has_modules_to_save = False
model_config = getattr(model, "config", {"model_type": "custom"})
if hasattr(model_config, "to_dict"):
model_config = model_config.to_dict()
peft_config = self._prepare_adapter_config(peft_config, model_config)
self._prepare_model(peft_config, model)
is_target_modules_in_base_model = False
key_list = [key for key, _ in model.named_modules()]
# update peft_config.target_modules if required
peft_config = _maybe_include_all_linear_layers(peft_config, model)
for key in key_list:
            # Check for modules_to_save, if applicable
if _check_for_modules_to_save and any(
key.endswith(f"{module_to_save}") for module_to_save in peft_config.modules_to_save
):
# Optionally set the modules to save
parent, target, target_name = _get_submodules(model, key)
if not isinstance(target, ModulesToSaveWrapper):
new_module = ModulesToSaveWrapper(target, adapter_name)
setattr(parent, target_name, new_module)
else:
target.update(adapter_name)
_has_modules_to_save = True
continue
if not self._check_target_module_exists(peft_config, key):
continue
self.targeted_module_names.append(key)
is_target_modules_in_base_model = True
parent, target, target_name = _get_submodules(model, key)
self._create_and_replace(peft_config, adapter_name, target, target_name, parent, current_key=key)
if not is_target_modules_in_base_model:
raise ValueError(
f"Target modules {peft_config.target_modules} not found in the base model. "
f"Please check the target modules and try again."
)
self._mark_only_adapters_as_trainable(model)
if self.peft_config[adapter_name].inference_mode:
for n, p in model.named_parameters():
if adapter_name in n:
p.requires_grad = False
if _has_modules_to_save:
if not hasattr(model, "modules_to_save"):
model.modules_to_save = set(peft_config.modules_to_save)
else:
model.modules_to_save.update(set(peft_config.modules_to_save))
def merge_adapter(self, adapter_names: Optional[list[str]] = None) -> None:
"""
This method merges the adapter layers into the base model.
        Merging adapters can lead to a speed-up of the forward pass. A copy of the adapter weights is still kept in
        memory, which is required to unmerge the adapters. In order to merge the adapter weights without keeping them
        in memory, please call `merge_and_unload`.
        Args:
            adapter_names (`list[str]`, *optional*):
                The list of adapter names that should be merged. If `None`, all active adapters will be merged.
                Defaults to `None`.
"""
self._check_merge_allowed()
for module in self.model.modules():
if isinstance(module, BaseTunerLayer):
with onload_layer(module):
module.merge(adapter_names=adapter_names)
def unmerge_adapter(self):
"""
This method unmerges all merged adapter layers from the base model.
"""
for module in self.model.modules():
if isinstance(module, BaseTunerLayer):
with onload_layer(module):
module.unmerge()
def _unloading_checks(self, adapter_names: Optional[list[str]]):
adapters_to_consider = adapter_names or self.active_adapters
is_modules_to_save_available = any(
self.peft_config[adapter].modules_to_save for adapter in adapters_to_consider
)
if is_modules_to_save_available and len(adapters_to_consider) > 1:
raise ValueError("Cannot unload multiple adapters that specify `modules_to_save`.")
class BaseTunerLayer(ABC):
r"""
A tuner layer mixin that provides the common methods and attributes for all tuners.
Args:
        is_pluggable (`bool`, *optional*):
            Whether the adapter layer can be plugged into any pytorch module.
        active_adapters (Union[List[`str`], `str`], *optional*):
            The name(s) of the active adapter(s).
"""
active_adapter = None
# All names of layers that may contain adapter (trainable) weights
    adapter_layer_names: tuple[str, ...] = ()
    # All names of other parameters that may contain adapter-related parameters
    other_param_names: tuple[str, ...] = ()
# indicates whether all adapters should be disabled
_disable_adapters: bool = False
# the currently active adapter(s)
_active_adapter: str | list[str] = "default"
# List all merged adapters
merged_adapters: list[str] = []
def get_base_layer(self) -> nn.Module:
"""
(Recursively) get the base_layer.
This is necessary for the case that the tuner layer wraps another tuner layer.
"""
base_layer = self
while hasattr(base_layer, "base_layer"):
base_layer = base_layer.base_layer
return base_layer
@property
def weight(self) -> torch.Tensor:
# This is required for some transformers code, e.g. for T5, weight is accessed as:
# self.wo.weight
# where "wo" is the adapter layer.
        # https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers/models/t5/modeling_t5.py#L292
base_layer = self.get_base_layer()
if hasattr(base_layer, "qweight"):
# QuantLinear
weight = base_layer.qweight
else:
# Other layers
weight = base_layer.weight
return weight
@property
def bias(self) -> torch.Tensor:
base_layer = self.get_base_layer()
return base_layer.bias
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
raise NotImplementedError
def unmerge(self) -> None:
raise NotImplementedError
@property
def merged(self) -> bool:
return bool(self.merged_adapters)
@property
def disable_adapters(self) -> bool:
# use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method
return self._disable_adapters
@property
def active_adapter(self) -> str:
# use a property to ensure that active_adapter is not set directly, instead use the set_adapter method
return self._active_adapter
@property
def active_adapters(self):
if isinstance(self.active_adapter, str):
return [self.active_adapter]
# is already a list of str
return self.active_adapter
def enable_adapters(self, enabled: bool) -> None:
"""Toggle the enabling and disabling of adapters
Takes care of setting the requires_grad flag for the adapter weights.
Args:
enabled (bool): True to enable adapters, False to disable adapters
"""
if enabled:
self.set_adapter(self.active_adapters)
self._disable_adapters = False
else:
# disable grads on all adapter layers
for layer_name in self.adapter_layer_names:
layer = getattr(self, layer_name)
layer.requires_grad_(False)
self._disable_adapters = True
def set_adapter(self, adapter_names: str | list[str]) -> None:
"""Set the active adapter(s).
Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
not desired, use the following code.
```py
>>> for name, param in model_peft.named_parameters():
... if ...: # some check on name (ex. if 'lora' in name)
... param.requires_grad = False
```
Args:
            adapter_names (`str` or `list[str]`): Name of the adapter(s) to be activated.
"""
if isinstance(adapter_names, str):
adapter_names = [adapter_names]
# Deactivate grads on the inactive adapter and activate grads on the active adapter
for layer_name in self.adapter_layer_names:
module_dict = getattr(self, layer_name)
for key, layer in module_dict.items():
if key in adapter_names:
# Note: It is possible that not a single layer is called with requires_grad_(True) here. This may
# happen if a completely different adapter layer is being activated.
layer.requires_grad_(True)
else:
layer.requires_grad_(False)
self._active_adapter = adapter_names
def _all_available_adapter_names(self) -> list[str]:
"""Return a sorted list of all available adapter names"""
adapter_names = set()
for name in self.adapter_layer_names + self.other_param_names:
# we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter
# names
attr = getattr(self, name)
if hasattr(attr, "keys"):
adapter_names.update(attr.keys())
return sorted(adapter_names)
def delete_adapter(self, adapter_name: str) -> None:
"""
Delete an adapter from the layer
This should be called on all adapter layers, or else we will get an inconsistent state.
This method will also set a new active adapter if the deleted adapter was an active adapter. It is important
that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers.
Args:
adapter_name (`str`): The name of the adapter to delete
"""
for attr in self.adapter_layer_names + self.other_param_names:
if adapter_name in getattr(self, attr):
del getattr(self, attr)[adapter_name]
if adapter_name in self.active_adapters:
# choose a new active adapter
active_adapters = self.active_adapters[:]
active_adapters.remove(adapter_name)
if active_adapters:
self.set_adapter(active_adapters)
else:
# no active adapters left, set a new default adapter
                # here we get the list of all existing adapter names and choose the first one
remaining_adapters = self._all_available_adapter_names()
if not remaining_adapters:
self.set_adapter([])
else:
new_active_adapter = remaining_adapters[0]
warnings.warn(
f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to "
f"{new_active_adapter}."
)
                    self.set_adapter(new_active_adapter)
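# Illustrative layer-level usage of the mixin API above (hypothetical; `layer` stands for a concrete BaseTunerLayer
# such as a LoRA Linear):
#
#     layer.set_adapter("default")     # activate "default" and make it trainable
#     layer.enable_adapters(False)     # disable all adapters and freeze their gradients
#     layer.enable_adapters(True)      # re-activate the previously active adapter(s)
#     layer.delete_adapter("default")  # remove it; a new active adapter is chosen deterministically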
def check_target_module_exists(config, key: str) -> bool | re.Match[str] | None:
"""A helper method to check if the passed module's key name matches any of the target modules in the adapter_config.
Args:
config (`LoraConfig` | `LycorisConfig`): A config to match target modules from
key (`str`): A key to search any matches in config
Returns:
        `bool` | `re.Match[str]` | `None`: `True` or a match object if the key matches any target module from the
        config, `False` or `None` if no match was found
"""
if isinstance(config.target_modules, str):
target_module_found = re.fullmatch(config.target_modules, key)
elif key in config.target_modules:
# this module is specified directly in target_modules
target_module_found = True
else:
target_module_found = any(key.endswith(f".{target_key}") for target_key in config.target_modules)
layer_indexes = getattr(config, "layers_to_transform", None)
layers_pattern = getattr(config, "layers_pattern", None)
is_using_layer_indexes = layer_indexes is not None and (
len(layer_indexes) != 0 if isinstance(layer_indexes, list) else True
)
if is_using_layer_indexes and target_module_found:
layer_index = None
# TODO: It's still unclear how empty layers_pattern (None, [], or "") should behave
# For now, empty layers_pattern means any layer pattern is ok
if layers_pattern is None or len(layers_pattern) == 0:
layer_index = re.match(r".*\.[^.]*\.(\d+)\.", key)
else:
layers_pattern = [layers_pattern] if isinstance(layers_pattern, str) else layers_pattern
for pattern in layers_pattern:
layer_index = re.match(rf".*\.{pattern}\.(\d+)\.", key)
if layer_index is not None:
break
if layer_index is None:
target_module_found = False
else:
layer_index = int(layer_index.group(1))
if isinstance(layer_indexes, int):
target_module_found = layer_index == layer_indexes
else:
target_module_found = layer_index in layer_indexes
return target_module_found
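# A minimal illustration of the matching rules implemented above (config and keys are hypothetical): a string
# `target_modules` is treated as a full-match regex, while a list matches exact keys or `.`-separated suffixes.
#
#     config = LoraConfig(target_modules=["q_proj", "v_proj"])
#     check_target_module_exists(config, "model.layers.0.self_attn.q_proj")  # True
#     check_target_module_exists(config, "model.layers.0.self_attn.k_proj")  # False
#
#     config = LoraConfig(target_modules=r".*\.attn\..*proj")
#     check_target_module_exists(config, "h.3.attn.c_proj")  # re.Match object (truthy)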
def inspect_matched_modules(tuner: BaseTuner, adapter_name: str = "default") -> dict:
"""
A helper function to inspect the set of matched and unmatched modules for a PEFT model and the given adapter.
"""
config = tuner.peft_config[adapter_name]
key_list = [key for key, _ in tuner.model.named_modules()]
module_dict = {"matched": [], "unmatched": []}
for key in key_list:
if tuner._check_target_module_exists(config, key):
module_dict["matched"].append(key)
else:
module_dict["unmatched"].append(key)
return module_dict
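# Hedged example (the base model and target module name are illustrative): after wrapping a model, this helper shows
# which module keys the adapter config matched and which it did not.
#
#     peft_model = get_peft_model(base_model, LoraConfig(target_modules=["q_proj"]))
#     inspect_matched_modules(peft_model.base_model)["matched"]  # e.g. ["model.layers.0.self_attn.q_proj", ...]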
def _maybe_include_all_linear_layers(peft_config: PeftConfig, model: nn.Module) -> PeftConfig:
"""
Helper function to update `target_modules` to all linear/Conv1D layers if provided as 'all-linear'. Adapted from
the QLoRA repository: https://github.com/artidoro/qlora/blob/main/qlora.py
"""
# if `target_modules` is a string, convert to lower case and check if it matches "all-linear"
if not (
isinstance(peft_config.target_modules, str)
and peft_config.target_modules.lower() == INCLUDE_LINEAR_LAYERS_SHORTHAND
):
return peft_config
if not isinstance(model, PreTrainedModel):
raise ValueError(
f"Only instances of PreTrainedModel support `target_modules={INCLUDE_LINEAR_LAYERS_SHORTHAND!r}`"
)
linear_classes = (torch.nn.Linear, Conv1D)
linear_module_names = set()
for name, module in model.named_modules():
# match with all linear classes.
if isinstance(module, linear_classes):
names = name.rsplit(".", 1)[-1] # get the base name
linear_module_names.add(names)
# ignore the last classification head for text generation models
output_emb = model.get_output_embeddings()
if output_emb is not None:
last_module_name = [name for name, module in model.named_modules() if module is output_emb][0]
linear_module_names -= {last_module_name}
peft_config.target_modules = linear_module_names
return peft_config
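# Hedged example of the "all-linear" shorthand handled above (assumes `pretrained_model` is a PreTrainedModel):
# every nn.Linear/Conv1D base name except the output embedding ends up in `target_modules`.
#
#     config = LoraConfig(target_modules="all-linear")
#     config = _maybe_include_all_linear_layers(config, pretrained_model)
#     # config.target_modules is now a set of module base names, e.g. {"q_proj", "k_proj", ...}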
def check_adapters_to_merge(module: BaseTunerLayer, adapter_names: Optional[list[str]] = None) -> list[str]:
"""
Helper function to check which adapters should be merged.
Only return those adapters that are not already merged. Give a warning if some or all of the adapters are already
merged.
"""
if adapter_names is None:
adapter_names = module.active_adapters
if module.merged:
merged_adapters = set(module.merged_adapters)
adapter_names = [name for name in adapter_names if name not in merged_adapters]
if adapter_names:
            warnings.warn(
                f"The following adapters were already merged: {','.join(module.merged_adapters)}. "
                f"You are now additionally merging {','.join(adapter_names)}."
            )
else:
warnings.warn("All adapters are already merged, nothing to do.")
return adapter_names
def clone_module(module: nn.Module, share_weights=False):
"""Clone a module in a pytorch model.
Clones a module of a model, optionally sharing all the parameters between the original and the clone. Simplifies
reusing a module when manipulating the architecture of a model.
"""
clone = copy.deepcopy(module)
def _share_weights(src: nn.Module, dst: nn.Module):
for name, param in src.named_parameters(recurse=False):
dst.register_parameter(name, param)
if share_weights:
for name, submodule in module.named_modules():
_share_weights(submodule, clone.get_submodule(name))
return clone
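# Hedged sketch of the weight-sharing behavior (names are illustrative): with `share_weights=True`, the clone
# re-registers the source module's parameters, so both modules point at the same tensors.
#
#     lin = nn.Linear(4, 4)
#     twin = clone_module(lin, share_weights=True)
#     assert twin.weight is lin.weight  # same underlying Parameter object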
def replicate_layers(model: nn.Module, layer_map: list[tuple[int, int]]):
"""Replicate layers in a transfomer model with weight sharing.
This function looks for a module list attribute at model[(.model)*].layers and replicates the layers in the module
list according to the layer map. For example the map `[[0, 4], [2, 5]]` will take the set of layers `[0, 1, 2, 3,
4]` and replace them with a module list containing `[0, 1, 2, 3, 2, 3, 4]`.
"""
while hasattr(model, "model"):
model = model.model
# Some variants of the bert model nest the main model under the bert attribute.
if hasattr(model, "bert"):
model = model.bert
model_type = None
layers: nn.ModuleList = None
if hasattr(model, "layers"):
model_type = "llama"
layers = model.layers
elif hasattr(model, "encoder") and hasattr(model.encoder, "layer"):
model_type = "bert"
layers = model.encoder.layer
elif hasattr(model, "h"):
model_type = "falcon"
layers = model.h
if not model_type or not isinstance(layers, nn.ModuleList):
raise ValueError(
"Could not locate the layers attribute in the model. "
"Expected Llama, Bert or Falcon compatible architectures."
)
new_layers = []
for start, end in layer_map:
for i in range(start, end):
current_idx = len(new_layers)
new_layers.append(clone_module(layers[i], share_weights=True))
# This is a hack needed to work around the layer_idx introduced in HF transformers.
for submodule in new_layers[-1].modules():
if hasattr(submodule, "layer_idx"):
submodule.layer_idx = current_idx
layers = nn.ModuleList(new_layers)
if model_type == "llama":
model.layers = layers
elif model_type == "bert":
model.encoder.layer = layers
elif model_type == "falcon":
model.h = layers
else:
raise ValueError("Unexpected model type, need to handle post-processing of layers.")
if hasattr(model.config, "num_hidden_layers"): # Common to Llama, Bert, Falcon.
model.config.num_hidden_layers = len(new_layers)
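# Hedged usage sketch for the layer map described in the docstring above (the model is illustrative): a 5-layer
# model expanded to 7 weight-shared layers.
#
#     replicate_layers(model, [[0, 4], [2, 5]])
#     # the layers are now [0, 1, 2, 3, 2, 3, 4] and config.num_hidden_layers == 7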
# (end of file: peft/src/peft/tuners/tuners_utils.py)
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import tempfile
import time
import unittest
from contextlib import contextmanager
import pytest
import torch
from parameterized import parameterized
from torch import nn
from transformers import AutoModelForCausalLM
from transformers.pytorch_utils import Conv1D
from peft import AdaLoraConfig, IA3Config, LoHaConfig, LoKrConfig, LoraConfig, OFTConfig, PeftModel, get_peft_model
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import ModulesToSaveWrapper, infer_device
from .testing_common import PeftCommonTester
from .testing_utils import get_state_dict, require_torch_gpu
# MLP is a vanilla FF network with only linear layers
# EmbConv1D has an embedding and a Conv1D layer
# Conv2D has a Conv2D layer
TEST_CASES = [
########
# LoRA #
########
("Vanilla MLP 1 LoRA", "MLP", LoraConfig, {"target_modules": "lin0"}),
("Vanilla MLP 2 LoRA", "MLP", LoraConfig, {"target_modules": ["lin0"]}),
("Vanilla MLP 3 LoRA", "MLP", LoraConfig, {"target_modules": ["lin1"]}),
("Vanilla MLP 4 LoRA", "MLP", LoraConfig, {"target_modules": ["lin0", "lin1"]}),
("Vanilla MLP 5 LoRA", "MLP", LoraConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}),
(
"Vanilla MLP 6 LoRA",
"MLP",
LoraConfig,
{
"target_modules": ["lin0"],
"lora_alpha": 4,
"lora_dropout": 0.1,
},
),
("Vanilla MLP 7 LoRA with DoRA", "MLP", LoraConfig, {"target_modules": ["lin0"], "use_dora": True}),
("Vanilla MLP 8 LoRA with DoRA", "MLP", LoraConfig, {"target_modules": ["lin0", "lin1"], "use_dora": True}),
(
"Vanilla MLP 9 LoRA with DoRA",
"MLP",
LoraConfig,
{"target_modules": "lin1", "use_dora": True, "lora_alpha": 32},
),
("Embedding + transformers Conv1D 1 LoRA", "EmbConv1D", LoraConfig, {"target_modules": ["conv1d"]}),
("Embedding + transformers Conv1D 2 LoRA", "EmbConv1D", LoraConfig, {"target_modules": ["emb"]}),
("Embedding + transformers Conv1D 3 LoRA", "EmbConv1D", LoraConfig, {"target_modules": ["emb", "conv1d"]}),
("Conv2d 1 LoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d"]}),
("Conv2d 2 LoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d", "lin0"]}),
("Conv2d 1 LoRA with DoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d"], "use_dora": True}),
("Conv2d 2 LoRA with DoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d", "lin0"], "use_dora": True}),
#######
# IA³ #
#######
("Vanilla MLP 1 IA3", "MLP", IA3Config, {"target_modules": "lin0", "feedforward_modules": []}),
("Vanilla MLP 2 IA3", "MLP", IA3Config, {"target_modules": "lin0", "feedforward_modules": "lin0"}),
("Vanilla MLP 3 IA3", "MLP", IA3Config, {"target_modules": ["lin0"], "feedforward_modules": []}),
("Vanilla MLP 4 IA3", "MLP", IA3Config, {"target_modules": ["lin0"], "feedforward_modules": ["lin0"]}),
("Vanilla MLP 5 IA3", "MLP", IA3Config, {"target_modules": ["lin1"], "feedforward_modules": []}),
("Vanilla MLP 6 IA3", "MLP", IA3Config, {"target_modules": ["lin1"], "feedforward_modules": ["lin1"]}),
(
"Vanilla MLP 7 IA3",
"MLP",
IA3Config,
{"target_modules": ["lin0", "lin1"], "feedforward_modules": []},
),
(
"Vanilla MLP 8 IA3",
"MLP",
IA3Config,
{"target_modules": ["lin0", "lin1"], "feedforward_modules": ["lin0", "lin1"]},
),
(
"Vanilla MLP 9 IA3",
"MLP",
IA3Config,
{"target_modules": ["lin0"], "modules_to_save": ["lin1"], "feedforward_modules": ["lin0"]},
),
(
"transformers Conv1D 1 IA3",
"EmbConv1D",
IA3Config,
{"target_modules": ["conv1d"], "feedforward_modules": ["conv1d"]},
),
(
"transformers Conv1D 2 IA3",
"EmbConv1D",
IA3Config,
{"target_modules": ["conv1d", "lin0"], "feedforward_modules": ["conv1d", "lin0"]},
),
(
"transformers Conv1D 1 IA3",
"EmbConv1D",
IA3Config,
{"target_modules": ["conv1d"], "feedforward_modules": ["conv1d"], "modules_to_save": ["lin1"]},
),
("Conv2d 1 IA3", "Conv2d", IA3Config, {"target_modules": ["conv2d"], "feedforward_modules": []}),
("Conv2d 2 IA3", "Conv2d", IA3Config, {"target_modules": ["conv2d"], "feedforward_modules": ["conv2d"]}),
(
"Conv2d 3 IA3",
"Conv2d",
IA3Config,
{"target_modules": ["conv2d", "lin0"], "feedforward_modules": []},
),
(
"Conv2d 4 IA3",
"Conv2d",
IA3Config,
{"target_modules": ["conv2d", "lin0"], "feedforward_modules": ["conv2d"]},
),
(
"Conv2d 5 IA3",
"Conv2d",
IA3Config,
{"target_modules": ["conv2d", "lin0"], "feedforward_modules": ["conv2d", "lin0"]},
),
########
# LoHa #
########
("Vanilla MLP 1 LOHA", "MLP", LoHaConfig, {"target_modules": "lin0"}),
("Vanilla MLP 2 LOHA", "MLP", LoHaConfig, {"target_modules": ["lin0"]}),
("Vanilla MLP 3 LOHA", "MLP", LoHaConfig, {"target_modules": ["lin1"]}),
("Vanilla MLP 4 LOHA", "MLP", LoHaConfig, {"target_modules": ["lin0", "lin1"]}),
("Vanilla MLP 5 LOHA", "MLP", LoHaConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}),
(
"Vanilla MLP 6 LOHA",
"MLP",
LoHaConfig,
{
"target_modules": ["lin0"],
"alpha": 4,
"module_dropout": 0.1,
},
),
("Vanilla MLP 7 LOHA", "MLP", LoHaConfig, {"target_modules": "lin0", "rank_dropout": 0.5}),
("Conv2d 1 LOHA", "Conv2d", LoHaConfig, {"target_modules": ["conv2d"]}),
("Conv2d 2 LOHA", "Conv2d", LoHaConfig, {"target_modules": ["conv2d", "lin0"]}),
("Conv2d 3 LOHA", "Conv2d", LoHaConfig, {"target_modules": ["conv2d"], "use_effective_conv2d": True}),
("Conv2d 4 LOHA", "Conv2d", LoHaConfig, {"target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True}),
# LoKr
("Vanilla MLP 1 LOKR", "MLP", LoKrConfig, {"target_modules": "lin0"}),
("Vanilla MLP 2 LOKR", "MLP", LoKrConfig, {"target_modules": ["lin0"]}),
("Vanilla MLP 3 LOKR", "MLP", LoKrConfig, {"target_modules": ["lin1"]}),
("Vanilla MLP 4 LOKR", "MLP", LoKrConfig, {"target_modules": ["lin0", "lin1"]}),
("Vanilla MLP 5 LOKR", "MLP", LoKrConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}),
(
"Vanilla MLP 6 LOKR",
"MLP",
LoKrConfig,
{
"target_modules": ["lin0"],
"alpha": 4,
"module_dropout": 0.1,
},
),
("Vanilla MLP 7 LOKR", "MLP", LoKrConfig, {"target_modules": "lin0", "rank_dropout": 0.5}),
("Vanilla MLP 8 LOKR", "MLP", LoKrConfig, {"target_modules": "lin0", "decompose_both": True, "r": 1, "alpha": 1}),
("Conv2d 1 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d"]}),
("Conv2d 2 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d", "lin0"]}),
("Conv2d 3 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d"], "use_effective_conv2d": True}),
("Conv2d 4 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True}),
(
"Conv2d 5 LOKR",
"Conv2d",
LoKrConfig,
{"target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True, "decompose_both": True},
),
(
"Conv2d 6 LOKR",
"Conv2d",
LoKrConfig,
{"target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True, "decompose_factor": 4},
),
(
"Conv2d 7 LOKR",
"Conv2d",
LoKrConfig,
{
"target_modules": ["conv2d", "lin0"],
"use_effective_conv2d": True,
"decompose_both": True,
"decompose_factor": 4,
},
),
########
# OFT #
########
("Vanilla MLP 1 OFT", "MLP", OFTConfig, {"target_modules": "lin0"}),
("Vanilla MLP 2 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"]}),
("Vanilla MLP 5 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}),
(
"Vanilla MLP 6 OFT",
"MLP",
OFTConfig,
{
"target_modules": ["lin0"],
"module_dropout": 0.1,
},
),
("Vanilla MLP 7 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"], "coft": True}),
("Vanilla MLP 8 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"], "block_share": True}),
("Vanilla MLP 9 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"], "coft": True, "block_share": True}),
("Conv2d 1 OFT", "Conv2d", OFTConfig, {"target_modules": ["conv2d"]}),
("Conv2d 3 OFT", "Conv2d", OFTConfig, {"target_modules": ["conv2d"], "coft": True}),
("Conv2d 4 OFT", "Conv2d", OFTConfig, {"target_modules": ["conv2d"], "block_share": True}),
("Conv2d 5 OFT", "Conv2d", OFTConfig, {"target_modules": ["conv2d"], "coft": True, "block_share": True}),
]
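# Each tuple above is (test_name, model_id, config_cls, config_kwargs). With @parameterized.expand, every tuple
# becomes its own test method; roughly (a sketch using the helpers defined in this file):
#
#     model = MockTransformerWrapper.from_pretrained(model_id)
#     config = config_cls(**config_kwargs)  # e.g. LoraConfig(target_modules="lin0")
#     peft_model = get_peft_model(model, config)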
MULTIPLE_ACTIVE_ADAPTERS_TEST_CASES = [
(
"LoRA Same",
"lora",
LoraConfig,
{"target_modules": ["lin0"], "init_lora_weights": False},
{"target_modules": ["lin0"], "init_lora_weights": False},
),
(
"LoRA Different",
"lora",
LoraConfig,
{"target_modules": ["lin0"], "init_lora_weights": False},
{"target_modules": ["lin1"], "init_lora_weights": False},
),
(
"IA3 Same",
"ia3",
IA3Config,
{
"target_modules": ["lin0"],
"feedforward_modules": ["lin0"],
"init_ia3_weights": False,
},
{
"target_modules": ["lin0"],
"feedforward_modules": ["lin0"],
"init_ia3_weights": False,
},
),
(
"IA3 Different",
"ia3",
IA3Config,
{
"target_modules": ["lin0"],
"feedforward_modules": ["lin0"],
"init_ia3_weights": False,
},
{
"target_modules": ["lin1"],
"feedforward_modules": ["lin1"],
"init_ia3_weights": False,
},
),
(
"AdaLora Same",
"adalora",
AdaLoraConfig,
{"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True},
{"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True},
),
(
"AdaLora Different",
"adalora",
AdaLoraConfig,
{"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True},
{"target_modules": ["lin1"], "init_lora_weights": False, "inference_mode": True},
),
]
PREFIXES = {
IA3Config: "ia3_",
LoraConfig: "lora_",
LoHaConfig: "hada_",
LoKrConfig: "lokr_",
OFTConfig: "oft_",
}
class MLP(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.relu = nn.ReLU()
self.drop = nn.Dropout(0.5)
self.lin1 = nn.Linear(20, 2, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = self.drop(X)
X = self.lin1(X)
X = self.sm(X)
return X
class Block(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.relu = nn.ReLU()
self.drop = nn.Dropout(0.5)
self.lin1 = nn.Linear(20, 10, bias=bias)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = self.drop(X)
X = self.lin1(X)
return X
class DeepMLP(nn.Module):
def __init__(self, bias=True, num_hidden_layers=12):
super().__init__()
self.layers = nn.ModuleList([Block(bias=bias) for _ in range(num_hidden_layers)])
self.out = nn.Linear(10, 2, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
        X = X.float()
for layer in self.layers:
X = layer(X)
X = self.out(X)
X = self.sm(X)
return X
class ModelEmbConv1D(nn.Module):
def __init__(self):
super().__init__()
self.emb = nn.Embedding(100, 5)
self.conv1d = Conv1D(1, 5)
self.relu = nn.ReLU()
self.flat = nn.Flatten()
self.lin0 = nn.Linear(10, 2)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = self.emb(X)
X = self.conv1d(X)
X = self.relu(X)
X = self.flat(X)
X = self.lin0(X)
X = self.sm(X)
return X
class ModelEmbWithEmbeddingUtils(nn.Module):
    # Adds `get_input_embeddings` and `get_output_embeddings` methods to mimic 🤗 transformers models
def __init__(self):
super().__init__()
self.embed_tokens = nn.Embedding(100, 5)
self.conv1d = Conv1D(1, 5)
self.relu = nn.ReLU()
self.flat = nn.Flatten()
self.lin0 = nn.Linear(10, 2)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = self.embed_tokens(X)
X = self.conv1d(X)
X = self.relu(X)
X = self.flat(X)
X = self.lin0(X)
X = self.sm(X)
return X
def get_input_embeddings(self):
return self.embed_tokens
def get_output_embeddings(self):
return None
class ModelConv2D(nn.Module):
def __init__(self):
super().__init__()
self.conv2d = nn.Conv2d(5, 10, 3)
self.relu = nn.ReLU()
self.flat = nn.Flatten()
self.lin0 = nn.Linear(10, 2)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float().reshape(-1, 5, 3, 3)
X = self.conv2d(X)
X = self.relu(X)
X = self.flat(X)
X = self.lin0(X)
X = self.sm(X)
return X
class MockTransformerWrapper:
"""Mock class to behave like a transformers model.
This is needed because the tests initialize the model by calling transformers_class.from_pretrained.
"""
@classmethod
def from_pretrained(cls, model_id, torch_dtype=None):
# set the seed so that from_pretrained always returns the same model
torch.manual_seed(0)
if torch_dtype is None:
torch_dtype = torch.float32
if model_id == "MLP":
return MLP().to(torch_dtype)
if model_id == "EmbConv1D":
return ModelEmbConv1D().to(torch_dtype)
if model_id == "Conv2d":
return ModelConv2D().to(torch_dtype)
raise ValueError(f"model_id {model_id} not implemented")
class PeftCustomModelTester(unittest.TestCase, PeftCommonTester):
"""TODO"""
transformers_class = MockTransformerWrapper
def prepare_inputs_for_testing(self):
X = torch.arange(90).view(9, 10).to(self.torch_device)
return {"X": X}
@parameterized.expand(TEST_CASES)
def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_model_attr(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs):
self._test_adapter_name(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs):
# This test does not work with custom models because it assumes that
# there is always a method get_input_embeddings that returns a layer
# which does not need updates. Instead, a new test is added below that
# checks that LoRA works as expected.
pass
@parameterized.expand(TEST_CASES)
def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_save_pretrained_pickle(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs, safe_serialization=False)
@parameterized.expand(TEST_CASES)
def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs):
self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
config_kwargs = config_kwargs.copy()
if issubclass(config_cls, LoraConfig):
config_kwargs["init_lora_weights"] = False
elif issubclass(config_cls, IA3Config):
config_kwargs["init_ia3_weights"] = False
else:
config_kwargs["init_weights"] = False
self._test_merge_layers(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_merge_layers_fp16(self, test_name, model_id, config_cls, config_kwargs):
config_kwargs = config_kwargs.copy()
if issubclass(config_cls, LoraConfig):
config_kwargs["init_lora_weights"] = False
elif issubclass(config_cls, IA3Config):
config_kwargs["init_ia3_weights"] = False
self._test_merge_layers_fp16(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_merge_layers_is_idempotent(self, test_name, model_id, config_cls, config_kwargs):
# calling merge twice with the same arguments should not change the output
config_kwargs = config_kwargs.copy()
if issubclass(config_cls, LoraConfig):
config_kwargs["init_lora_weights"] = False
elif issubclass(config_cls, IA3Config):
config_kwargs["init_ia3_weights"] = False
self._test_merge_layers_is_idempotent(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_safe_merge(self, test_name, model_id, config_cls, config_kwargs):
        # check that merging with the safe_merge option works as expected
config_kwargs = config_kwargs.copy()
if issubclass(config_cls, LoraConfig):
config_kwargs["init_lora_weights"] = False
elif issubclass(config_cls, IA3Config):
config_kwargs["init_ia3_weights"] = False
else:
config_kwargs["init_weights"] = False
self._test_safe_merge(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_generate(self, test_name, model_id, config_cls, config_kwargs):
# Custom models do not (necessarily) have a generate method, so this test is not performed
pass
@parameterized.expand(TEST_CASES)
def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs):
# Custom models do not (necessarily) have a generate method, so this test is not performed
pass
@parameterized.expand(TEST_CASES)
def test_training_custom_models(self, test_name, model_id, config_cls, config_kwargs):
self._test_training(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_training_custom_models_layer_indexing(self, test_name, model_id, config_cls, config_kwargs):
# At the moment, layer indexing only works when layer names conform to a specific pattern, which is not
# guaranteed here. Therefore, this test is not performed.
pass
@parameterized.expand(TEST_CASES)
def test_training_custom_models_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs):
self._test_inference_safetensors(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs):
self._test_peft_model_device_map(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_forward_output_finite(self, test_name, model_id, config_cls, config_kwargs):
X = self.prepare_inputs_for_testing()
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model.eval()
with torch.no_grad():
output = model(**X)
assert torch.isfinite(output).all()
@parameterized.expand(TEST_CASES)
def test_only_params_are_updated(self, test_name, model_id, config_cls, config_kwargs):
# An explicit test that when using an adapter on a custom model, only the adapter parameters are updated during
# training
X = self.prepare_inputs_for_testing()
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model_before = copy.deepcopy(model)
model.train()
optimizer = torch.optim.SGD(model.parameters(), lr=0.5)
# train at least 3 steps for all parameters to be updated (probably this is required because of symmetry
# breaking of some LoRA layers that are initialized with constants)
for _ in range(3):
optimizer.zero_grad()
y_pred = model(**X)
loss = y_pred.sum()
loss.backward()
optimizer.step()
tol = 1e-4
params_before = dict(model_before.named_parameters())
params_after = dict(model.named_parameters())
assert params_before.keys() == params_after.keys()
prefix = PREFIXES[config_cls]
for name, param_before in params_before.items():
param_after = params_after[name]
if (prefix in name) or ("modules_to_save" in name):
# target_modules and modules_to_save _are_ updated
assert not torch.allclose(param_before, param_after, atol=tol, rtol=tol)
else:
assert torch.allclose(param_before, param_after, atol=tol, rtol=tol)
@parameterized.expand(TEST_CASES)
def test_parameters_after_loading_model(self, test_name, model_id, config_cls, config_kwargs):
# An explicit test that when loading a trained model, the parameters are loaded correctly
# see issue #808
X = self.prepare_inputs_for_testing()
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model.train()
lr = 0.5 if not config_kwargs.get("use_dora") else 0.1 # otherwise we get nan
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# train at least 3 steps for all parameters to be updated (probably this is required because of symmetry
# breaking of some LoRA layers that are initialized with constants)
for _ in range(3):
optimizer.zero_grad()
y_pred = model(**X)
loss = y_pred.sum()
loss.backward()
optimizer.step()
tol = 1e-4
params_before = get_state_dict(model)
# note: no need to sanity check if parameters were updated at all, this
# is already covered in the previous test
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model_from_pretrained = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)
params_after = get_state_dict(model_from_pretrained)
assert params_before.keys() == params_after.keys()
for name, param_before in params_before.items():
param_after = params_after[name]
assert torch.allclose(param_before, param_after, atol=tol, rtol=tol)
@parameterized.expand(TEST_CASES)
def test_disable_adapters(self, test_name, model_id, config_cls, config_kwargs):
X = self.prepare_inputs_for_testing()
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device).eval()
outputs_base = model(**X)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model.eval()
outputs_before = model(**X)
assert torch.allclose(outputs_base, outputs_before)
model.train()
# EmbConv1D is slow to learn for some reason
lr = 0.01 if model_id != "EmbConv1D" else 1.0
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# train at least 3 steps for all parameters to be updated (probably this is required because of symmetry
# breaking of some LoRA layers that are initialized with constants)
for _ in range(3):
optimizer.zero_grad()
y_pred = model(**X)
y = torch.arange(len(y_pred)).to(self.torch_device) % 2
loss = nn.functional.nll_loss(y_pred, y)
loss.backward()
optimizer.step()
model.eval()
outputs_after = model(**X)
with model.disable_adapter():
outputs_disabled = model(**X)
# check that after leaving the disable_adapter context, everything is enabled again
outputs_enabled_after_disable = model(**X)
assert not torch.allclose(outputs_before, outputs_after)
assert torch.allclose(outputs_before, outputs_disabled)
assert torch.allclose(outputs_after, outputs_enabled_after_disable)
@parameterized.expand(TEST_CASES)
def test_disable_adapters_with_merging(self, test_name, model_id, config_cls, config_kwargs):
# same as test_disable_adapters, but with merging
X = self.prepare_inputs_for_testing()
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model.eval()
outputs_before = model(**X)
model.train()
lr = 0.01
# Adam optimizer since SGD isn't great for small models with IA3 + Conv1D
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# train at least 3 steps for all parameters to be updated (probably this is required because of symmetry
# breaking of some LoRA layers that are initialized with constants)
for _ in range(3):
optimizer.zero_grad()
y_pred = model(**X)
y = torch.arange(len(y_pred)).to(self.torch_device) % 2
loss = nn.functional.nll_loss(y_pred, y)
loss.backward()
optimizer.step()
model.eval()
outputs_unmerged = model(**X)
model.merge_adapter()
outputs_after = model(**X)
with model.disable_adapter():
outputs_disabled = model(**X)
# check that after leaving the disable_adapter context, everything is enabled again
outputs_enabled_after_disable = model(**X)
atol, rtol = 1e-5, 1e-5 # tolerances higher than defaults since merging introduces some numerical instability
if issubclass(config_cls, IA3Config) and model_id == "Conv2d": # more instability with Conv2d + IA3
atol, rtol = 1e-3, 1e-3
# check that there is a difference in results after training
assert not torch.allclose(outputs_before, outputs_after, atol=atol, rtol=rtol)
# unmerged or merged should make no difference
assert torch.allclose(outputs_after, outputs_unmerged, atol=atol, rtol=rtol)
# check that disabling adapters gives the same results as before training
assert torch.allclose(outputs_before, outputs_disabled, atol=atol, rtol=rtol)
# check that enabling + disabling adapters does not change the results
assert torch.allclose(outputs_after, outputs_enabled_after_disable, atol=atol, rtol=rtol)
@parameterized.expand(TEST_CASES)
def test_disable_adapter_with_bias_warns(self, test_name, model_id, config_cls, config_kwargs):
# When training biases in lora, disabling adapters does not reset the biases, so the output is not what users
# might expect. Therefore, a warning should be given.
# Note: We test only with custom models since they run really fast. There is really no point in testing the same
# thing with decoder, encoder_decoder, etc.
        if config_cls != LoraConfig:
            # skip this test for other configs, as the bias argument is specific to LoRA
            self.skipTest("Testing bias warnings only for LoraConfig")
def run_with_disable(config_kwargs, bias):
config_kwargs = config_kwargs.copy()
config_kwargs["bias"] = bias
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
peft_model = get_peft_model(model, config)
with peft_model.disable_adapter():
pass # there is nothing to be done
# check that bias=all and bias=lora_only give a warning with the correct message
msg_start = "Careful, disabling adapter layers with bias configured to be"
with pytest.warns(UserWarning, match=msg_start):
run_with_disable(config_kwargs, bias="lora_only")
with pytest.warns(UserWarning, match=msg_start):
run_with_disable(config_kwargs, bias="all")
        # For bias=none, there is no warning. Unfortunately, unittest has no built-in way to assert that no warning
        # is given; therefore, we check that asserting a warning raises an AssertionError.
bias_warning_was_given = False
try:
with self.assertWarns(UserWarning) as cm:
run_with_disable(config_kwargs, bias="none")
# if we get here, it means there was no AssertionError, i.e. there are warnings -- let's check that they
# are not related to the bias setting
if any(warning.message.args[0].startswith(msg_start) for warning in cm.warnings):
bias_warning_was_given = True
except AssertionError:
# This is good, there was an AssertionError, i.e. there was no warning
pass
if bias_warning_was_given:
# This is bad, there was a warning about the bias when there should not have been any.
self.fail("There should be no warning when bias is set to 'none'")
@parameterized.expand(TEST_CASES)
def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_adding_multiple_adapters_with_bias_raises(self, test_name, model_id, config_cls, config_kwargs):
self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs)
def test_weight_bias_attributes(self):
model = MLP()
config = LoraConfig(target_modules=["lin0"])
model = get_peft_model(model, config)
assert hasattr(model.base_model.model.lin0, "weight")
assert hasattr(model.base_model.model.lin0, "bias")
def test_existing_model_card(self):
# ensure that if there is already a model card, it is not overwritten
model = MLP()
config = LoraConfig(target_modules=["lin0"])
model = get_peft_model(model, config)
with tempfile.TemporaryDirectory() as tmp_dirname:
# create a model card
text = "---\nmeta: hello\n---\nThis is a model card\n"
with open(os.path.join(tmp_dirname, "README.md"), "w") as f:
f.write(text)
model.save_pretrained(tmp_dirname)
with open(os.path.join(tmp_dirname, "README.md")) as f:
model_card = f.read()
assert "library_name: peft" in model_card
assert "meta: hello" in model_card
assert "This is a model card" in model_card
def test_non_existing_model_card(self):
        # ensure that a model card is created even if none exists yet
model = MLP()
config = LoraConfig(target_modules=["lin0"])
model = get_peft_model(model, config)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
with open(os.path.join(tmp_dirname, "README.md")) as f:
model_card = f.read()
assert "library_name: peft" in model_card
# rough check that the model card is pre-filled
assert len(model_card) > 1000
@parameterized.expand(["auto", True, False])
def test_targeting_lora_to_embedding_layer(self, save_embedding_layers):
model = ModelEmbWithEmbeddingUtils()
config = LoraConfig(target_modules=["embed_tokens", "lin0"], init_lora_weights=False)
model = get_peft_model(model, config)
with tempfile.TemporaryDirectory() as tmp_dirname:
if save_embedding_layers == "auto":
# assert warning
msg_start = "Setting `save_embedding_layers` to `True` as embedding layers found in `target_modules`."
with pytest.warns(UserWarning, match=msg_start):
model.save_pretrained(tmp_dirname, save_embedding_layers=save_embedding_layers)
else:
model.save_pretrained(tmp_dirname, save_embedding_layers=save_embedding_layers)
from safetensors.torch import load_file as safe_load_file
state_dict = safe_load_file(os.path.join(tmp_dirname, "adapter_model.safetensors"))
if save_embedding_layers in ["auto", True]:
assert "base_model.model.embed_tokens.base_layer.weight" in state_dict
assert torch.allclose(
model.base_model.model.embed_tokens.base_layer.weight,
state_dict["base_model.model.embed_tokens.base_layer.weight"],
)
else:
assert "base_model.model.embed_tokens.base_layer.weight" not in state_dict
del state_dict
@parameterized.expand(["auto", True, False])
def test_targeting_lora_to_embedding_layer_non_transformers(self, save_embedding_layers):
model = ModelEmbConv1D()
config = LoraConfig(target_modules=["emb", "lin0"], init_lora_weights=False)
model = get_peft_model(model, config)
with tempfile.TemporaryDirectory() as tmp_dirname:
if save_embedding_layers is True:
with pytest.warns(
UserWarning,
match=r"Could not identify embedding layer\(s\) because the model is not a ð€ transformers model\.",
):
model.save_pretrained(tmp_dirname, save_embedding_layers=save_embedding_layers)
else:
model.save_pretrained(tmp_dirname, save_embedding_layers=save_embedding_layers)
from safetensors.torch import load_file as safe_load_file
state_dict = safe_load_file(os.path.join(tmp_dirname, "adapter_model.safetensors"))
assert "base_model.model.emb.base_layer.weight" not in state_dict
del state_dict
@parameterized.expand(
[
LoraConfig(target_modules=["lin0"], init_lora_weights=False),
LoKrConfig(target_modules=["lin0"], init_weights=False),
LoHaConfig(target_modules=["lin0"], init_weights=False),
AdaLoraConfig(target_modules=["lin0"], init_lora_weights=False),
IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"], init_ia3_weights=False),
OFTConfig(target_modules=["lin0"], init_weights=False),
]
)
def test_adapter_name_makes_no_difference(self, config0):
# It should not matter whether we use the default adapter name or a custom one
model_cls = MLP
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
# base model
torch.manual_seed(0)
base_model = model_cls().eval().to(self.torch_device)
output_base = base_model(input)
# default name
torch.manual_seed(0)
base_model = model_cls().eval().to(self.torch_device)
torch.manual_seed(0)
peft_model_default = get_peft_model(base_model, config0, adapter_name="default").eval().to(self.torch_device)
output_default = peft_model_default(input)
sd_default = peft_model_default.state_dict()
# custom name 1
torch.manual_seed(0)
base_model = model_cls().eval().to(self.torch_device)
torch.manual_seed(0)
peft_model_custom1 = get_peft_model(base_model, config0, adapter_name="adapter").eval().to(self.torch_device)
output_custom1 = peft_model_custom1(input)
sd_custom1 = peft_model_custom1.state_dict()
# custom name 2
torch.manual_seed(0)
base_model = model_cls().eval().to(self.torch_device)
torch.manual_seed(0)
peft_model_custom2 = (
get_peft_model(base_model, config0, adapter_name="other-name").eval().to(self.torch_device)
)
output_custom2 = peft_model_custom2(input)
sd_custom2 = peft_model_custom2.state_dict()
assert len(sd_default) == len(sd_custom1) == len(sd_custom2)
for key in sd_default:
key1 = key.replace("default", "adapter")
key2 = key.replace("default", "other-name")
assert key1 in sd_custom1
assert key2 in sd_custom2
for k0, k1, k2 in zip(sd_default, sd_custom1, sd_custom2):
assert torch.allclose(sd_default[k0], sd_custom1[k1])
assert torch.allclose(sd_default[k0], sd_custom2[k2])
assert not torch.allclose(output_base, output_default)
assert not torch.allclose(output_base, output_custom1)
assert not torch.allclose(output_base, output_custom2)
assert torch.allclose(output_custom1, output_custom2)
assert torch.allclose(output_default, output_custom1)
@parameterized.expand(["merge_and_unload", "unload"])
def test_double_wrapping_merge_and_unload(self, method):
# see issue #1485
from transformers import AutoModelForTokenClassification
model = AutoModelForTokenClassification.from_pretrained("hf-internal-testing/tiny-random-RobertaModel")
config = LoraConfig(task_type="TOKEN_CLS", target_modules="all-linear")
model = get_peft_model(model, config)
# first check that double-wrapping happened
# Note: this may get fixed in a future PR, in which case this test can be removed
assert isinstance(model.base_model.model.classifier, ModulesToSaveWrapper)
assert hasattr(model.base_model.model.classifier.original_module, "lora_A")
assert hasattr(model.base_model.model.classifier.modules_to_save.default, "lora_A")
# after unloading, despite double wrapping, the classifier module should be a normal nn.Linear layer
if method == "merge_and_unload":
unloaded = model.merge_and_unload()
else:
unloaded = model.unload()
assert isinstance(unloaded.classifier, nn.Linear)
class TestMultiRankAdapter(unittest.TestCase):
"""Tests related to multirank LoRA adapters"""
def test_multirank(self):
config_1 = LoraConfig(
r=8,
lora_alpha=8,
init_lora_weights=False,
target_modules=["lin0", "lin1"],
)
config_2 = LoraConfig(
r=8,
lora_alpha=8,
init_lora_weights=False,
target_modules=["lin0", "lin1"],
rank_pattern={"lin0": 4},
alpha_pattern={"lin0": 4},
)
# Add first adapter
model = get_peft_model(MLP(), config_1, adapter_name="first")
# Add second adapter
model.add_adapter("second", config_2)
# Extract current and expected ranks
rank_current = model.lin0.lora_A["second"].weight.shape[0]
rank_expected = config_2.rank_pattern["lin0"]
assert rank_current == rank_expected, f"Rank {rank_current} is not equal to expected {rank_expected}"
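    # Note (illustrative): `rank_pattern` / `alpha_pattern` map module names to
    # per-module overrides of the global `r` / `lora_alpha`, so in `config_2`
    # above "lin0" gets rank 4 while "lin1" keeps the default r=8.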
def test_multirank_2(self):
rank_pattern = {}
alpha_pattern = {}
r = 4
lora_alpha = 8
for i in range(10):
rank = 64 // (i + 1)
for j in range(2):
rank_pattern[f"layers.{i}.lin{j}"] = rank
alpha_pattern[f"layers.{i}.lin{j}"] = 2 * rank
config = LoraConfig(
r=r,
lora_alpha=lora_alpha,
init_lora_weights=False,
target_modules=["lin0", "lin1"],
rank_pattern=rank_pattern,
alpha_pattern=alpha_pattern,
)
# Add first adapter
model = get_peft_model(DeepMLP(), config, adapter_name="first")
# Add second adapter
model.add_adapter("second", config)
for adapter in ["first", "second"]:
for key, module in model.base_model.model.named_modules():
if isinstance(module, BaseTunerLayer):
rank_expected = rank_pattern.get(key, r)
rank_current = module.lora_A[adapter].weight.shape[0]
assert (
rank_current == rank_expected
), f"Rank {rank_current} is not equal to expected {rank_expected}"
class TestRepr(unittest.TestCase):
"""Tests related to the repr of adapted models"""
def test_repr_lora_linear(self):
config = LoraConfig(target_modules=["lin0"])
model = get_peft_model(MLP(), config)
print_output = repr(model.model.lin0)
assert print_output.startswith("lora.Linear")
assert "in_features=10" in print_output
assert "out_features=20" in print_output
assert "lora_A" in print_output
assert "lora_B" in print_output
assert "default" in print_output
def test_repr_lora_embedding(self):
config = LoraConfig(target_modules=["emb"])
model = get_peft_model(ModelEmbConv1D(), config)
print_output = repr(model.model.emb)
assert print_output.startswith("lora.Embedding")
assert "100, 5" in print_output
assert "lora_embedding_A" in print_output
assert "lora_embedding_B" in print_output
assert "default" in print_output
def test_repr_lora_conv1d(self):
config = LoraConfig(target_modules=["conv1d"])
model = get_peft_model(ModelEmbConv1D(), config)
print_output = repr(model.model.conv1d)
assert print_output.startswith("lora.Linear")
assert "in_features=5" in print_output
assert "out_features=1" in print_output
assert "lora_A" in print_output
assert "lora_B" in print_output
assert "default" in print_output
def test_repr_lora_conv2d(self):
config = LoraConfig(target_modules=["conv2d"])
model = get_peft_model(ModelConv2D(), config)
print_output = repr(model.model.conv2d)
assert print_output.startswith("lora.Conv2d")
assert "5, 10" in print_output
assert "kernel_size=(3, 3)" in print_output
assert "stride=(1, 1)" in print_output
assert "lora_A" in print_output
assert "lora_B" in print_output
assert "default" in print_output
class MultipleActiveAdaptersTester(unittest.TestCase):
"""
A test class to test the functionality of multiple active adapters.
This is not specifically tied to custom models, it's just easy to test here and testing it on all types of models
would be overkill.
"""
def prepare_inputs_for_testing(self):
X = torch.arange(90).view(9, 10)
return {"X": X}
def set_multiple_active_adapters(self, model, adapter_names):
for module in model.modules():
if isinstance(module, BaseTunerLayer):
module.set_adapter(adapter_names)
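    # Note (hedged): PeftModel.set_adapter activates a single adapter, whereas
    # BaseTunerLayer.set_adapter also accepts a list of adapter names, so
    # iterating over the tuner layers as above appears to be the way to make
    # several adapters active at the same time.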
@parameterized.expand(MULTIPLE_ACTIVE_ADAPTERS_TEST_CASES)
def test_multiple_active_adapters_forward(
self, test_name, tuner_method, config_cls, config_kwargs_1, config_kwargs_2
):
torch.manual_seed(0)
model = MLP(bias=tuner_method != "ia3")
model.eval()
X = self.prepare_inputs_for_testing()
config_1 = config_cls(**config_kwargs_1)
config_2 = config_cls(**config_kwargs_2)
peft_model = get_peft_model(model, config_1, adapter_name="adapter_1")
peft_model.add_adapter("adapter_2", config_2)
# set adapter_1
peft_model.set_adapter("adapter_1")
adapter_1_output = peft_model(**X)
# set adapter_2
peft_model.set_adapter("adapter_2")
adapter_2_output = peft_model(**X)
# set ["adapter_1", "adapter_2"]
self.set_multiple_active_adapters(peft_model, ["adapter_1", "adapter_2"])
combined_output = peft_model(**X)
assert not torch.allclose(adapter_1_output, adapter_2_output, atol=1e-5)
assert not torch.allclose(adapter_1_output, combined_output, atol=1e-5)
assert not torch.allclose(adapter_2_output, combined_output, atol=1e-5)
if tuner_method == "lora":
# create a weighted adapter combining both adapters and check that
# its output is same as setting multiple active adapters
peft_model.add_weighted_adapter(
["adapter_1", "adapter_2"], [1.0, 1.0], "new_combined_adapter", combination_type="cat"
)
peft_model.set_adapter("new_combined_adapter")
new_combined_output = peft_model(**X)
assert torch.allclose(new_combined_output, combined_output, atol=1e-5)
@parameterized.expand(MULTIPLE_ACTIVE_ADAPTERS_TEST_CASES)
def test_multiple_active_adapters_merge_and_unmerge(
self, test_name, tuner_method, config_cls, config_kwargs_1, config_kwargs_2
):
torch.manual_seed(0)
model = MLP(bias=tuner_method != "ia3")
model.eval()
X = self.prepare_inputs_for_testing()
base_output = model(**X)
config_1 = config_cls(**config_kwargs_1)
config_2 = config_cls(**config_kwargs_2)
peft_model = get_peft_model(model, config_1, adapter_name="adapter_1")
peft_model.add_adapter("adapter_2", config_2)
# set ["adapter_1", "adapter_2"]
self.set_multiple_active_adapters(peft_model, ["adapter_1", "adapter_2"])
combined_output = peft_model(**X)
peft_model.merge_adapter()
merged_combined_output = peft_model(**X)
assert torch.allclose(merged_combined_output, combined_output, atol=1e-5)
peft_model.unmerge_adapter()
with peft_model.disable_adapter():
disabled_adapter_output = peft_model(**X)
assert torch.allclose(disabled_adapter_output, base_output, atol=1e-4)
@parameterized.expand(MULTIPLE_ACTIVE_ADAPTERS_TEST_CASES)
def test_merge_layers_multi(self, test_name, tuner_method, config_cls, config_kwargs_1, config_kwargs_2):
torch.manual_seed(0)
model = MLP(bias=tuner_method != "ia3")
model.eval()
config_1 = config_cls(**config_kwargs_1)
config_2 = config_cls(**config_kwargs_2)
model = get_peft_model(model, config_1)
dummy_input = self.prepare_inputs_for_testing()
model.eval()
with torch.inference_mode():
logits_adapter_1 = model(**dummy_input)[0]
model.add_adapter("adapter-2", config_2)
model.set_adapter("adapter-2")
model.eval()
with torch.inference_mode():
logits_adapter_2 = model(**dummy_input)[0]
assert not torch.allclose(logits_adapter_1, logits_adapter_2, atol=1e-3, rtol=1e-3)
model.set_adapter("default")
with torch.inference_mode():
logits_adapter_1_after_set = model(**dummy_input)[0]
assert torch.allclose(logits_adapter_1_after_set, logits_adapter_1, atol=1e-3, rtol=1e-3)
model_copy = copy.deepcopy(model)
model_copy_2 = copy.deepcopy(model)
model_merged_all = model.merge_and_unload(adapter_names=["adapter-2", "default"])
with torch.inference_mode():
logits_merged_all = model_merged_all(**dummy_input)[0]
assert not torch.allclose(logits_merged_all, logits_adapter_2, atol=1e-3, rtol=1e-3)
assert not torch.allclose(logits_merged_all, logits_adapter_1, atol=1e-3, rtol=1e-3)
model_merged_adapter_2 = model_copy.merge_and_unload(adapter_names=["adapter-2"])
with torch.inference_mode():
logits_merged_adapter_2 = model_merged_adapter_2(**dummy_input)[0]
assert torch.allclose(logits_merged_adapter_2, logits_adapter_2, atol=1e-3, rtol=1e-3)
model_merged_adapter_default = model_copy_2.merge_and_unload(adapter_names=["default"])
with torch.inference_mode():
logits_merged_adapter_default = model_merged_adapter_default(**dummy_input)[0]
assert torch.allclose(logits_merged_adapter_default, logits_adapter_1, atol=1e-3, rtol=1e-3)
class RequiresGradTester(unittest.TestCase):
"""Test that requires_grad is set correctly in specific circumstances
# See issue #899.
This is not specifically tied to custom models, it's just easy to test here and testing it on all types of models
would be overkill.
"""
def check_requires_grad(self, model, *params_expected: str):
# Check that only the given parameters have requires_grad=True, and all others have requires_grad=False.
# Calling without arguments besides the model means that all parameters should have requires_grad=False.
params_with_requires_grad = [name for name, param in model.named_parameters() if param.requires_grad]
diff = set(params_expected).symmetric_difference(set(params_with_requires_grad))
msg = f"Expected {params_expected} to require gradients, got {params_with_requires_grad}"
assert len(diff) == 0, msg
def test_requires_grad_modules_to_save_default(self):
config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
peft_model = get_peft_model(MLP(), config)
self.check_requires_grad(
peft_model,
"base_model.model.lin1.modules_to_save.default.weight",
"base_model.model.lin1.modules_to_save.default.bias",
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
def test_requires_grad_modules_to_save_disabling(self):
config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
peft_model = get_peft_model(MLP(), config)
# when disabling the adapter, the original module's grad should be enabled and vice versa
peft_model.disable_adapter_layers()
self.check_requires_grad(
peft_model,
"base_model.model.lin1.original_module.weight",
"base_model.model.lin1.original_module.bias",
)
# when re-enabling the adapter, the original module's grad should be disabled and vice versa
peft_model.enable_adapter_layers()
self.check_requires_grad(
peft_model,
"base_model.model.lin1.modules_to_save.default.weight",
"base_model.model.lin1.modules_to_save.default.bias",
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
# when using the disable_adapter context, the original module's grad should be enabled and vice versa
with peft_model.disable_adapter():
self.check_requires_grad(
peft_model,
"base_model.model.lin1.original_module.weight",
"base_model.model.lin1.original_module.bias",
)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.modules_to_save.default.weight",
"base_model.model.lin1.modules_to_save.default.bias",
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
def test_requires_grad_modules_to_save_multiple_adapters(self):
config0 = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin1.modules_to_save.default.weight",
"base_model.model.lin1.modules_to_save.default.bias",
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.modules_to_save.default.weight",
"base_model.model.lin1.modules_to_save.default.bias",
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
# set config1 as active, should lead to adapter1 requiring grad
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.modules_to_save.adapter1.weight",
"base_model.model.lin1.modules_to_save.adapter1.bias",
"base_model.model.lin0.lora_A.adapter1.weight",
"base_model.model.lin0.lora_B.adapter1.weight",
)
def test_requires_grad_lora_different_targets(self):
# test two different LoRA adapters that target different modules
config0 = LoraConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoraConfig(target_modules=["lin1"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.lora_A.adapter1.weight",
"base_model.model.lin1.lora_B.adapter1.weight",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.lora_A.adapter1.weight",
"base_model.model.lin1.lora_B.adapter1.weight",
)
def test_requires_grad_lora_same_targets(self):
# same as previous test, except that LoRA adapters target the same layer
config0 = LoraConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoraConfig(target_modules=["lin0"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.adapter1.weight",
"base_model.model.lin0.lora_B.adapter1.weight",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.adapter1.weight",
"base_model.model.lin0.lora_B.adapter1.weight",
)
def test_requires_grad_ia3_different_targets(self):
# test two different IA3 adapters that target different modules
config0 = IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = IA3Config(target_modules=["lin1"], feedforward_modules=["lin1"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.ia3_l.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.ia3_l.adapter1",
)
def test_requires_grad_ia3_same_targets(self):
# same as previous test, except that IA3 adapters target the same layer
config0 = IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.adapter1",
)
def test_requires_grad_adalora_different_targets(self):
# test two different AdaLora adapters that target different modules
config0 = AdaLoraConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = AdaLoraConfig(target_modules=["lin1"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default",
"base_model.model.lin0.lora_B.default",
"base_model.model.lin0.lora_E.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default",
"base_model.model.lin0.lora_B.default",
"base_model.model.lin0.lora_E.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.lora_A.adapter1",
"base_model.model.lin1.lora_B.adapter1",
"base_model.model.lin1.lora_E.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.lora_A.adapter1",
"base_model.model.lin1.lora_B.adapter1",
"base_model.model.lin1.lora_E.adapter1",
)
def test_requires_grad_adalora_same_targets(self):
# same as previous test, except that AdaLora adapters target the same layer
config0 = AdaLoraConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = AdaLoraConfig(target_modules=["lin0"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default",
"base_model.model.lin0.lora_B.default",
"base_model.model.lin0.lora_E.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default",
"base_model.model.lin0.lora_B.default",
"base_model.model.lin0.lora_E.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.adapter1",
"base_model.model.lin0.lora_B.adapter1",
"base_model.model.lin0.lora_E.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.adapter1",
"base_model.model.lin0.lora_B.adapter1",
"base_model.model.lin0.lora_E.adapter1",
)
def test_requires_grad_lora_conv2d(self):
# test two different LoRA adapters that target different modules
config0 = LoraConfig(target_modules=["conv2d"])
peft_model = get_peft_model(ModelConv2D(), config0)
config1 = LoraConfig(target_modules=["lin0"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.conv2d.lora_A.default.weight",
"base_model.model.conv2d.lora_B.default.weight",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.conv2d.lora_A.default.weight",
"base_model.model.conv2d.lora_B.default.weight",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.adapter1.weight",
"base_model.model.lin0.lora_B.adapter1.weight",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.adapter1.weight",
"base_model.model.lin0.lora_B.adapter1.weight",
)
def test_requires_grad_lora_emb_conv1d(self):
# test two different LoRA adapters that target different modules
config0 = LoraConfig(target_modules=["conv1d"])
peft_model = get_peft_model(ModelEmbConv1D(), config0)
config1 = LoraConfig(target_modules=["emb"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.conv1d.lora_A.default.weight",
"base_model.model.conv1d.lora_B.default.weight",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.conv1d.lora_A.default.weight",
"base_model.model.conv1d.lora_B.default.weight",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.emb.lora_embedding_A.adapter1",
"base_model.model.emb.lora_embedding_B.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.emb.lora_embedding_A.adapter1",
"base_model.model.emb.lora_embedding_B.adapter1",
)
def test_requires_grad_ia3_conv1d(self):
        # test two different IA3 adapters that target different modules
config0 = IA3Config(target_modules=["conv1d"], feedforward_modules=[])
peft_model = get_peft_model(ModelEmbConv1D(), config0)
config1 = IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.conv1d.ia3_l.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.conv1d.ia3_l.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.adapter1",
)
def test_requires_grad_ia3_conv2d(self):
        # test two different IA3 adapters that target different modules
config0 = IA3Config(target_modules=["conv2d"], feedforward_modules=["conv2d"])
peft_model = get_peft_model(ModelConv2D(), config0)
config1 = IA3Config(target_modules=["lin0"], feedforward_modules=[])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.conv2d.ia3_l.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.conv2d.ia3_l.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.adapter1",
)
def test_requires_grad_loha_different_targets(self):
# test two different LoHa adapters that target different modules
config0 = LoHaConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoHaConfig(target_modules=["lin1"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
        # active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hada_w1_a.default",
"base_model.model.lin0.hada_w1_b.default",
"base_model.model.lin0.hada_w2_a.default",
"base_model.model.lin0.hada_w2_b.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hada_w1_a.default",
"base_model.model.lin0.hada_w1_b.default",
"base_model.model.lin0.hada_w2_a.default",
"base_model.model.lin0.hada_w2_b.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.hada_w1_a.adapter1",
"base_model.model.lin1.hada_w1_b.adapter1",
"base_model.model.lin1.hada_w2_a.adapter1",
"base_model.model.lin1.hada_w2_b.adapter1",
)
        # disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.hada_w1_a.adapter1",
"base_model.model.lin1.hada_w1_b.adapter1",
"base_model.model.lin1.hada_w2_a.adapter1",
"base_model.model.lin1.hada_w2_b.adapter1",
)
def test_requires_grad_loha_same_targets(self):
# same as previous test, except that LoHa adapters target the same layer
config0 = LoHaConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoHaConfig(target_modules=["lin0"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hada_w1_a.default",
"base_model.model.lin0.hada_w1_b.default",
"base_model.model.lin0.hada_w2_a.default",
"base_model.model.lin0.hada_w2_b.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hada_w1_a.default",
"base_model.model.lin0.hada_w1_b.default",
"base_model.model.lin0.hada_w2_a.default",
"base_model.model.lin0.hada_w2_b.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hada_w1_a.adapter1",
"base_model.model.lin0.hada_w1_b.adapter1",
"base_model.model.lin0.hada_w2_a.adapter1",
"base_model.model.lin0.hada_w2_b.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hada_w1_a.adapter1",
"base_model.model.lin0.hada_w1_b.adapter1",
"base_model.model.lin0.hada_w2_a.adapter1",
"base_model.model.lin0.hada_w2_b.adapter1",
)
def test_requires_grad_lokr_different_targets(self):
# test two different LoKr adapters that target different modules
config0 = LoKrConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoKrConfig(target_modules=["lin1"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
        # active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lokr_w1.default",
"base_model.model.lin0.lokr_w2.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lokr_w1.default",
"base_model.model.lin0.lokr_w2.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.lokr_w1.adapter1",
"base_model.model.lin1.lokr_w2.adapter1",
)
        # disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.lokr_w1.adapter1",
"base_model.model.lin1.lokr_w2.adapter1",
)
def test_requires_grad_lokr_same_targets(self):
# same as previous test, except that LoKr adapters target the same layer
config0 = LoKrConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoKrConfig(target_modules=["lin0"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lokr_w1.default",
"base_model.model.lin0.lokr_w2.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lokr_w1.default",
"base_model.model.lin0.lokr_w2.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lokr_w1.adapter1",
"base_model.model.lin0.lokr_w2.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lokr_w1.adapter1",
"base_model.model.lin0.lokr_w2.adapter1",
)
def test_requires_grad_oft_different_targets(self):
# test two different OFT adapters that target different modules
config0 = OFTConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = OFTConfig(target_modules=["lin1"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
        # active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.oft_r.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.oft_r.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.oft_r.adapter1",
)
        # disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.oft_r.adapter1",
)
def test_requires_grad_oft_same_targets(self):
# same as previous test, except that OFT adapters target the same layer
config0 = OFTConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = OFTConfig(target_modules=["lin0"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.oft_r.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.oft_r.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.oft_r.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.oft_r.adapter1",
)
class TestMixedAdapterBatches:
torch_device = infer_device()
@pytest.fixture
def mlp_lora(self):
"""A simple MLP with 2 LoRA adapters"""
torch.manual_seed(0)
base_model = MLP().to(self.torch_device).eval()
config0 = LoraConfig(target_modules=["lin0"], init_lora_weights=False)
config1 = LoraConfig(target_modules=["lin0"], r=16, init_lora_weights=False)
peft_model = get_peft_model(base_model, config0, "adapter0").eval()
peft_model.add_adapter("adapter1", config1)
return peft_model
def run_checks(self, model, inputs):
# This checks that we can have mixed adapters in a single batch. The test works by creating the outputs for the
# base model, adapter 0, and adapter 1 separately. Then, we create an output with mixed adapters, where the
        # samples [0, 3, 6] are for the base model, [1, 4, 7] for adapter 0, and [2, 5, 8] for adapter 1. Finally, we
# check that the outputs of the mixed batch are correct for the corresponding indices.
adapter_name0, adapter_name1 = model.peft_config.keys()
with model.disable_adapter():
output_base = model(**inputs)
model.set_adapter(adapter_name0)
output0 = model(**inputs)
# sanity check, outputs are not the same
assert not torch.allclose(output_base, output0)
model.set_adapter(adapter_name1)
output1 = model(**inputs)
# sanity check, outputs have the right shape and are not the same
assert len(output_base) >= 3
assert len(output_base) == len(output0) == len(output1)
assert not torch.allclose(output_base, output0)
assert not torch.allclose(output_base, output1)
        # set adapter_names so that it alternates between base, adapter 0, and adapter 1
adapters = ["__base__", adapter_name0, adapter_name1]
inputs["adapter_names"] = [adapters[i % 3] for i in (range(len(inputs["X"])))]
output_mixed = model.forward(**inputs)
assert torch.allclose(output_base[::3], output_mixed[::3])
assert torch.allclose(output0[1::3], output_mixed[1::3])
assert torch.allclose(output1[2::3], output_mixed[2::3])
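    # Illustrative usage sketch (hypothetical adapter names): mixed-batch
    # routing is requested by passing `adapter_names` with one entry per
    # sample; the reserved name "__base__" routes a sample through the
    # unadapted base weights:
    #
    #     out = peft_model(X=X, adapter_names=["__base__", "adapter0", "adapter1"])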
def test_mixed_adapter_batches_lora_mlp(self, mlp_lora):
inputs = {"X": torch.arange(90).view(-1, 10).to(self.torch_device)}
self.run_checks(mlp_lora, inputs)
def test_mixed_adapter_batches_lora_different_target_layers(self, mlp_lora):
base_model = MLP().to(self.torch_device).eval()
# target different lora layers
config0 = LoraConfig(target_modules=["lin0"], init_lora_weights=False)
config1 = LoraConfig(target_modules=["lin1"], init_lora_weights=False)
peft_model = get_peft_model(base_model, config0, "adapter0").eval()
peft_model.add_adapter("adapter1", config1)
inputs = {"X": torch.arange(90).view(-1, 10).to(self.torch_device)}
self.run_checks(peft_model, inputs)
def test_mixed_adapter_batches_lora_partly_overlapping_target_layers(self, mlp_lora):
base_model = MLP().to(self.torch_device).eval()
        # target partly overlapping lora layers
config0 = LoraConfig(target_modules=["lin0"], init_lora_weights=False)
config1 = LoraConfig(target_modules=["lin0", "lin1"], init_lora_weights=False)
peft_model = get_peft_model(base_model, config0, "adapter0").eval()
peft_model.add_adapter("adapter1", config1)
inputs = {"X": torch.arange(90).view(-1, 10).to(self.torch_device)}
self.run_checks(peft_model, inputs)
def test_mixed_adapter_batches_lora_conv1d_emb(self):
base_model = ModelEmbConv1D().to(self.torch_device).eval()
config0 = LoraConfig(target_modules=["emb", "conv1d"], init_lora_weights=False)
config1 = LoraConfig(target_modules=["emb", "conv1d"], r=16, init_lora_weights=False)
peft_model = get_peft_model(base_model, config0, "adapter0").eval()
peft_model.add_adapter("adapter1", config1)
inputs = {"X": torch.arange(90).view(-1, 10).to(self.torch_device)}
self.run_checks(peft_model, inputs)
def test_mixed_adapter_batches_lora_conv2d(self):
base_model = ModelConv2D().to(self.torch_device).eval()
config0 = LoraConfig(target_modules=["conv2d"], init_lora_weights=False)
config1 = LoraConfig(target_modules=["conv2d"], r=16, init_lora_weights=False)
peft_model = get_peft_model(base_model, config0, "adapter0").eval()
peft_model.add_adapter("adapter1", config1)
inputs = {"X": torch.arange(270).view(6, 5, 3, 3).to(self.torch_device)}
self.run_checks(peft_model, inputs)
def test_mixed_adapter_batches_lora_length_mismatch_raises(self, mlp_lora):
inputs = {
"X": torch.arange(90).view(-1, 10).to(self.torch_device),
"adapter_names": ["__base__"] * 5, # wrong length!
}
msg = r"Length of `adapter_names` should be the same as the number of inputs, but got "
with pytest.raises(ValueError, match=msg):
mlp_lora.forward(**inputs)
def test_mixed_adapter_batches_lora_training_mode_raises(self, mlp_lora):
inputs = {
"X": torch.arange(90).view(-1, 10).to(self.torch_device),
"adapter_names": ["__base__"] * 9,
}
mlp_lora = mlp_lora.train()
msg = r"Cannot pass `adapter_names` when the model is in training mode."
with pytest.raises(ValueError, match=msg):
mlp_lora.forward(**inputs)
def test_mixed_adapter_batches_lora_disabled(self, mlp_lora):
# Disabling adapters should have precedence over passing adapter names
inputs = {"X": torch.arange(90).view(-1, 10).to(self.torch_device)}
with mlp_lora.disable_adapter():
output_disabled = mlp_lora(**inputs)
adapters = ["__base__", "adapter0", "adapter1"]
inputs["adapter_names"] = [adapters[i % 3] for i in (range(len(inputs["X"])))]
with mlp_lora.disable_adapter():
output_mixed = mlp_lora.forward(**inputs)
assert torch.allclose(output_disabled, output_mixed)
def test_mixed_adapter_batches_lora_merged_raises(self, mlp_lora):
# When there are merged adapters, passing adapter names should raise an error
inputs = {
"X": torch.arange(90).view(-1, 10).to(self.torch_device),
"adapter_names": ["default"] * 9,
}
mlp_lora.merge_adapter(["adapter0"])
msg = r"Cannot pass `adapter_names` when there are merged adapters, please call `unmerge_adapter` first."
with pytest.raises(ValueError, match=msg):
mlp_lora.forward(**inputs)
def test_mixed_adapter_batches_lora_with_dora_raises(self):
# When there are Dora adapters, passing adapter names should raise an error
torch.manual_seed(0)
inputs = {
"X": torch.arange(90).view(-1, 10).to(self.torch_device),
"adapter_names": ["default"] * 9,
}
base_model = MLP().to(self.torch_device).eval()
config = LoraConfig(target_modules=["lin0"], init_lora_weights=False, use_dora=True)
peft_model = get_peft_model(base_model, config).eval()
msg = r"Cannot pass `adapter_names` when DoRA is enabled."
with pytest.raises(ValueError, match=msg):
peft_model.forward(**inputs)
@require_torch_gpu
def test_mixed_adapter_batches_lora_opt_timing(self):
# Use a more realistic model (opt-125m) and do a simple runtime check to ensure that mixed adapter batches
# don't add too much overhead. These types of tests are inherently flaky, so we try to add in some robustness.
logs = [] # store the time it takes to run each forward pass here
@contextmanager
def timed():
tic = time.perf_counter()
yield
toc = time.perf_counter()
logs.append(toc - tic)
base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").to(self.torch_device).eval()
inputs = {"input_ids": torch.randint(0, 1000, (16, 64)).to(self.torch_device)}
with timed():
output_base = base_model(**inputs).logits
config0 = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False)
peft_model = get_peft_model(base_model, config0, "adapter1").eval()
with timed():
output0 = peft_model(**inputs).logits
# sanity check, outputs are not the same
assert not torch.allclose(output_base, output0)
config1 = LoraConfig(task_type="CAUSAL_LM", r=16, init_lora_weights=False)
peft_model.add_adapter("adapter2", config1)
peft_model.set_adapter("adapter2")
with timed():
output1 = peft_model(**inputs).logits
# sanity check, outputs are not the same
assert not torch.allclose(output_base, output1)
        # set adapter_names so that it alternates between base, adapter1, and adapter2
adapters = ["__base__", "adapter1", "adapter2"]
inputs["adapter_names"] = [adapters[i % 3] for i in (range(len(inputs["input_ids"])))]
with timed():
output_mixed = peft_model.forward(**inputs).logits
atol, rtol = 1e-4, 1e-4
assert torch.allclose(output_base[::3], output_mixed[::3], atol=atol, rtol=rtol)
assert torch.allclose(output0[1::3], output_mixed[1::3], atol=atol, rtol=rtol)
assert torch.allclose(output1[2::3], output_mixed[2::3], atol=atol, rtol=rtol)
# Check that the overhead in time added by mixed batches is not too high.
# To prevent flakiness, we measure mixed inference 3 times and take the lowest value, then compare it to the mean
# of the non-mixed inference times. We also grant a generous margin of 2x the mean time.
with timed():
output_mixed = peft_model.forward(**inputs).logits
with timed():
output_mixed = peft_model.forward(**inputs).logits
time_base, time0, time1, *time_mixed = logs
time_non_mixed = (time_base + time0 + time1) / 3
time_mixed = min(time_mixed)
factor = 2.0
assert time_mixed < factor * time_non_mixed
# Measure timing of running base and adapter separately vs using a mixed batch. Note that on CPU, the
# differences are quite small, so this test requires GPU to avoid flakiness.
for _ in range(3):
with timed():
with peft_model.disable_adapter():
peft_model(**{k: v[::3] for k, v in inputs.items()})
peft_model.set_adapter("adapter1")
peft_model(**{k: v[1::3] for k, v in inputs.items()})
peft_model.set_adapter("adapter2")
peft_model(**{k: v[2::3] for k, v in inputs.items()})
times_separate = logs[-3:]
time_separate = sum(times_separate) / 3
assert time_separate > time_mixed
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from contextlib import contextmanager
import numpy as np
import pytest
import torch
from peft.import_utils import is_aqlm_available, is_auto_awq_available, is_auto_gptq_available, is_optimum_available
def require_torch_gpu(test_case):
"""
Decorator marking a test that requires a GPU. Will be skipped when no GPU is available.
"""
if not torch.cuda.is_available():
return unittest.skip("test requires GPU")(test_case)
else:
return test_case
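# Illustrative usage (sketch):
#
#     @require_torch_gpu
#     def test_needs_cuda(self):
#         ...  # skipped automatically when no CUDA device is present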
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires multiple GPUs. Will be skipped when less than 2 GPUs are available.
"""
if not torch.cuda.is_available() or torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library is not installed.
"""
try:
import bitsandbytes # noqa: F401
test_case = pytest.mark.bitsandbytes(test_case)
except ImportError:
test_case = pytest.mark.skip(reason="test requires bitsandbytes")(test_case)
return test_case
def require_auto_gptq(test_case):
"""
Decorator marking a test that requires auto-gptq. These tests are skipped when auto-gptq isn't installed.
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm. These tests are skipped when aqlm isn't installed.
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_auto_awq(test_case):
"""
Decorator marking a test that requires auto-awq. These tests are skipped when auto-awq isn't installed.
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires auto-awq")(test_case)
def require_optimum(test_case):
"""
Decorator marking a test that requires optimum. These tests are skipped when optimum isn't installed.
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
@contextmanager
def temp_seed(seed: int):
"""Temporarily set the random seed. This works for python numpy, pytorch."""
np_state = np.random.get_state()
np.random.seed(seed)
torch_state = torch.random.get_rng_state()
torch.random.manual_seed(seed)
if torch.cuda.is_available():
torch_cuda_states = torch.cuda.get_rng_state_all()
torch.cuda.manual_seed_all(seed)
try:
yield
finally:
np.random.set_state(np_state)
torch.random.set_rng_state(torch_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state_all(torch_cuda_states)
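# Illustrative usage (sketch): draws inside the block are reproducible and the
# surrounding RNG state is restored on exit:
#
#     with temp_seed(0):
#         a = torch.randn(2)
#     with temp_seed(0):
#         b = torch.randn(2)
#     assert torch.equal(a, b)  # same seed => same draws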
def get_state_dict(model, unwrap_compiled=True):
"""
Get the state dict of a model. If the model is compiled, unwrap it first.
"""
if unwrap_compiled:
model = getattr(model, "_orig_mod", model)
return model.state_dict()
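# Illustrative note (sketch, hypothetical module): torch.compile wraps a module
# and keeps the original under `_orig_mod`, so unwrapping yields state dict
# keys without the "_orig_mod." prefix:
#
#     compiled = torch.compile(my_module)
#     sd = get_state_dict(compiled)  # same keys as my_module.state_dict()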
# Archived Changes
### Nov 22, 2021
* A number of updated weights and new model defs
* `eca_halonext26ts` - 79.5 @ 256
* `resnet50_gn` (new) - 80.1 @ 224, 81.3 @ 288
* `resnet50` - 80.7 @ 224, 80.9 @ 288 (trained at 176, not replacing current a1 weights as default since these don't scale as well to higher res, [weights](https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1h2_176-001a1197.pth))
* `resnext50_32x4d` - 81.1 @ 224, 82.0 @ 288
* `sebotnet33ts_256` (new) - 81.2 @ 224
* `lamhalobotnet50ts_256` - 81.5 @ 256
* `halonet50ts` - 81.7 @ 256
* `halo2botnet50ts_256` - 82.0 @ 256
* `resnet101` - 82.0 @ 224, 82.8 @ 288
* `resnetv2_101` (new) - 82.1 @ 224, 83.0 @ 288
* `resnet152` - 82.8 @ 224, 83.5 @ 288
* `regnetz_d8` (new) - 83.5 @ 256, 84.0 @ 320
* `regnetz_e8` (new) - 84.5 @ 256, 85.0 @ 320
* `vit_base_patch8_224` (85.8 top-1) & `in21k` variant weights added thanks [Martins Bruveris](https://github.com/martinsbruveris)
* Groundwork in for FX feature extraction thanks to [Alexander Soare](https://github.com/alexander-soare)
* models updated for tracing compatibility (almost full support with some distilled transformer exceptions)
### Oct 19, 2021
* ResNet strikes back (https://arxiv.org/abs/2110.00476) weights added, plus any extra training components used. Model weights and some more details here (https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-rsb-weights)
* BCE loss and Repeated Augmentation support for RSB paper
* 4 series of ResNet based attention model experiments being added (implemented across byobnet.py/byoanet.py). These include all sorts of attention, from channel attn like SE, ECA to 2D QKV self-attention layers such as Halo, Bottleneck, Lambda. Details here (https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-attn-weights)
* Working implementations of the following 2D self-attention modules (likely to be differences from paper or eventual official impl):
* Halo (https://arxiv.org/abs/2103.12731)
* Bottleneck Transformer (https://arxiv.org/abs/2101.11605)
* LambdaNetworks (https://arxiv.org/abs/2102.08602)
* A RegNetZ series of models with some attention experiments (being added to). These do not follow the paper (https://arxiv.org/abs/2103.06877) in any way other than block architecture, details of official models are not available. See more here (https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-attn-weights)
* ConvMixer (https://openreview.net/forum?id=TVHS5Y4dNvM), CrossVit (https://arxiv.org/abs/2103.14899), and BeiT (https://arxiv.org/abs/2106.08254) architectures + weights added
* freeze/unfreeze helpers by [Alexander Soare](https://github.com/alexander-soare)
### Aug 18, 2021
* Optimizer bonanza!
* Add LAMB and LARS optimizers, incl trust ratio clipping options. Tweaked to work properly in PyTorch XLA (tested on TPUs w/ `timm bits` [branch](https://github.com/rwightman/pytorch-image-models/tree/bits_and_tpu/timm/bits))
* Add MADGRAD from FB research w/ a few tweaks (decoupled decay option, step handling that works with PyTorch XLA)
* Some cleanup on all optimizers and factory. No more `.data`, a bit more consistency, unit tests for all!
* SGDP and AdamP still won't work with PyTorch XLA but others should (have yet to test Adabelief, Adafactor, Adahessian myself).
* EfficientNet-V2 XL TF ported weights added, but they don't validate well in PyTorch (L is better). The pre-processing for the V2 TF training is a bit diff and the fine-tuned 21k -> 1k weights are very sensitive and less robust than the 1k weights.
* Added PyTorch trained EfficientNet-V2 'Tiny' w/ GlobalContext attn weights. Only .1-.2 top-1 better than the SE so more of a curiosity for those interested.
### July 12, 2021
* Add XCiT models from [official facebook impl](https://github.com/facebookresearch/xcit). Contributed by [Alexander Soare](https://github.com/alexander-soare)
### July 5-9, 2021
* Add `efficientnetv2_rw_t` weights, a custom 'tiny' 13.6M param variant that is a bit better than (non NoisyStudent) B3 models. Both faster and better accuracy (at same or lower res)
* top-1 82.34 @ 288x288 and 82.54 @ 320x320
* Add [SAM pretrained](https://arxiv.org/abs/2106.01548) in1k weight for ViT B/16 (`vit_base_patch16_sam_224`) and B/32 (`vit_base_patch32_sam_224`) models.
* Add 'Aggregating Nested Transformer' (NesT) w/ weights converted from official [Flax impl](https://github.com/google-research/nested-transformer). Contributed by [Alexander Soare](https://github.com/alexander-soare).
* `jx_nest_base` - 83.534, `jx_nest_small` - 83.120, `jx_nest_tiny` - 81.426
### June 23, 2021
* Reproduce gMLP model training, `gmlp_s16_224` trained to 79.6 top-1, matching [paper](https://arxiv.org/abs/2105.08050). Hparams for this and other recent MLP training [here](https://gist.github.com/rwightman/d6c264a9001f9167e06c209f630b2cc6)
### June 20, 2021
* Release Vision Transformer 'AugReg' weights from [How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers](https://arxiv.org/abs/2106.10270)
* .npz weight loading support added, can load any of the 50K+ weights from the [AugReg series](https://console.cloud.google.com/storage/browser/vit_models/augreg)
* See [example notebook](https://colab.research.google.com/github/google-research/vision_transformer/blob/master/vit_jax_augreg.ipynb) from [official impl](https://github.com/google-research/vision_transformer/) for navigating the augreg weights
* Replaced all default weights w/ best AugReg variant (if possible). All AugReg 21k classifiers work.
* Highlights: `vit_large_patch16_384` (87.1 top-1), `vit_large_r50_s32_384` (86.2 top-1), `vit_base_patch16_384` (86.0 top-1)
* `vit_deit_*` renamed to just `deit_*`
* Remove my old small model, replace with DeiT compatible small w/ AugReg weights
* Add 1st training of my `gmixer_24_224` MLP /w GLU, 78.1 top-1 w/ 25M params.
* Add weights from official ResMLP release (https://github.com/facebookresearch/deit)
* Add `eca_nfnet_l2` weights from my 'lightweight' series. 84.7 top-1 at 384x384.
* Add distilled BiT 50x1 student and 152x2 Teacher weights from [Knowledge distillation: A good teacher is patient and consistent](https://arxiv.org/abs/2106.05237)
* NFNets and ResNetV2-BiT models work w/ Pytorch XLA now
* weight standardization uses F.batch_norm instead of std_mean (std_mean wasn't lowered)
* eps values adjusted, will be slight differences but should be quite close
* Improve test coverage and classifier interface of non-conv (vision transformer and mlp) models
* Cleanup a few classifier / flatten details for models w/ conv classifiers or early global pool
* Please report any regressions, this PR touched quite a few models.
### June 8, 2021
* Add first ResMLP weights, trained in PyTorch XLA on TPU-VM w/ my XLA branch. 24 block variant, 79.2 top-1.
* Add ResNet51-Q model w/ pretrained weights at 82.36 top-1.
* NFNet inspired block layout with quad layer stem and no maxpool
* Same param count (35.7M) and throughput as ResNetRS-50 but +1.5 top-1 @ 224x224 and +2.5 top-1 at 288x288
### May 25, 2021
* Add LeViT, Visformer, Convit (PR by Aman Arora), Twins (PR by paper authors) transformer models
* Cleanup input_size/img_size override handling and testing for all vision transformer models
* Add `efficientnetv2_rw_m` model and weights (started training before official code). 84.8 top-1, 53M params.
### May 14, 2021
* Add EfficientNet-V2 official model defs w/ ported weights from official [Tensorflow/Keras](https://github.com/google/automl/tree/master/efficientnetv2) impl.
* 1k trained variants: `tf_efficientnetv2_s/m/l`
* 21k trained variants: `tf_efficientnetv2_s/m/l_in21k`
* 21k pretrained -> 1k fine-tuned: `tf_efficientnetv2_s/m/l_in21ft1k`
* v2 models w/ v1 scaling: `tf_efficientnetv2_b0` through `b3`
* Rename my prev V2 guess `efficientnet_v2s` -> `efficientnetv2_rw_s`
* Some blank `efficientnetv2_*` models in-place for future native PyTorch training
### May 5, 2021
* Add MLP-Mixer models and port pretrained weights from [Google JAX impl](https://github.com/google-research/vision_transformer/tree/linen)
* Add CaiT models and pretrained weights from [FB](https://github.com/facebookresearch/deit)
* Add ResNet-RS models and weights from [TF](https://github.com/tensorflow/tpu/tree/master/models/official/resnet/resnet_rs). Thanks [Aman Arora](https://github.com/amaarora)
* Add CoaT models and weights. Thanks [Mohammed Rizin](https://github.com/morizin)
* Add new ImageNet-21k weights & finetuned weights for TResNet, MobileNet-V3, ViT models. Thanks [mrT](https://github.com/mrT23)
* Add GhostNet models and weights. Thanks [Kai Han](https://github.com/iamhankai)
* Update ByoaNet attention models
* Improve SA module inits
* Hack together experimental stand-alone Swin based attn module and `swinnet`
* Consistent '26t' model defs for experiments.
* Add improved Efficientnet-V2S (prelim model def) weights. 83.8 top-1.
* WandB logging support
### April 13, 2021
* Add Swin Transformer models and weights from https://github.com/microsoft/Swin-Transformer
### April 12, 2021
* Add ECA-NFNet-L1 (slimmed down F1 w/ SiLU, 41M params) trained with this code. 84% top-1 @ 320x320. Trained at 256x256.
* Add EfficientNet-V2S model (unverified model definition) weights. 83.3 top-1 @ 288x288. Only trained single res 224. Working on progressive training.
* Add ByoaNet model definition (Bring-your-own-attention) w/ SelfAttention block and corresponding SA/SA-like modules and model defs
* Lambda Networks - https://arxiv.org/abs/2102.08602
* Bottleneck Transformers - https://arxiv.org/abs/2101.11605
* Halo Nets - https://arxiv.org/abs/2103.12731
* Adabelief optimizer contributed by Juntang Zhuang
### April 1, 2021
* Add snazzy `benchmark.py` script for bulk `timm` model benchmarking of train and/or inference
* Add Pooling-based Vision Transformer (PiT) models (from https://github.com/naver-ai/pit)
* Merged distilled variant into main for torchscript compatibility
* Some `timm` cleanup/style tweaks; weights now have hub download support
* Cleanup Vision Transformer (ViT) models
* Merge distilled (DeiT) model into main so that torchscript can work
* Support updated weight init (defaults to old still) that closer matches original JAX impl (possibly better training from scratch)
* Separate hybrid model defs into different file and add several new model defs to fiddle with, support patch_size != 1 for hybrids
* Fix fine-tuning num_class changes (PiT and ViT) and pos_embed resizing (ViT) with distilled variants
* nn.Sequential for block stack (does not break downstream compat)
* TnT (Transformer-in-Transformer) models contributed by author (from https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT)
* Add RegNetY-160 weights from DeiT teacher model
* Add new NFNet-L0 w/ SE attn (rename `nfnet_l0b`->`nfnet_l0`) weights 82.75 top-1 @ 288x288
* Some fixes/improvements for TFDS dataset wrapper
### March 7, 2021
* First 0.4.x PyPi release w/ NFNets (& related), ByoB (GPU-Efficient, RepVGG, etc).
* Change feature extraction for pre-activation nets (NFNets, ResNetV2) to return features before activation.
### Feb 18, 2021
* Add pretrained weights and model variants for NFNet-F* models from [DeepMind Haiku impl](https://github.com/deepmind/deepmind-research/tree/master/nfnets).
* Models are prefixed with `dm_`. They require SAME padding conv, skipinit enabled, and activation gains applied in act fn.
* These models are big, expect to run out of GPU memory. With the GELU activation + other options, they are roughly 1/2 the inference speed of my SiLU PyTorch optimized `s` variants.
* Original model results are based on pre-processing that is not the same as all other models so you'll see different results in the results csv (once updated).
* Matching the original pre-processing as closely as possible I get these results:
* `dm_nfnet_f6` - 86.352
* `dm_nfnet_f5` - 86.100
* `dm_nfnet_f4` - 85.834
* `dm_nfnet_f3` - 85.676
* `dm_nfnet_f2` - 85.178
* `dm_nfnet_f1` - 84.696
* `dm_nfnet_f0` - 83.464
### Feb 16, 2021
* Add Adaptive Gradient Clipping (AGC) as per https://arxiv.org/abs/2102.06171. Integrated w/ PyTorch gradient clipping via mode arg that defaults to prev 'norm' mode. For backward arg compat, clip-grad arg must be specified to enable when using train.py.
* AGC w/ default clipping factor `--clip-grad .01 --clip-mode agc`
* PyTorch global norm of 1.0 (old behaviour, always norm), `--clip-grad 1.0`
* PyTorch value clipping of 10, `--clip-grad 10. --clip-mode value`
* AGC performance is definitely sensitive to the clipping factor. More experimentation needed to determine good values for smaller batch sizes and optimizers besides those in paper. So far I've found .001-.005 is necessary for stable RMSProp training w/ NFNet/NF-ResNet.
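For reference, a minimal sketch of what unit-wise AGC computes, assuming the formulation from the paper; names and structure here are illustrative, not necessarily the exact timm implementation:

```python
import torch


def unitwise_norm(x: torch.Tensor) -> torch.Tensor:
    # One norm per output unit: reduce over all dims except dim 0.
    if x.ndim <= 1:
        return x.abs()
    return x.norm(p=2, dim=tuple(range(1, x.ndim)), keepdim=True)


@torch.no_grad()
def adaptive_clip_grad(parameters, clip_factor: float = 0.01, eps: float = 1e-3):
    for p in parameters:
        if p.grad is None:
            continue
        max_norm = unitwise_norm(p).clamp(min=eps) * clip_factor
        g_norm = unitwise_norm(p.grad)
        # Only rescale units whose gradient norm exceeds clip_factor * param norm.
        scale = (max_norm / g_norm.clamp(min=1e-6)).clamp(max=1.0)
        p.grad.mul_(scale)
```

With `train.py`, the equivalent is `--clip-grad .01 --clip-mode agc` as noted above.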
### Feb 12, 2021
* Update Normalization-Free nets to include new NFNet-F (https://arxiv.org/abs/2102.06171) model defs
### Feb 10, 2021
* More model archs, incl a flexible ByobNet backbone ('Bring-your-own-blocks')
* GPU-Efficient-Networks (https://github.com/idstcv/GPU-Efficient-Networks), impl in `byobnet.py`
* RepVGG (https://github.com/DingXiaoH/RepVGG), impl in `byobnet.py`
* classic VGG (from torchvision, impl in `vgg`)
* Refinements to normalizer layer arg handling and normalizer+act layer handling in some models
* Default AMP mode changed to native PyTorch AMP instead of APEX. Issues not being fixed with APEX. Native works with `--channels-last` and `--torchscript` model training, APEX does not.
* Fix a few bugs introduced since last pypi release
### Feb 8, 2021
* Add several ResNet weights with ECA attention. 26t & 50t trained @ 256, test @ 320. 269d train @ 256, fine-tune @320, test @ 352.
* `ecaresnet26t` - 79.88 top-1 @ 320x320, 79.08 @ 256x256
* `ecaresnet50t` - 82.35 top-1 @ 320x320, 81.52 @ 256x256
* `ecaresnet269d` - 84.93 top-1 @ 352x352, 84.87 @ 320x320
* Remove separate tiered (`t`) vs tiered_narrow (`tn`) ResNet model defs, all `tn` changed to `t` and `t` models removed (`seresnext26t_32x4d` only model w/ weights that was removed).
* Support model default_cfgs with separate train vs test resolution `test_input_size` and remove extra `_320` suffix ResNet model defs that were just for test.
### Jan 30, 2021
* Add initial "Normalization Free" NF-RegNet-B* and NF-ResNet model definitions based on [paper](https://arxiv.org/abs/2101.08692)
### Jan 25, 2021
* Add ResNetV2 Big Transfer (BiT) models w/ ImageNet-1k and 21k weights from https://github.com/google-research/big_transfer
* Add official R50+ViT-B/16 hybrid models + weights from https://github.com/google-research/vision_transformer
* ImageNet-21k ViT weights are added w/ model defs and representation layer (pre logits) support
* NOTE: ImageNet-21k classifier heads were zero'd in original weights, they are only useful for transfer learning
* Add model defs and weights for DeiT Vision Transformer models from https://github.com/facebookresearch/deit
* Refactor dataset classes into ImageDataset/IterableImageDataset + dataset specific parser classes
* Add Tensorflow-Datasets (TFDS) wrapper to allow use of TFDS image classification sets with train script
* Ex: `train.py /data/tfds --dataset tfds/oxford_iiit_pet --val-split test --model resnet50 -b 256 --amp --num-classes 37 --opt adamw --lr 3e-4 --weight-decay .001 --pretrained -j 2`
* Add improved .tar dataset parser that reads images from .tar, folder of .tar files, or .tar within .tar
* Run validation on full ImageNet-21k directly from tar w/ BiT model: `validate.py /data/fall11_whole.tar --model resnetv2_50x1_bitm_in21k --amp`
* Models in this update should be stable w/ possible exception of ViT/BiT, possibility of some regressions with train/val scripts and dataset handling
### Jan 3, 2021
* Add SE-ResNet-152D weights
* 256x256 val, 0.94 crop top-1 - 83.75
* 320x320 val, 1.0 crop - 84.36
* Update results files
### Dec 18, 2020
* Add ResNet-101D, ResNet-152D, and ResNet-200D weights trained @ 256x256
* 256x256 val, 0.94 crop (top-1) - 101D (82.33), 152D (83.08), 200D (83.25)
* 288x288 val, 1.0 crop - 101D (82.64), 152D (83.48), 200D (83.76)
* 320x320 val, 1.0 crop - 101D (83.00), 152D (83.66), 200D (84.01)
### Dec 7, 2020
* Simplify EMA module (ModelEmaV2), compatible with fully torchscripted models
* Misc fixes for SiLU ONNX export, default_cfg missing from Feature extraction models, Linear layer w/ AMP + torchscript
* PyPi release @ 0.3.2 (needed by EfficientDet)
### Oct 30, 2020
* Test with PyTorch 1.7 and fix a small top-n metric view vs reshape issue.
* Convert newly added 224x224 Vision Transformer weights from official JAX repo. 81.8 top-1 for B/16, 83.1 L/16.
* Support PyTorch 1.7 optimized, native SiLU (aka Swish) activation. Add mapping to 'silu' name, custom swish will eventually be deprecated.
* Fix regression for loading pretrained classifier via direct model entrypoint functions. Didn't impact create_model() factory usage.
* PyPi release @ 0.3.0 version!
### Oct 26, 2020
* Update Vision Transformer models to be compatible with official code release at https://github.com/google-research/vision_transformer
* Add Vision Transformer weights (ImageNet-21k pretrain) for 384x384 base and large models converted from official jax impl
* ViT-B/16 - 84.2
* ViT-B/32 - 81.7
* ViT-L/16 - 85.2
* ViT-L/32 - 81.5
### Oct 21, 2020
* Weights added for Vision Transformer (ViT) models. 77.86 top-1 for 'small' and 79.35 for 'base'. Thanks to [Christof](https://www.kaggle.com/christofhenkel) for training the base model w/ lots of GPUs.
### Oct 13, 2020
* Initial impl of Vision Transformer models. Both patch and hybrid (CNN backbone) variants. Currently trying to train...
* Adafactor and AdaHessian (FP32 only, no AMP) optimizers
* EdgeTPU-M (`efficientnet_em`) model trained in PyTorch, 79.3 top-1
* Pip release, doc updates pending a few more changes...
### Sept 18, 2020
* New ResNet 'D' weights. 72.7 (top-1) ResNet-18-D, 77.1 ResNet-34-D, 80.5 ResNet-50-D
* Added a few untrained defs for other ResNet models (66D, 101D, 152D, 200/200D)
### Sept 3, 2020
* New weights
* Wide-ResNet50 - 81.5 top-1 (vs 78.5 torchvision)
* SEResNeXt50-32x4d - 81.3 top-1 (vs 79.1 cadene)
* Support for native Torch AMP and channels_last memory format added to train/validate scripts (`--channels-last`, `--native-amp` vs `--apex-amp`)
* Models tested with channels_last on latest NGC 20.08 container. AdaptiveAvgPool in attn layers changed to mean((2,3)) to work around bug with NHWC kernel.
### Aug 12, 2020
* New/updated weights from training experiments
* EfficientNet-B3 - 82.1 top-1 (vs 81.6 for official with AA and 81.9 for AdvProp)
* RegNetY-3.2GF - 82.0 top-1 (78.9 from official ver)
* CSPResNet50 - 79.6 top-1 (76.6 from official ver)
* Add CutMix integrated w/ Mixup. See [pull request](https://github.com/rwightman/pytorch-image-models/pull/218) for some usage examples
* Some fixes for using pretrained weights with `in_chans` != 3 on several models.
### Aug 5, 2020
Universal feature extraction, new models, new weights, new test sets.
* All models support the `features_only=True` argument for the `create_model` call, returning a network that extracts feature maps from the deepest layer at each stride (see the sketch after this list).
* New models
* CSPResNet, CSPResNeXt, CSPDarkNet, DarkNet
* ReXNet
* (Modified Aligned) Xception41/65/71 (a proper port of TF models)
* New trained weights
* SEResNet50 - 80.3 top-1
* CSPDarkNet53 - 80.1 top-1
* CSPResNeXt50 - 80.0 top-1
* DPN68b - 79.2 top-1
* EfficientNet-Lite0 (non-TF ver) - 75.5 (submitted by [@hal-314](https://github.com/hal-314))
* Add 'real' labels for ImageNet and ImageNet-Renditions test set, see [`results/README.md`](results/README.md)
* Test set ranking/top-n diff script by [@KushajveerSingh](https://github.com/KushajveerSingh)
* Train script and loader/transform tweaks to punch through more aug arguments
* README and documentation overhaul. See initial (WIP) documentation at https://rwightman.github.io/pytorch-image-models/
* adamp and sgdp optimizers added by [@hellbell](https://github.com/hellbell)
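A minimal illustration of the `features_only` interface mentioned in the first bullet above (the model name is arbitrary; any `timm` architecture should behave the same way):

```python
import timm
import torch

m = timm.create_model('resnet50', features_only=True, pretrained=False)
outs = m(torch.randn(1, 3, 224, 224))
for o in outs:
    print(o.shape)                # one feature map per stride
print(m.feature_info.channels())  # channel counts of the returned maps
```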
### June 11, 2020
Bunch of changes:
* DenseNet models updated with memory efficient addition from torchvision (fixed a bug), blur pooling and deep stem additions
* VoVNet V1 and V2 models added, 39 V2 variant (ese_vovnet_39b) trained to 79.3 top-1
* Activation factory added along with new activations:
* select act at model creation time for more flexibility in using activations compatible with scripting or tracing (ONNX export)
* hard_mish (experimental) added with memory-efficient grad, along with ME hard_swish
* context mgr for setting exportable/scriptable/no_jit states
* Norm + Activation combo layers added with initial trial support in DenseNet and VoVNet along with impl of EvoNorm and InplaceAbn wrapper that fit the interface
* Torchscript works for all but two of the model types as long as using Pytorch 1.5+, tests added for this
* Some import cleanup and classifier reset changes; all models will have the classifier reset to nn.Identity on a reset_classifier(0) call
* Prep for 0.1.28 pip release
### May 12, 2020
* Add ResNeSt models (code adapted from https://github.com/zhanghang1989/ResNeSt, paper https://arxiv.org/abs/2004.08955)
### May 3, 2020
* Pruned EfficientNet B1, B2, and B3 (https://arxiv.org/abs/2002.08258) contributed by [Yonathan Aflalo](https://github.com/yoniaflalo)
### May 1, 2020
* Merged a number of excellent contributions in the ResNet model family over the past month
* BlurPool2D and resnetblur models initiated by [Chris Ha](https://github.com/VRandme); I trained resnetblur50 to 79.3.
* TResNet models and SpaceToDepth, AntiAliasDownsampleLayer layers by [mrT23](https://github.com/mrT23)
* ecaresnet (50d, 101d, light) models and two pruned variants using pruning as per (https://arxiv.org/abs/2002.08258) by [Yonathan Aflalo](https://github.com/yoniaflalo)
* 200 pretrained models in total now with updated results csv in results folder
### April 5, 2020
* Add some newly trained MobileNet-V2 models trained with latest h-params, rand augment. They compare quite favourably to EfficientNet-Lite
* 3.5M param MobileNet-V2 100 @ 73%
* 4.5M param MobileNet-V2 110d @ 75%
* 6.1M param MobileNet-V2 140 @ 76.5%
* 5.8M param MobileNet-V2 120d @ 77.3%
### March 18, 2020
* Add EfficientNet-Lite models w/ weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
* Add RandAugment trained ResNeXt-50 32x4d weights with 79.8 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams)
### Feb 29, 2020
* New MobileNet-V3 Large weights trained from scratch with this code to 75.77% top-1
* IMPORTANT CHANGE - default weight init changed for all MobilenetV3 / EfficientNet / related models
* overall results similar to, or a bit better than, training from scratch on the few smaller models tried
* performance early in training seems consistently improved but less difference by end
* set `fix_group_fanout=False` in the `_init_weight_goog` fn if you need to reproduce past behaviour
* Experimental LR noise feature added; applies a random perturbation to the LR each epoch within a specified range of training
### Feb 18, 2020
* Big refactor of model layers and addition of several attention mechanisms. Several additions motivated by 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268):
* Move layer/module impl into `layers` subfolder/module of `models` and organize in a more granular fashion
* ResNet downsample paths now properly support dilation (output stride != 32) for avg_pool ('D' variant) and 3x3 (SENets) networks
* Add Selective Kernel Nets on top of ResNet base, pretrained weights
* skresnet18 - 73% top-1
* skresnet34 - 76.9% top-1
* skresnext50_32x4d (equiv to SKNet50) - 80.2% top-1
* ECA and CECA (circular padding) attention layer contributed by [Chris Ha](https://github.com/VRandme)
* CBAM attention experiment (not the best results so far, may remove)
* Attention factory to allow dynamically selecting one of SE, ECA, CBAM in the `.se` position for all ResNets
* Add DropBlock and DropPath (formerly DropConnect for EfficientNet/MobileNetv3) support to all ResNet variants
* Full dataset results updated that incl NoisyStudent weights and 2 of the 3 SK weights
### Feb 12, 2020
* Add EfficientNet-L2 and B0-B7 NoisyStudent weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet)
### Feb 6, 2020
* Add RandAugment trained EfficientNet-ES (EdgeTPU-Small) weights with 78.1 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams)
### Feb 1/2, 2020
* Port new EfficientNet-B8 (RandAugment) weights; these differ from the B8 AdvProp weights and use different input normalization.
* Update results csv files on all models for ImageNet validation and three other test sets
* Push PyPi package update
### Jan 31, 2020
* Update ResNet50 weights with a new 79.038 result from further JSD / AugMix experiments. Full command line for reproduction in training section below.
### Jan 11/12, 2020
* Master may be a bit unstable wrt training; these changes have been tested, but not all combos
* Implementations of AugMix added to existing RA and AA, including numerous supporting pieces like the JSD loss (Jensen-Shannon divergence + CE, sketched after this list) and AugMixDataset
* SplitBatchNorm adaptation layer added for implementing Auxiliary BN as per AdvProp paper
* ResNet-50 AugMix trained model w/ 79% top-1 added
* `seresnext26tn_32x4d` - 77.99 top-1, 93.75 top-5 added to tiered experiment, higher img/s than 't' and 'd'
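A rough sketch of the JSD consistency loss referenced above, assuming the AugMix formulation (cross-entropy on the clean view plus Jensen-Shannon divergence across the clean and two augmented views); simplified relative to the actual `JsdCrossEntropy` in this repo:

```python
import torch
import torch.nn.functional as F


def jsd_ce_loss(logits_clean, logits_aug1, logits_aug2, target, alpha=12.0):
    # Standard CE on the clean view only.
    ce = F.cross_entropy(logits_clean, target)
    probs = [F.softmax(l, dim=1) for l in (logits_clean, logits_aug1, logits_aug2)]
    # Mixture distribution M, clamped for numerical stability.
    log_m = torch.clamp(sum(probs) / 3.0, 1e-7, 1.0).log()
    # JSD = mean of KL(p_i || M) over the three views.
    jsd = sum(F.kl_div(log_m, p, reduction='batchmean') for p in probs) / 3.0
    return ce + alpha * jsd
```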
### Jan 3, 2020
* Add RandAugment trained EfficientNet-B0 weight with 77.7 top-1. Trained by [Michael Klachko](https://github.com/michaelklachko) with this code and recent hparams (see Training section)
* Add `avg_checkpoints.py` script for post training weight averaging and update all scripts with header docstrings and shebangs.
### Dec 30, 2019
* Merge [Dushyant Mehta's](https://github.com/mehtadushy) PR for SelecSLS (Selective Short and Long Range Skip Connections) networks. Good GPU memory consumption and throughput. Original: https://github.com/mehtadushy/SelecSLS-Pytorch
### Dec 28, 2019
* Add new model weights and training hparams (see Training Hparams section)
* `efficientnet_b3` - 81.5 top-1, 95.7 top-5 at default res/crop, 81.9, 95.8 at 320x320 1.0 crop-pct
* trained with RandAugment, ended up with an interesting but less than perfect result (see training section)
* `seresnext26d_32x4d`- 77.6 top-1, 93.6 top-5
* deep stem (32, 32, 64), avgpool downsample
* stem/downsample from bag-of-tricks paper
* `seresnext26t_32x4d`- 78.0 top-1, 93.7 top-5
* deep tiered stem (24, 48, 64), avgpool downsample (a modified 'D' variant)
* stem sizing mods from Jeremy Howard and fastai devs discussing ResNet architecture experiments
### Dec 23, 2019
* Add RandAugment trained MixNet-XL weights with 80.48 top-1.
* `--dist-bn` argument added to train.py, will distribute BN stats between nodes after each train epoch, before eval
### Dec 4, 2019
* Added weights from the first training from scratch of an EfficientNet (B2) with my new RandAugment implementation. Much better than my previous B2 and very close to the official AdvProp ones (80.4 top-1, 95.08 top-5).
### Nov 29, 2019
* Brought EfficientNet and MobileNetV3 up to date with my https://github.com/rwightman/gen-efficientnet-pytorch code. Torchscript and ONNX export compat excluded.
* AdvProp weights added
* Official TF MobileNetv3 weights added
* EfficientNet and MobileNetV3 hook based 'feature extraction' classes added. Will serve as basis for using models as backbones in obj detection/segmentation tasks. Lots more to be done here...
* HRNet classification models and weights added from https://github.com/HRNet/HRNet-Image-Classification
* Consistency in global pooling, `reset_classifier`, and `forward_features` across models
* `forward_features` always returns unpooled feature maps now
* Reasonable chance I broke something... let me know
### Nov 22, 2019
* Add ImageNet training RandAugment implementation alongside AutoAugment. PyTorch Transform compatible format, using PIL. Currently training two EfficientNet models from scratch with promising results... will update.
* `drop-connect` cmd line arg finally added to `train.py`, no need to hack model fns. Works for efficientnet/mobilenetv3 based models, ignored otherwise.
# Deep Layer Aggregation
Extending "shallow" skip connections, **Deep Layer Aggregation (DLA)** incorporates more depth and sharing. The authors introduce two structures for deep layer aggregation: iterative deep aggregation (IDA) and hierarchical deep aggregation (HDA). These structures are expressed through an architectural framework, independent of the choice of backbone, for compatibility with current and future networks.
IDA focuses on fusing resolutions and scales while HDA focuses on merging features from all modules and channels. IDA follows the base hierarchy to refine resolution and aggregate scale stage-by-stage. HDA assembles its own hierarchy of tree-structured connections that cross and merge stages to aggregate different levels of representation.
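To inspect the aggregated multi-scale features, one option (illustrative; this uses timm's generic feature-extraction interface rather than anything DLA-specific) is:

```python
import timm
import torch

m = timm.create_model('dla102', features_only=True, pretrained=False)
outs = m(torch.randn(1, 3, 224, 224))
print([o.shape for o in outs])     # one aggregated feature map per stride
print(m.feature_info.reduction())  # the stride of each returned map
```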
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{yu2019deep,
title={Deep Layer Aggregation},
author={Fisher Yu and Dequan Wang and Evan Shelhamer and Trevor Darrell},
year={2019},
eprint={1707.06484},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: DLA
Paper:
Title: Deep Layer Aggregation
URL: https://paperswithcode.com/paper/deep-layer-aggregation
Models:
- Name: dla102
In Collection: DLA
Metadata:
FLOPs: 7192952808
Parameters: 33270000
File Size: 135290579
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla102
LR: 0.1
Epochs: 120
Layers: 102
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L410
Weights: http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.03%
Top 5 Accuracy: 93.95%
- Name: dla102x
In Collection: DLA
Metadata:
FLOPs: 5886821352
Parameters: 26310000
File Size: 107552695
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla102x
LR: 0.1
Epochs: 120
Layers: 102
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L418
Weights: http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.51%
Top 5 Accuracy: 94.23%
- Name: dla102x2
In Collection: DLA
Metadata:
FLOPs: 9343847400
Parameters: 41280000
File Size: 167645295
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla102x2
LR: 0.1
Epochs: 120
Layers: 102
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L426
Weights: http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.44%
Top 5 Accuracy: 94.65%
- Name: dla169
In Collection: DLA
Metadata:
FLOPs: 11598004200
Parameters: 53390000
File Size: 216547113
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla169
LR: 0.1
Epochs: 120
Layers: 169
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L434
Weights: http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.69%
Top 5 Accuracy: 94.33%
- Name: dla34
In Collection: DLA
Metadata:
FLOPs: 3070105576
Parameters: 15740000
File Size: 63228658
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla34
LR: 0.1
Epochs: 120
    Layers: 34
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L362
Weights: http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.62%
Top 5 Accuracy: 92.06%
- Name: dla46_c
In Collection: DLA
Metadata:
FLOPs: 583277288
Parameters: 1300000
File Size: 5307963
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla46_c
LR: 0.1
Epochs: 120
Layers: 46
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L369
Weights: http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 64.87%
Top 5 Accuracy: 86.29%
- Name: dla46x_c
In Collection: DLA
Metadata:
FLOPs: 544052200
Parameters: 1070000
File Size: 4387641
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla46x_c
LR: 0.1
Epochs: 120
Layers: 46
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L378
Weights: http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 65.98%
Top 5 Accuracy: 86.99%
- Name: dla60
In Collection: DLA
Metadata:
FLOPs: 4256251880
Parameters: 22040000
File Size: 89560235
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60
LR: 0.1
Epochs: 120
Layers: 60
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L394
Weights: http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.04%
Top 5 Accuracy: 93.32%
- Name: dla60_res2net
In Collection: DLA
Metadata:
FLOPs: 4147578504
Parameters: 20850000
File Size: 84886593
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60_res2net
Layers: 60
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L346
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.46%
Top 5 Accuracy: 94.21%
- Name: dla60_res2next
In Collection: DLA
Metadata:
FLOPs: 3485335272
Parameters: 17030000
File Size: 69639245
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60_res2next
Layers: 60
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L354
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.44%
Top 5 Accuracy: 94.16%
- Name: dla60x
In Collection: DLA
Metadata:
FLOPs: 3544204264
Parameters: 17350000
File Size: 70883139
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60x
LR: 0.1
Epochs: 120
Layers: 60
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L402
Weights: http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.25%
Top 5 Accuracy: 94.02%
- Name: dla60x_c
In Collection: DLA
Metadata:
FLOPs: 593325032
Parameters: 1320000
File Size: 5454396
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60x_c
LR: 0.1
Epochs: 120
Layers: 60
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L386
Weights: http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 67.91%
Top 5 Accuracy: 88.42%
-->
# Inception ResNet v2
**Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture).
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{szegedy2016inceptionv4,
title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning},
author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi},
year={2016},
eprint={1602.07261},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: Inception ResNet v2
Paper:
Title: Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning
URL: https://paperswithcode.com/paper/inception-v4-inception-resnet-and-the-impact
Models:
- Name: inception_resnet_v2
In Collection: Inception ResNet v2
Metadata:
FLOPs: 16959133120
Parameters: 55850000
File Size: 223774238
Architecture:
- Average Pooling
- Dropout
- Inception-ResNet-v2 Reduction-B
- Inception-ResNet-v2-A
- Inception-ResNet-v2-B
- Inception-ResNet-v2-C
- Reduction-A
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 20x NVIDIA Kepler GPUs
ID: inception_resnet_v2
LR: 0.045
Dropout: 0.2
Crop Pct: '0.897'
Momentum: 0.9
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_resnet_v2.py#L343
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/inception_resnet_v2-940b1cd6.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
      Top 1 Accuracy: 80.46%
      Top 5 Accuracy: 95.31%
-->
# Res2NeXt
**Res2NeXt** is an image model that employs a variation on [ResNeXt](https://paperswithcode.com/method/resnext) bottleneck residual blocks. The motivation is to be able to represent features at multiple scales. This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within a single residual block (sketched below). This represents multi-scale features at a granular level and increases the range of receptive fields for each network layer.
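A minimal sketch of those hierarchical connections for scale s=4 (simplified from the paper; not the exact timm implementation):

```python
import torch
import torch.nn as nn


class Res2Split(nn.Module):
    # Hierarchical residual-like connections inside one bottleneck.
    def __init__(self, channels: int, scale: int = 4):
        super().__init__()
        assert channels % scale == 0
        self.scale = scale
        w = channels // scale
        # One 3x3 conv per split except the first, which is passed through.
        self.convs = nn.ModuleList(
            nn.Conv2d(w, w, 3, padding=1) for _ in range(scale - 1)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        splits = torch.chunk(x, self.scale, dim=1)
        out, y = [splits[0]], None
        for i, conv in enumerate(self.convs):
            # Each split also receives the previous split's output.
            y = conv(splits[i + 1] if y is None else splits[i + 1] + y)
            out.append(y)
        return torch.cat(out, dim=1)
```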
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@article{Gao_2021,
title={Res2Net: A New Multi-Scale Backbone Architecture},
volume={43},
ISSN={1939-3539},
url={http://dx.doi.org/10.1109/TPAMI.2019.2938758},
DOI={10.1109/tpami.2019.2938758},
number={2},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
publisher={Institute of Electrical and Electronics Engineers (IEEE)},
author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
year={2021},
month={Feb},
pages={652--662}
}
```
<!--
Type: model-index
Collections:
- Name: Res2NeXt
Paper:
Title: 'Res2Net: A New Multi-scale Backbone Architecture'
URL: https://paperswithcode.com/paper/res2net-a-new-multi-scale-backbone
Models:
- Name: res2next50
In Collection: Res2NeXt
Metadata:
FLOPs: 5396798208
Parameters: 24670000
File Size: 99019592
Architecture:
- Batch Normalization
- Convolution
- Global Average Pooling
- ReLU
- Res2NeXt Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x Titan Xp GPUs
ID: res2next50
LR: 0.1
Epochs: 100
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L207
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.24%
Top 5 Accuracy: 93.91%
-->
# (Tensorflow) EfficientNet CondConv
**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scale network width, depth, and resolution in a principled way.
The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
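As a worked example, using the coefficients reported in the EfficientNet paper ($\alpha=1.2$, $\beta=1.1$, $\gamma=1.15$) and ignoring the rounding/snapping applied to the released models:

```python
alpha, beta, gamma = 1.2, 1.1, 1.15

def compound_scale(phi):
    depth = alpha ** phi                    # layer-count multiplier
    width = beta ** phi                     # channel-count multiplier
    resolution = round(224 * gamma ** phi)  # input image size
    return depth, width, resolution

for phi in range(4):  # roughly B0..B3
    print(phi, compound_scale(phi))
```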
The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to squeeze-and-excitation blocks.
This collection of models amends EfficientNet by adding [CondConv](https://paperswithcode.com/method/condconv) convolutions.
The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).
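A minimal sketch of a conditionally parameterized convolution, assuming the paper's formulation (a routing function mixes expert kernels per example before an ordinary convolution); simplified (a batch loop instead of the grouped-convolution trick) and not timm's actual `CondConv2d`:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class CondConv2d(nn.Module):
    def __init__(self, in_ch: int, out_ch: int, k: int = 3, num_experts: int = 4):
        super().__init__()
        self.k = k
        # One kernel per expert.
        self.weight = nn.Parameter(
            torch.randn(num_experts, out_ch, in_ch, k, k) * 0.02
        )
        self.routing = nn.Linear(in_ch, num_experts)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Routing: global average pool -> linear -> sigmoid.
        r = torch.sigmoid(self.routing(x.mean(dim=(2, 3))))  # (B, E)
        # Mix expert kernels into one kernel per example.
        w = torch.einsum('be,eoihw->boihw', r, self.weight)
        outs = [
            F.conv2d(x[i:i + 1], w[i], padding=self.k // 2)
            for i in range(x.size(0))
        ]
        return torch.cat(outs, dim=0)
```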
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1904-04971,
author = {Brandon Yang and
Gabriel Bender and
Quoc V. Le and
Jiquan Ngiam},
title = {Soft Conditional Computation},
journal = {CoRR},
volume = {abs/1904.04971},
year = {2019},
url = {http://arxiv.org/abs/1904.04971},
archivePrefix = {arXiv},
eprint = {1904.04971},
timestamp = {Thu, 25 Apr 2019 13:55:01 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1904-04971.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: TF EfficientNet CondConv
Paper:
Title: 'CondConv: Conditionally Parameterized Convolutions for Efficient Inference'
URL: https://paperswithcode.com/paper/soft-conditional-computation
Models:
- Name: tf_efficientnet_cc_b0_4e
In Collection: TF EfficientNet CondConv
Metadata:
FLOPs: 224153788
Parameters: 13310000
File Size: 53490940
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- CondConv
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_cc_b0_4e
LR: 0.256
Epochs: 350
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 2048
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1561
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.32%
Top 5 Accuracy: 93.32%
- Name: tf_efficientnet_cc_b0_8e
In Collection: TF EfficientNet CondConv
Metadata:
FLOPs: 224158524
Parameters: 24010000
File Size: 96287616
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- CondConv
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_cc_b0_8e
LR: 0.256
Epochs: 350
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 2048
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1572
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.91%
Top 5 Accuracy: 93.65%
- Name: tf_efficientnet_cc_b1_8e
In Collection: TF EfficientNet CondConv
Metadata:
FLOPs: 370427824
Parameters: 39720000
File Size: 159206198
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- CondConv
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_cc_b1_8e
LR: 0.256
Epochs: 350
Crop Pct: '0.882'
Momentum: 0.9
Batch Size: 2048
Image Size: '240'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1584
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.33%
Top 5 Accuracy: 94.37%
-->
# DenseNet
**DenseNet** is a type of convolutional neural network that utilises dense connections between layers, through [Dense Blocks](http://www.paperswithcode.com/method/dense-block), where we connect *all layers* (with matching feature-map sizes) directly with each other. To preserve the feed-forward nature, each layer obtains additional inputs from all preceding layers and passes on its own feature-maps to all subsequent layers.
The **DenseNet Blur** variant in this collection by Ross Wightman employs [Blur Pooling](http://www.paperswithcode.com/method/blur-pooling).
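A minimal sketch of the dense connectivity described above, stripped of the BN/ReLU/bottleneck details of the real DenseNet layers:

```python
import torch
import torch.nn as nn


class TinyDenseBlock(nn.Module):
    def __init__(self, in_ch: int, growth: int, num_layers: int):
        super().__init__()
        # Layer i sees the input plus the outputs of all previous layers.
        self.layers = nn.ModuleList(
            nn.Conv2d(in_ch + i * growth, growth, 3, padding=1)
            for i in range(num_layers)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        features = [x]
        for layer in self.layers:
            features.append(layer(torch.cat(features, dim=1)))
        return torch.cat(features, dim=1)
```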
## How do I use this model on an image?
To load a pretrained model:
```python
import timm
model = timm.create_model('densenet121', pretrained=True)
model.eval()
```
To load and preprocess the image:
```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
config = resolve_data_config({}, model=model)
transform = create_transform(**config)
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```python
import torch
with torch.no_grad():
out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
categories = [s.strip() for s in f.readlines()]
# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `densenet121`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```python
model = timm.create_model('densenet121', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/HuangLW16a,
author = {Gao Huang and
Zhuang Liu and
Kilian Q. Weinberger},
title = {Densely Connected Convolutional Networks},
journal = {CoRR},
volume = {abs/1608.06993},
year = {2016},
url = {http://arxiv.org/abs/1608.06993},
archivePrefix = {arXiv},
eprint = {1608.06993},
timestamp = {Mon, 10 Sep 2018 15:49:32 +0200},
biburl = {https://dblp.org/rec/journals/corr/HuangLW16a.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
```
@misc{rw2019timm,
author = {Ross Wightman},
title = {PyTorch Image Models},
year = {2019},
publisher = {GitHub},
journal = {GitHub repository},
doi = {10.5281/zenodo.4414861},
howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
}
```
<!--
Type: model-index
Collections:
- Name: DenseNet
Paper:
Title: Densely Connected Convolutional Networks
URL: https://paperswithcode.com/paper/densely-connected-convolutional-networks
Models:
- Name: densenet121
In Collection: DenseNet
Metadata:
FLOPs: 3641843200
Parameters: 7980000
File Size: 32376726
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Block
- Dense Connections
- Dropout
- Max Pooling
- ReLU
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Kaiming Initialization
- Nesterov Accelerated Gradient
- Weight Decay
Training Data:
- ImageNet
ID: densenet121
LR: 0.1
Epochs: 90
Layers: 121
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L295
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenet121_ra-50efcf5c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.56%
Top 5 Accuracy: 92.65%
- Name: densenet161
In Collection: DenseNet
Metadata:
FLOPs: 9931959264
Parameters: 28680000
File Size: 115730790
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Block
- Dense Connections
- Dropout
- Max Pooling
- ReLU
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Kaiming Initialization
- Nesterov Accelerated Gradient
- Weight Decay
Training Data:
- ImageNet
ID: densenet161
LR: 0.1
Epochs: 90
Layers: 161
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L347
Weights: https://download.pytorch.org/models/densenet161-8d451a50.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.36%
Top 5 Accuracy: 93.63%
- Name: densenet169
In Collection: DenseNet
Metadata:
FLOPs: 4316945792
Parameters: 14150000
File Size: 57365526
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Block
- Dense Connections
- Dropout
- Max Pooling
- ReLU
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Kaiming Initialization
- Nesterov Accelerated Gradient
- Weight Decay
Training Data:
- ImageNet
ID: densenet169
LR: 0.1
Epochs: 90
Layers: 169
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L327
Weights: https://download.pytorch.org/models/densenet169-b2777c0a.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.9%
Top 5 Accuracy: 93.02%
- Name: densenet201
In Collection: DenseNet
Metadata:
FLOPs: 5514321024
Parameters: 20010000
File Size: 81131730
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Block
- Dense Connections
- Dropout
- Max Pooling
- ReLU
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Kaiming Initialization
- Nesterov Accelerated Gradient
- Weight Decay
Training Data:
- ImageNet
ID: densenet201
LR: 0.1
Epochs: 90
Layers: 201
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L337
Weights: https://download.pytorch.org/models/densenet201-c1103571.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.29%
Top 5 Accuracy: 93.48%
- Name: densenetblur121d
In Collection: DenseNet
Metadata:
FLOPs: 3947812864
Parameters: 8000000
File Size: 32456500
Architecture:
- 1x1 Convolution
- Batch Normalization
- Blur Pooling
- Convolution
- Dense Block
- Dense Connections
- Dropout
- Max Pooling
- ReLU
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: densenetblur121d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L305
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenetblur121d_ra-100dcfbc.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 76.59%
Top 5 Accuracy: 93.2%
- Name: tv_densenet121
In Collection: DenseNet
Metadata:
FLOPs: 3641843200
Parameters: 7980000
File Size: 32342954
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Block
- Dense Connections
- Dropout
- Max Pooling
- ReLU
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: tv_densenet121
LR: 0.1
Epochs: 90
Crop Pct: '0.875'
LR Gamma: 0.1
Momentum: 0.9
Batch Size: 32
Image Size: '224'
LR Step Size: 30
Weight Decay: 0.0001
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L379
Weights: https://download.pytorch.org/models/densenet121-a639ec97.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.74%
Top 5 Accuracy: 92.15%
-->
# Instagram ResNeXt WSL
A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width.
These models were trained on billions of Instagram images, using thousands of distinct hashtags as labels, and exhibit excellent transfer learning performance.
Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
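In practice, the aggregated transformations are implemented as a single grouped convolution; a sketch of the 32x4d bottleneck shape (illustrative channel sizes only, not a full ResNeXt block with norms, activations, and the residual connection):

```python
import torch.nn as nn

cardinality = 32  # size of the set of transformations, C
width = 4         # bottleneck width per group -> '32x4d'
bottleneck = nn.Sequential(
    nn.Conv2d(256, cardinality * width, 1, bias=False),
    nn.Conv2d(cardinality * width, cardinality * width, 3,
              padding=1, groups=cardinality, bias=False),
    nn.Conv2d(cardinality * width, 256, 1, bias=False),
)
```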
## How do I use this model on an image?
To load a pretrained model:
```python
import timm
model = timm.create_model('ig_resnext101_32x16d', pretrained=True)
model.eval()
```
To load and preprocess the image:
```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
config = resolve_data_config({}, model=model)
transform = create_transform(**config)
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```python
import torch
with torch.no_grad():
out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
categories = [s.strip() for s in f.readlines()]
# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `ig_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```python
model = timm.create_model('ig_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{mahajan2018exploring,
title={Exploring the Limits of Weakly Supervised Pretraining},
author={Dhruv Mahajan and Ross Girshick and Vignesh Ramanathan and Kaiming He and Manohar Paluri and Yixuan Li and Ashwin Bharambe and Laurens van der Maaten},
year={2018},
eprint={1805.00932},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: IG ResNeXt
Paper:
Title: Exploring the Limits of Weakly Supervised Pretraining
URL: https://paperswithcode.com/paper/exploring-the-limits-of-weakly-supervised
Models:
- Name: ig_resnext101_32x16d
In Collection: IG ResNeXt
Metadata:
FLOPs: 46623691776
Parameters: 194030000
File Size: 777518664
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Nesterov Accelerated Gradient
- Weight Decay
Training Data:
- IG-3.5B-17k
- ImageNet
Training Resources: 336x GPUs
ID: ig_resnext101_32x16d
Epochs: 100
Layers: 101
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 8064
Image Size: '224'
Weight Decay: 0.001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L874
Weights: https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 84.16%
Top 5 Accuracy: 97.19%
- Name: ig_resnext101_32x32d
In Collection: IG ResNeXt
Metadata:
FLOPs: 112225170432
Parameters: 468530000
File Size: 1876573776
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Nesterov Accelerated Gradient
- Weight Decay
Training Data:
- IG-3.5B-17k
- ImageNet
Training Resources: 336x GPUs
ID: ig_resnext101_32x32d
Epochs: 100
Layers: 101
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 8064
Image Size: '224'
Weight Decay: 0.001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L885
Weights: https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 85.09%
Top 5 Accuracy: 97.44%
- Name: ig_resnext101_32x48d
In Collection: IG ResNeXt
Metadata:
FLOPs: 197446554624
Parameters: 828410000
File Size: 3317136976
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Nesterov Accelerated Gradient
- Weight Decay
Training Data:
- IG-3.5B-17k
- ImageNet
Training Resources: 336x GPUs
ID: ig_resnext101_32x48d
Epochs: 100
Layers: 101
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 8064
Image Size: '224'
Weight Decay: 0.001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L896
Weights: https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 85.42%
Top 5 Accuracy: 97.58%
- Name: ig_resnext101_32x8d
In Collection: IG ResNeXt
Metadata:
FLOPs: 21180417024
Parameters: 88790000
File Size: 356056638
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Nesterov Accelerated Gradient
- Weight Decay
Training Data:
- IG-3.5B-17k
- ImageNet
Training Resources: 336x GPUs
ID: ig_resnext101_32x8d
Epochs: 100
Layers: 101
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 8064
Image Size: '224'
Weight Decay: 0.001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L863
Weights: https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.7%
Top 5 Accuracy: 96.64%
--> | pytorch-image-models/docs/models/ig-resnext.md/0 | {
"file_path": "pytorch-image-models/docs/models/ig-resnext.md",
"repo_id": "pytorch-image-models",
"token_count": 3230
} | 185 |
# Feature Extraction
All of the models in `timm` have consistent mechanisms for obtaining various types of features from the model for tasks besides classification.
## Penultimate Layer Features (Pre-Classifier Features)
The features from the penultimate model layer can be obtained in several ways without requiring model surgery (although feel free to do surgery). First decide whether you want pooled or un-pooled features.
### Unpooled
There are three ways to obtain unpooled features.
Without modifying the network, one can call `model.forward_features(input)` on any model instead of the usual `model(input)`. This bypasses the network's head classifier and global pooling.
To explicitly modify the network to return unpooled features, either create the model without a classifier and pooling, or remove them later. Both paths remove the classifier's parameters from the network.
#### forward_features()
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('xception41', pretrained=True)
>>> o = m(torch.randn(2, 3, 299, 299))
>>> print(f'Original shape: {o.shape}')
>>> o = m.forward_features(torch.randn(2, 3, 299, 299))
>>> print(f'Unpooled shape: {o.shape}')
```
Output:
```text
Original shape: torch.Size([2, 1000])
Unpooled shape: torch.Size([2, 2048, 10, 10])
```
#### Create with no classifier and pooling
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnet50', pretrained=True, num_classes=0, global_pool='')
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Unpooled shape: {o.shape}')
```
Output:
```text
Unpooled shape: torch.Size([2, 2048, 7, 7])
```
#### Remove it later
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('densenet121', pretrained=True)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Original shape: {o.shape}')
>>> m.reset_classifier(0, '')
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Unpooled shape: {o.shape}')
```
Output:
```text
Original shape: torch.Size([2, 1000])
Unpooled shape: torch.Size([2, 1024, 7, 7])
```
### Pooled
To modify the network to return pooled features, one can use `forward_features()` and pool/flatten the result themselves, or modify the network like above but keep pooling intact.
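For example, a minimal sketch of pooling the output of `forward_features()` yourself (a mean over the spatial dimensions mirrors the default global average pooling for CNNs):
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnet50', pretrained=True)
>>> o = m.forward_features(torch.randn(2, 3, 224, 224))
>>> pooled = o.mean(dim=(2, 3))  # global average pool over H, W
>>> print(pooled.shape)
>>> # prints: torch.Size([2, 2048])
```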
#### Create with no classifier
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnet50', pretrained=True, num_classes=0)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Pooled shape: {o.shape}')
```
Output:
```text
Pooled shape: torch.Size([2, 2048])
```
#### Remove it later
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('ese_vovnet19b_dw', pretrained=True)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Original shape: {o.shape}')
>>> m.reset_classifier(0)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Pooled shape: {o.shape}')
```
Output:
```text
Original shape: torch.Size([2, 1000])
Pooled shape: torch.Size([2, 1024])
```
## Multi-scale Feature Maps (Feature Pyramid)
Object detection, segmentation, keypoint, and a variety of dense pixel tasks require access to feature maps from the backbone network at multiple scales. This is often done by modifying the original classification network. Since each network varies quite a bit in structure, it's not uncommon to see only a few backbones supported in any given object detection or segmentation library.
`timm` allows a consistent interface for creating any of the included models as feature backbones that output feature maps for selected levels.
A feature backbone can be created by adding the argument `features_only=True` to any `create_model` call. By default most models will output 5 strides (not all have that many), with the first starting at a reduction of 2 (some start at 1 or 4).
### Create a feature map extraction model
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnest26d', features_only=True, pretrained=True)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> for x in o:
... print(x.shape)
```
Output:
```text
torch.Size([2, 64, 112, 112])
torch.Size([2, 256, 56, 56])
torch.Size([2, 512, 28, 28])
torch.Size([2, 1024, 14, 14])
torch.Size([2, 2048, 7, 7])
```
### Query the feature information
After a feature backbone has been created, it can be queried to provide channel or resolution reduction information to the downstream heads without requiring static config or hardcoded constants. The `.feature_info` attribute is a class encapsulating the information about the feature extraction points.
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('regnety_032', features_only=True, pretrained=True)
>>> print(f'Feature channels: {m.feature_info.channels()}')
>>> o = m(torch.randn(2, 3, 224, 224))
>>> for x in o:
... print(x.shape)
```
Output:
```text
Feature channels: [32, 72, 216, 576, 1512]
torch.Size([2, 32, 112, 112])
torch.Size([2, 72, 56, 56])
torch.Size([2, 216, 28, 28])
torch.Size([2, 576, 14, 14])
torch.Size([2, 1512, 7, 7])
```
### Select specific feature levels or limit the stride
There are two additional creation arguments impacting the output features.
* `out_indices` selects which indices to output
* `output_stride` limits the feature output stride of the network (this also works in classification mode)
`out_indices` is supported by all models, but not all models have the same index-to-feature-stride mapping. Look at the code or check `feature_info` to compare. The out indices generally correspond to the `C(i+1)th` feature level (a `2^(i+1)` reduction). For most models, index 0 is the stride 2 features, and index 4 is stride 32.
`output_stride` is achieved by converting layers to use dilated convolutions. Doing so is not always straightforward; some networks only support `output_stride=32`.
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('ecaresnet101d', features_only=True, output_stride=8, out_indices=(2, 4), pretrained=True)
>>> print(f'Feature channels: {m.feature_info.channels()}')
>>> print(f'Feature reduction: {m.feature_info.reduction()}')
>>> o = m(torch.randn(2, 3, 320, 320))
>>> for x in o:
... print(x.shape)
```
Output:
```text
Feature channels: [512, 2048]
Feature reduction: [8, 8]
torch.Size([2, 512, 40, 40])
torch.Size([2, 2048, 40, 40])
```
| pytorch-image-models/hfdocs/source/feature_extraction.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/feature_extraction.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2004
} | 186 |
# EfficientNet
**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way.
The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
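As a concrete sketch, the paper's grid search found \\( \alpha = 1.2 \\), \\( \beta = 1.1 \\), \\( \gamma = 1.15 \\) under the constraint \\( \alpha \cdot \beta^2 \cdot \gamma^2 \approx 2 \\), so scaling with compound coefficient \\( \phi \\) multiplies FLOPs by roughly \\( 2^\phi \\):
```py
>>> alpha, beta, gamma = 1.2, 1.1, 1.15  # coefficients reported in the paper
>>> phi = 3  # compound coefficient (illustrative value)
>>> print(f'depth x{alpha ** phi:.2f}, width x{beta ** phi:.2f}, resolution x{gamma ** phi:.2f}')
>>> # prints: depth x1.73, width x1.33, resolution x1.52
```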
The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block).
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('efficientnet_b0', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities, one per line, like:
>>> # Samoyed 0.6425196528434753
>>> # Pomeranian 0.04062102362513542
>>> # ...
```
Replace the model name with the variant you want to use, e.g. `efficientnet_b0`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('efficientnet_b0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model from scratch.
## Citation
```BibTeX
@misc{tan2020efficientnet,
title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
author={Mingxing Tan and Quoc V. Le},
year={2020},
eprint={1905.11946},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
<!--
Type: model-index
Collections:
- Name: EfficientNet
Paper:
Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks'
URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for
Models:
- Name: efficientnet_b0
In Collection: EfficientNet
Metadata:
FLOPs: 511241564
Parameters: 5290000
File Size: 21376743
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b0
Layers: 18
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1002
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.71%
Top 5 Accuracy: 93.52%
- Name: efficientnet_b1
In Collection: EfficientNet
Metadata:
FLOPs: 909691920
Parameters: 7790000
File Size: 31502706
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b1
Crop Pct: '0.875'
Image Size: '240'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1011
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.71%
Top 5 Accuracy: 94.15%
- Name: efficientnet_b2
In Collection: EfficientNet
Metadata:
FLOPs: 1265324514
Parameters: 9110000
File Size: 36788104
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b2
Crop Pct: '0.875'
Image Size: '260'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1020
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.38%
Top 5 Accuracy: 95.08%
- Name: efficientnet_b2a
In Collection: EfficientNet
Metadata:
FLOPs: 1452041554
Parameters: 9110000
File Size: 49369973
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b2a
Crop Pct: '1.0'
Image Size: '288'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1029
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.61%
Top 5 Accuracy: 95.32%
- Name: efficientnet_b3
In Collection: EfficientNet
Metadata:
FLOPs: 2327905920
Parameters: 12230000
File Size: 49369973
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b3
Crop Pct: '0.904'
Image Size: '300'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1038
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.08%
Top 5 Accuracy: 96.03%
- Name: efficientnet_b3a
In Collection: EfficientNet
Metadata:
FLOPs: 2600628304
Parameters: 12230000
File Size: 49369973
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b3a
Crop Pct: '1.0'
Image Size: '320'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1047
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.25%
Top 5 Accuracy: 96.11%
- Name: efficientnet_em
In Collection: EfficientNet
Metadata:
FLOPs: 3935516480
Parameters: 6900000
File Size: 27927309
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_em
Crop Pct: '0.882'
Image Size: '240'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1118
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.26%
Top 5 Accuracy: 94.79%
- Name: efficientnet_es
In Collection: EfficientNet
Metadata:
FLOPs: 2317181824
Parameters: 5440000
File Size: 22003339
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_es
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1110
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.09%
Top 5 Accuracy: 93.93%
- Name: efficientnet_lite0
In Collection: EfficientNet
Metadata:
FLOPs: 510605024
Parameters: 4650000
File Size: 18820005
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_lite0
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1163
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.5%
Top 5 Accuracy: 92.51%
-->
| pytorch-image-models/hfdocs/source/models/efficientnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/efficientnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 4915
} | 187 |
# (Legacy) SE-ResNeXt
**SE ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
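For intuition, here is a minimal sketch of a squeeze-and-excitation block (the reduction ratio `r=16` follows the paper; this is illustrative, not `timm`'s implementation):
```py
>>> import torch
>>> import torch.nn as nn
>>> class SEBlock(nn.Module):
...     def __init__(self, channels, r=16):
...         super().__init__()
...         self.fc1 = nn.Linear(channels, channels // r)
...         self.fc2 = nn.Linear(channels // r, channels)
...     def forward(self, x):
...         s = x.mean(dim=(2, 3))  # squeeze: global average pool
...         s = torch.sigmoid(self.fc2(torch.relu(self.fc1(s))))  # excitation
...         return x * s[:, :, None, None]  # channel-wise recalibration
>>> out = SEBlock(64)(torch.randn(2, 64, 8, 8))
>>> print(out.shape)
>>> # prints: torch.Size([2, 64, 8, 8])
```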
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('legacy_seresnext101_32x4d', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities, one per line, like:
>>> # Samoyed 0.6425196528434753
>>> # Pomeranian 0.04062102362513542
>>> # ...
```
Replace the model name with the variant you want to use, e.g. `legacy_seresnext101_32x4d`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('legacy_seresnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model from scratch.
## Citation
```BibTeX
@misc{hu2019squeezeandexcitation,
title={Squeeze-and-Excitation Networks},
author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
year={2019},
eprint={1709.01507},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: Legacy SE ResNeXt
Paper:
Title: Squeeze-and-Excitation Networks
URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
Models:
- Name: legacy_seresnext101_32x4d
In Collection: Legacy SE ResNeXt
Metadata:
FLOPs: 10287698672
Parameters: 48960000
File Size: 196466866
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: legacy_seresnext101_32x4d
LR: 0.6
Epochs: 100
Layers: 101
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1024
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L462
Weights: http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.23%
Top 5 Accuracy: 95.02%
- Name: legacy_seresnext26_32x4d
In Collection: Legacy SE ResNeXt
Metadata:
FLOPs: 3187342304
Parameters: 16790000
File Size: 67346327
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: legacy_seresnext26_32x4d
LR: 0.6
Epochs: 100
Layers: 26
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1024
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L448
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.11%
Top 5 Accuracy: 93.31%
- Name: legacy_seresnext50_32x4d
In Collection: Legacy SE ResNeXt
Metadata:
FLOPs: 5459954352
Parameters: 27560000
File Size: 110559176
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: legacy_seresnext50_32x4d
LR: 0.6
Epochs: 100
Layers: 50
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1024
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L455
Weights: http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.08%
Top 5 Accuracy: 94.43%
--> | pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2730
} | 188 |
# ResNeXt
A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) \\( C \\), as an essential factor in addition to the dimensions of depth and width.
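In practice, the set of \\( C \\) parallel transformations is implemented as a grouped convolution; for example, in `resnext101_32x8d` the 3x3 convolutions use 32 groups (a minimal sketch, with illustrative channel counts):
```py
>>> import torch
>>> import torch.nn as nn
>>> conv = nn.Conv2d(256, 256, kernel_size=3, padding=1, groups=32, bias=False)
>>> out = conv(torch.randn(2, 256, 14, 14))
>>> print(out.shape)
>>> # prints: torch.Size([2, 256, 14, 14])
```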
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('resnext101_32x8d', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities, one per line, like:
>>> # Samoyed 0.6425196528434753
>>> # Pomeranian 0.04062102362513542
>>> # ...
```
Replace the model name with the variant you want to use, e.g. `resnext101_32x8d`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('resnext101_32x8d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model from scratch.
## Citation
```BibTeX
@article{DBLP:journals/corr/XieGDTH16,
author = {Saining Xie and
Ross B. Girshick and
Piotr Doll{\'{a}}r and
Zhuowen Tu and
Kaiming He},
title = {Aggregated Residual Transformations for Deep Neural Networks},
journal = {CoRR},
volume = {abs/1611.05431},
year = {2016},
url = {http://arxiv.org/abs/1611.05431},
archivePrefix = {arXiv},
eprint = {1611.05431},
timestamp = {Mon, 13 Aug 2018 16:45:58 +0200},
biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: ResNeXt
Paper:
Title: Aggregated Residual Transformations for Deep Neural Networks
URL: https://paperswithcode.com/paper/aggregated-residual-transformations-for-deep
Models:
- Name: resnext101_32x8d
In Collection: ResNeXt
Metadata:
FLOPs: 21180417024
Parameters: 88790000
File Size: 356082095
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnext101_32x8d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnet.py#L877
Weights: https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.3%
Top 5 Accuracy: 94.53%
- Name: resnext50_32x4d
In Collection: ResNeXt
Metadata:
FLOPs: 5472648192
Parameters: 25030000
File Size: 100435887
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnext50_32x4d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnet.py#L851
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.79%
Top 5 Accuracy: 94.61%
- Name: resnext50d_32x4d
In Collection: ResNeXt
Metadata:
FLOPs: 5781119488
Parameters: 25050000
File Size: 100515304
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnext50d_32x4d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnet.py#L869
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.67%
Top 5 Accuracy: 94.87%
- Name: tv_resnext50_32x4d
In Collection: ResNeXt
Metadata:
FLOPs: 5472648192
Parameters: 25030000
File Size: 100441675
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: tv_resnext50_32x4d
LR: 0.1
Epochs: 90
Crop Pct: '0.875'
LR Gamma: 0.1
Momentum: 0.9
Batch Size: 32
Image Size: '224'
LR Step Size: 30
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L842
Weights: https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.61%
Top 5 Accuracy: 93.68%
-->
| pytorch-image-models/hfdocs/source/models/resnext.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/resnext.mdx",
"repo_id": "pytorch-image-models",
"token_count": 3056
} | 189 |
# (Tensorflow) MobileNet v3
**MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block).
The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
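For reference, hard swish is defined as \\( x \cdot \frac{\text{ReLU6}(x + 3)}{6} \\), a cheap piecewise approximation of swish (a minimal sketch):
```py
>>> import torch
>>> import torch.nn.functional as F
>>> def hard_swish(x):
...     return x * F.relu6(x + 3.) / 6.
>>> print(hard_swish(torch.tensor([-1.0, 0.0, 3.0])))
>>> # prints: tensor([-0.3333, 0.0000, 3.0000])
```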
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities, one per line, like:
>>> # Samoyed 0.6425196528434753
>>> # Pomeranian 0.04062102362513542
>>> # ...
```
Replace the model name with the variant you want to use, e.g. `tf_mobilenetv3_large_075`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model from scratch.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1905-02244,
author = {Andrew Howard and
Mark Sandler and
Grace Chu and
Liang{-}Chieh Chen and
Bo Chen and
Mingxing Tan and
Weijun Wang and
Yukun Zhu and
Ruoming Pang and
Vijay Vasudevan and
Quoc V. Le and
Hartwig Adam},
title = {Searching for MobileNetV3},
journal = {CoRR},
volume = {abs/1905.02244},
year = {2019},
url = {http://arxiv.org/abs/1905.02244},
archivePrefix = {arXiv},
eprint = {1905.02244},
timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: TF MobileNet V3
Paper:
Title: Searching for MobileNetV3
URL: https://paperswithcode.com/paper/searching-for-mobilenetv3
Models:
- Name: tf_mobilenetv3_large_075
In Collection: TF MobileNet V3
Metadata:
FLOPs: 194323712
Parameters: 3990000
File Size: 16097377
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: tf_mobilenetv3_large_075
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L394
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 73.45%
Top 5 Accuracy: 91.34%
- Name: tf_mobilenetv3_large_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 274535288
Parameters: 5480000
File Size: 22076649
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: tf_mobilenetv3_large_100
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L403
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.51%
Top 5 Accuracy: 92.61%
- Name: tf_mobilenetv3_large_minimal_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 267216928
Parameters: 3920000
File Size: 15836368
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: tf_mobilenetv3_large_minimal_100
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L412
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 72.24%
Top 5 Accuracy: 90.64%
- Name: tf_mobilenetv3_small_075
In Collection: TF MobileNet V3
Metadata:
FLOPs: 48457664
Parameters: 2040000
File Size: 8242701
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: tf_mobilenetv3_small_075
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bilinear
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L421
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 65.72%
Top 5 Accuracy: 86.13%
- Name: tf_mobilenetv3_small_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 65450600
Parameters: 2540000
File Size: 10256398
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: tf_mobilenetv3_small_100
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bilinear
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L430
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 67.92%
Top 5 Accuracy: 87.68%
- Name: tf_mobilenetv3_small_minimal_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 60827936
Parameters: 2040000
File Size: 8258083
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: tf_mobilenetv3_small_minimal_100
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bilinear
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L439
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 62.91%
Top 5 Accuracy: 84.24%
--> | pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx",
"repo_id": "pytorch-image-models",
"token_count": 4781
} | 190 |
""" ONNX-runtime validation script
This script was created to verify accuracy and performance of exported ONNX
models running with the onnxruntime. It utilizes the PyTorch dataloader/processing
pipeline for a fair comparison against the originals.
Copyright 2020 Ross Wightman
"""
import argparse
import numpy as np
import onnxruntime
from timm.data import create_loader, resolve_data_config, create_dataset
from timm.utils import AverageMeter
import time
parser = argparse.ArgumentParser(description='ONNX Validation')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--onnx-input', default='', type=str, metavar='PATH',
help='path to onnx model/weights file')
parser.add_argument('--onnx-output-opt', default='', type=str, metavar='PATH',
help='path to output optimized onnx graph')
parser.add_argument('--profile', action='store_true', default=False,
help='Enable profiler output.')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT',
help='Override default crop pct of 0.875')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
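# Example invocation (paths are illustrative):
#   python onnx_validate.py /path/to/imagenet/val --onnx-input model.onnx -b 64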
def main():
args = parser.parse_args()
args.gpu_id = 0
# Set graph optimization level
sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
if args.profile:
sess_options.enable_profiling = True
if args.onnx_output_opt:
sess_options.optimized_model_filepath = args.onnx_output_opt
session = onnxruntime.InferenceSession(args.onnx_input, sess_options)
data_config = resolve_data_config(vars(args))
loader = create_loader(
create_dataset('', args.data),
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=False,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=data_config['crop_pct']
)
input_name = session.get_inputs()[0].name
batch_time = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
for i, (input, target) in enumerate(loader):
# run the net and return prediction
output = session.run([], {input_name: input.data.numpy()})
output = output[0]
# measure accuracy and record loss
prec1, prec5 = accuracy_np(output, target.numpy())
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print(
f'Test: [{i}/{len(loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {input.size(0) / batch_time.avg:.3f}/s, '
f'{100 * batch_time.avg / input.size(0):.3f} ms/sample) \t'
f'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
f'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'
)
print(f' * Prec@1 {top1.avg:.3f} ({100-top1.avg:.3f}) Prec@5 {top5.avg:.3f} ({100.-top5.avg:.3f})')
def accuracy_np(output, target):
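# sort class indices by descending score; a prediction counts as top-k correct
# when the target appears among the k highest-scoring indices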
max_indices = np.argsort(output, axis=1)[:, ::-1]
top5 = 100 * np.equal(max_indices[:, :5], target[:, np.newaxis]).sum(axis=1).mean()
top1 = 100 * np.equal(max_indices[:, 0], target).mean()
return top1, top5
if __name__ == '__main__':
main()
| pytorch-image-models/onnx_validate.py/0 | {
"file_path": "pytorch-image-models/onnx_validate.py",
"repo_id": "pytorch-image-models",
"token_count": 1960
} | 191 |
""" Optimzier Tests
These tests were adapted from PyTorch' optimizer tests.
"""
import math
import pytest
import functools
from copy import deepcopy
import torch
from torch.testing._internal.common_utils import TestCase
from torch.nn import Parameter
from timm.scheduler import PlateauLRScheduler
from timm.optim import create_optimizer_v2
import importlib
import os
torch_backend = os.environ.get('TORCH_BACKEND')
if torch_backend is not None:
importlib.import_module(torch_backend)
torch_device = os.environ.get('TORCH_DEVICE', 'cuda')
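# TORCH_BACKEND optionally names a module to import (registering an alternate device
# backend); TORCH_DEVICE selects the device used by the device-placement tests below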
# HACK relying on internal PyTorch test functionality for comparisons that I don't want to write
torch_tc = TestCase()
def _test_basic_cases_template(weight, bias, input, constructor, scheduler_constructors):
weight = Parameter(weight)
bias = Parameter(bias)
input = Parameter(input)
optimizer = constructor(weight, bias)
schedulers = []
for scheduler_constructor in scheduler_constructors:
schedulers.append(scheduler_constructor(optimizer))
# to check if the optimizer can be printed as a string
optimizer.__repr__()
def fn():
optimizer.zero_grad()
y = weight.mv(input)
if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device():
y = y.cuda(bias.get_device())
loss = (y + bias).pow(2).sum()
loss.backward()
return loss
initial_value = fn().item()
for _i in range(200):
for scheduler in schedulers:
if isinstance(scheduler, PlateauLRScheduler):
val_loss = fn()
scheduler.step(val_loss)
else:
scheduler.step()
optimizer.step(fn)
assert fn().item() < initial_value
def _test_state_dict(weight, bias, input, constructor):
weight = Parameter(weight)
bias = Parameter(bias)
input = Parameter(input)
def fn_base(optimizer, weight, bias):
optimizer.zero_grad()
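# NOTE: `input_device` is a closure variable assigned later in this function;
# it is only referenced once the parameters have been moved off-CPU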
i = input_device if weight.device.type != 'cpu' else input
loss = (weight.mv(i) + bias).pow(2).sum()
loss.backward()
return loss
optimizer = constructor(weight, bias)
fn = functools.partial(fn_base, optimizer, weight, bias)
# Prime the optimizer
for _i in range(20):
optimizer.step(fn)
# Clone the weights and construct new optimizer for them
with torch.no_grad():
weight_c = Parameter(weight.clone().detach())
bias_c = Parameter(bias.clone().detach())
optimizer_c = constructor(weight_c, bias_c)
fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c)
# Load state dict
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_c.load_state_dict(state_dict_c)
# Run both optimizations in parallel
for _i in range(20):
optimizer.step(fn)
optimizer_c.step(fn_c)
torch_tc.assertEqual(weight, weight_c)
torch_tc.assertEqual(bias, bias_c)
# Make sure state dict is deterministic with equal but not identical parameters
torch_tc.assertEqual(optimizer.state_dict(), optimizer_c.state_dict())
# Make sure repeated parameters have identical representation in state dict
optimizer_c.param_groups.extend(optimizer_c.param_groups)
torch_tc.assertEqual(optimizer.state_dict()['param_groups'][-1], optimizer_c.state_dict()['param_groups'][-1])
# Check that state dict can be loaded even when we cast parameters
# to a different type and move to a different device.
if torch_device == 'cpu':
return
elif torch_device == 'cuda' and not torch.cuda.is_available():
return
with torch.no_grad():
input_device = Parameter(input.clone().detach().float().to(torch_device))
weight_device = Parameter(weight.clone().detach().to(torch_device))
bias_device = Parameter(bias.clone().detach().to(torch_device))
optimizer_device = constructor(weight_device, bias_device)
fn_device = functools.partial(fn_base, optimizer_device, weight_device, bias_device)
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_device.load_state_dict(state_dict_c)
# Make sure state dict wasn't modified
torch_tc.assertEqual(state_dict, state_dict_c)
for _i in range(20):
optimizer.step(fn)
optimizer_device.step(fn_device)
torch_tc.assertEqual(weight, weight_device)
torch_tc.assertEqual(bias, bias_device)
# validate deepcopy() copies all public attributes
def getPublicAttr(obj):
return set(k for k in obj.__dict__ if not k.startswith('_'))
assert getPublicAttr(optimizer) == getPublicAttr(deepcopy(optimizer))
def _test_basic_cases(constructor, scheduler_constructors=None):
if scheduler_constructors is None:
scheduler_constructors = []
_test_state_dict(
torch.randn(10, 5),
torch.randn(10),
torch.randn(5),
constructor
)
_test_basic_cases_template(
torch.randn(10, 5),
torch.randn(10),
torch.randn(5),
constructor,
scheduler_constructors
)
# non-contiguous parameters
_test_basic_cases_template(
torch.randn(10, 5, 2)[..., 0],
torch.randn(10, 2)[..., 0],
torch.randn(5),
constructor,
scheduler_constructors
)
# CUDA
if torch_device == 'cpu':
return
elif torch_device == 'cuda' and not torch.cuda.is_available():
return
_test_basic_cases_template(
torch.randn(10, 5).to(torch_device),
torch.randn(10).to(torch_device),
torch.randn(5).to(torch_device),
constructor,
scheduler_constructors
)
def _test_model(optimizer, params, device=torch.device('cpu')):
weight = torch.tensor(
[[-0.2109, -0.4976], [-0.1413, -0.3420], [-0.2524, 0.6976]],
device=device, requires_grad=True)
bias = torch.tensor([-0.1085, -0.2979, 0.6892], device=device, requires_grad=True)
weight2 = torch.tensor([[-0.0508, -0.3941, -0.2843]], device=device, requires_grad=True)
bias2 = torch.tensor([-0.0711], device=device, requires_grad=True)
input = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], device=device).reshape(3, 2)
model = torch.nn.Sequential(torch.nn.Linear(2, 3),
torch.nn.Sigmoid(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid())
model.to(device)
pretrained_dict = model.state_dict()
pretrained_dict['0.weight'] = weight
pretrained_dict['0.bias'] = bias
pretrained_dict['2.weight'] = weight2
pretrained_dict['2.bias'] = bias2
model.load_state_dict(pretrained_dict)
optimizer = create_optimizer_v2(model, opt=optimizer, **params)
prev_loss = float('inf')
for i in range(20):
optimizer.zero_grad()
output = model(input)
loss = output.sum()
loss.backward()
loss = loss.item()
assert loss < prev_loss
prev_loss = loss
optimizer.step()
def rosenbrock(tensor):
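# classic 2-D Rosenbrock function f(x, y) = (1 - x)^2 + 100 * (y - x^2)^2, minimized at (1, 1)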
x, y = tensor
return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
def drosenbrock(tensor):
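# analytic gradient of the Rosenbrock function above: (df/dx, df/dy)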
x, y = tensor
return torch.tensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2)))
def _test_rosenbrock(constructor, scheduler_constructors=None):
if scheduler_constructors is None:
scheduler_constructors = []
params_t = torch.tensor([1.5, 1.5])
params = Parameter(params_t)
optimizer = constructor([params])
schedulers = []
for scheduler_constructor in scheduler_constructors:
schedulers.append(scheduler_constructor(optimizer))
solution = torch.tensor([1, 1])
initial_dist = params.clone().detach().dist(solution)
def eval(params, w):
# Depending on w, provide only the x or y gradient
optimizer.zero_grad()
loss = rosenbrock(params)
loss.backward()
grad = drosenbrock(params.clone().detach())
# NB: We torture test the optimizer by returning an
# uncoalesced sparse tensor
if w:
i = torch.LongTensor([[0, 0]])
x = grad[0]
v = torch.tensor([x / 4., x - x / 4.])
else:
i = torch.LongTensor([[1, 1]])
y = grad[1]
v = torch.tensor([y - y / 4., y / 4.])
x = torch.sparse.DoubleTensor(i, v, torch.Size([2])).to(dtype=v.dtype)
with torch.no_grad():
params.grad = x.to_dense()
return loss
for i in range(2000):
# Do cyclic coordinate descent
w = i % 2
optimizer.step(functools.partial(eval, params, w))
for scheduler in schedulers:
if isinstance(scheduler, PlateauLRScheduler):
scheduler.step(rosenbrock(params))
else:
scheduler.step()
torch_tc.assertLessEqual(params.clone().detach().dist(solution), initial_dist)
def _build_params_dict(weight, bias, **kwargs):
return [{'params': [weight]}, dict(params=[bias], **kwargs)]
def _build_params_dict_single(weight, bias, **kwargs):
return [dict(params=bias, **kwargs)]
#@pytest.mark.parametrize('optimizer', ['sgd', 'momentum'])
# FIXME the momentum variant frequently fails in the GitHub runner, but never locally after many attempts
@pytest.mark.parametrize('optimizer', ['sgd'])
def test_sgd(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=1e-2),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-2),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-2), optimizer)
)
# _test_basic_cases(
# lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3),
# [lambda opt: StepLR(opt, gamma=0.9, step_size=10)]
# )
# _test_basic_cases(
# lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3),
# [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="linear")]
# )
# _test_basic_cases(
# lambda weight, bias: optimizer([weight, bias], lr=1e-3),
# [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="constant")]
# )
# _test_basic_cases(
# lambda weight, bias: optimizer([weight, bias], lr=1e-3),
# [lambda opt: StepLR(opt, gamma=0.9, step_size=10),
# lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4)]
# )
# _test_basic_cases(
# lambda weight, bias: optimizer([weight, bias], lr=1e-3),
# [lambda opt: StepLR(opt, gamma=0.9, step_size=10),
# lambda opt: ReduceLROnPlateau(opt)]
# )
# _test_basic_cases(
# lambda weight, bias: optimizer([weight, bias], lr=1e-3),
# [lambda opt: StepLR(opt, gamma=0.99, step_size=10),
# lambda opt: ExponentialLR(opt, gamma=0.99),
# lambda opt: ReduceLROnPlateau(opt)]
# )
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1, weight_decay=.1)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['adamw', 'adam', 'nadam', 'adamax'])
def test_adam(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['adabelief'])
def test_adabelief(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['radam', 'radabelief'])
def test_rectified(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['adadelta', 'adagrad'])
def test_adaother(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-1)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['adafactor'])
def test_adafactor(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(_build_params_dict_single(weight, bias), optimizer)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['lamb', 'lambc'])
def test_lamb(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=1e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['lars', 'larc', 'nlars', 'nlarc'])
def test_lars(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=1e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['madgrad', 'madgradw'])
def test_madgrad(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-2)
)
_test_model(optimizer, dict(lr=1e-2))
@pytest.mark.parametrize('optimizer', ['novograd'])
def test_novograd(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['rmsprop', 'rmsproptf'])
def test_rmsprop(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-2)
)
_test_model(optimizer, dict(lr=1e-2))
@pytest.mark.parametrize('optimizer', ['adamp'])
def test_adamp(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['sgdp'])
def test_sgdp(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['lookahead_sgd', 'lookahead_momentum'])
def test_lookahead_sgd(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
@pytest.mark.parametrize('optimizer', ['lookahead_adamw', 'lookahead_adam'])
def test_lookahead_adam(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
@pytest.mark.parametrize('optimizer', ['lookahead_radam'])
def test_lookahead_radam(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-4)
)
# ---- pytorch-image-models/tests/test_optim.py ----
""" Mixup and Cutmix
Papers:
mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)
CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899)
Code Reference:
CutMix: https://github.com/clovaai/CutMix-PyTorch
Hacked together by / Copyright 2019, Ross Wightman
"""
import numpy as np
import torch
def one_hot(x, num_classes, on_value=1., off_value=0.):
    # scatter integer class labels into a dense (batch, num_classes) target tensor
    x = x.long().view(-1, 1)
    return torch.full((x.size()[0], num_classes), off_value, device=x.device).scatter_(1, x, on_value)
def mixup_target(target, num_classes, lam=1., smoothing=0.0):
off_value = smoothing / num_classes
on_value = 1. - smoothing + off_value
y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value)
y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value)
return y1 * lam + y2 * (1. - lam)
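# Illustrative usage (not part of the original module): with lam=0.7 and no
# smoothing, each row mixes a sample's own class with the class of its
# batch-flipped partner.
def _mixup_target_example():
    target = torch.tensor([0, 2])
    mixed = mixup_target(target, num_classes=3, lam=0.7)
    assert torch.allclose(mixed[0], torch.tensor([0.7, 0.0, 0.3]))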
def rand_bbox(img_shape, lam, margin=0., count=None):
""" Standard CutMix bounding-box
Generates a random square bbox based on lambda value. This impl includes
support for enforcing a border margin as percent of bbox dimensions.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
        count (int): Number of bboxes to generate
"""
ratio = np.sqrt(1 - lam)
img_h, img_w = img_shape[-2:]
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)
cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
yl = np.clip(cy - cut_h // 2, 0, img_h)
yh = np.clip(cy + cut_h // 2, 0, img_h)
xl = np.clip(cx - cut_w // 2, 0, img_w)
xh = np.clip(cx + cut_w // 2, 0, img_w)
return yl, yh, xl, xh
def rand_bbox_minmax(img_shape, minmax, count=None):
""" Min-Max CutMix bounding-box
Inspired by Darknet cutmix impl, generates a random rectangular bbox
based on min/max percent values applied to each dimension of the input image.
    Typical defaults for minmax are in the 0.2-0.3 range for min and the 0.8-0.9 range for max.
Args:
img_shape (tuple): Image shape as tuple
minmax (tuple or list): Min and max bbox ratios (as percent of image size)
        count (int): Number of bboxes to generate
"""
assert len(minmax) == 2
img_h, img_w = img_shape[-2:]
cut_h = np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count)
cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count)
yl = np.random.randint(0, img_h - cut_h, size=count)
xl = np.random.randint(0, img_w - cut_w, size=count)
yu = yl + cut_h
xu = xl + cut_w
return yl, yu, xl, xu
def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None):
""" Generate bbox and apply lambda correction.
"""
if ratio_minmax is not None:
yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count)
else:
yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)
if correct_lam or ratio_minmax is not None:
bbox_area = (yu - yl) * (xu - xl)
lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])
return (yl, yu, xl, xu), lam
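# Illustrative check (not part of the original module): with the default
# correct_lam=True, the returned lambda reflects the clipped bbox area rather
# than the originally requested one.
def _cutmix_lam_example():
    (yl, yu, xl, xu), lam = cutmix_bbox_and_lam((3, 224, 224), lam=0.75)
    cut_area = (yu - yl) * (xu - xl)
    assert abs(lam - (1. - cut_area / float(224 * 224))) < 1e-6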
class Mixup:
""" Mixup/Cutmix that applies different params to each element or whole batch
Args:
mixup_alpha (float): mixup alpha value, mixup is active if > 0.
cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.
cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.
prob (float): probability of applying mixup or cutmix per batch or element
switch_prob (float): probability of switching to cutmix instead of mixup when both are active
        mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element))
correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders
label_smoothing (float): apply label smoothing to the mixed target tensor
num_classes (int): number of classes for target
"""
def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,
mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000):
self.mixup_alpha = mixup_alpha
self.cutmix_alpha = cutmix_alpha
self.cutmix_minmax = cutmix_minmax
if self.cutmix_minmax is not None:
assert len(self.cutmix_minmax) == 2
# force cutmix alpha == 1.0 when minmax active to keep logic simple & safe
self.cutmix_alpha = 1.0
self.mix_prob = prob
self.switch_prob = switch_prob
self.label_smoothing = label_smoothing
self.num_classes = num_classes
self.mode = mode
self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix
        self.mixup_enabled = True  # set to false to disable mixing (intended to be set by train loop)
def _params_per_elem(self, batch_size):
lam = np.ones(batch_size, dtype=np.float32)
use_cutmix = np.zeros(batch_size, dtype=bool)
if self.mixup_enabled:
if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
use_cutmix = np.random.rand(batch_size) < self.switch_prob
lam_mix = np.where(
use_cutmix,
np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),
np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))
elif self.mixup_alpha > 0.:
lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)
elif self.cutmix_alpha > 0.:
use_cutmix = np.ones(batch_size, dtype=bool)
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)
else:
assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)
return lam, use_cutmix
def _params_per_batch(self):
lam = 1.
use_cutmix = False
if self.mixup_enabled and np.random.rand() < self.mix_prob:
if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
use_cutmix = np.random.rand() < self.switch_prob
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \
np.random.beta(self.mixup_alpha, self.mixup_alpha)
elif self.mixup_alpha > 0.:
lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)
elif self.cutmix_alpha > 0.:
use_cutmix = True
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
else:
assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
lam = float(lam_mix)
return lam, use_cutmix
def _mix_elem(self, x):
batch_size = len(x)
lam_batch, use_cutmix = self._params_per_elem(batch_size)
x_orig = x.clone() # need to keep an unmodified original for mixing source
for i in range(batch_size):
j = batch_size - i - 1
lam = lam_batch[i]
if lam != 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
x[i] = x[i] * lam + x_orig[j] * (1 - lam)
return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
def _mix_pair(self, x):
batch_size = len(x)
lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
x_orig = x.clone() # need to keep an unmodified original for mixing source
for i in range(batch_size // 2):
j = batch_size - i - 1
lam = lam_batch[i]
if lam != 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
x[i] = x[i] * lam + x_orig[j] * (1 - lam)
x[j] = x[j] * lam + x_orig[i] * (1 - lam)
lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
def _mix_batch(self, x):
lam, use_cutmix = self._params_per_batch()
if lam == 1.:
return 1.
if use_cutmix:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]
else:
x_flipped = x.flip(0).mul_(1. - lam)
x.mul_(lam).add_(x_flipped)
return lam
def __call__(self, x, target):
assert len(x) % 2 == 0, 'Batch size should be even when using this'
if self.mode == 'elem':
lam = self._mix_elem(x)
elif self.mode == 'pair':
lam = self._mix_pair(x)
else:
lam = self._mix_batch(x)
target = mixup_target(target, self.num_classes, lam, self.label_smoothing)
return x, target
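# Illustrative training-loop sketch (not part of the original module); model,
# criterion, x, target and mixup_fn are placeholders. Mixup is applied to the
# batch before the forward pass and the dense mixed targets are scored with a
# soft-target loss such as timm's SoftTargetCrossEntropy.
def _mixup_train_step(model, criterion, x, target, mixup_fn):
    x, target = mixup_fn(x, target)
    return criterion(model(x), target)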
class FastCollateMixup(Mixup):
""" Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch
A Mixup impl that's performed while collating the batches.
"""
def _mix_elem_collate(self, output, batch, half=False):
batch_size = len(batch)
num_elem = batch_size // 2 if half else batch_size
assert len(output) == num_elem
lam_batch, use_cutmix = self._params_per_elem(num_elem)
for i in range(num_elem):
j = batch_size - i - 1
lam = lam_batch[i]
mixed = batch[i][0]
if lam != 1.:
if use_cutmix[i]:
if not half:
mixed = mixed.copy()
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
np.rint(mixed, out=mixed)
output[i] += torch.from_numpy(mixed.astype(np.uint8))
if half:
lam_batch = np.concatenate((lam_batch, np.ones(num_elem)))
return torch.tensor(lam_batch).unsqueeze(1)
def _mix_pair_collate(self, output, batch):
batch_size = len(batch)
lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
for i in range(batch_size // 2):
j = batch_size - i - 1
lam = lam_batch[i]
mixed_i = batch[i][0]
mixed_j = batch[j][0]
assert 0 <= lam <= 1.0
if lam < 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
patch_i = mixed_i[:, yl:yh, xl:xh].copy()
mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh]
mixed_j[:, yl:yh, xl:xh] = patch_i
lam_batch[i] = lam
else:
mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - lam)
mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam)
mixed_i = mixed_temp
np.rint(mixed_j, out=mixed_j)
np.rint(mixed_i, out=mixed_i)
output[i] += torch.from_numpy(mixed_i.astype(np.uint8))
output[j] += torch.from_numpy(mixed_j.astype(np.uint8))
lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
return torch.tensor(lam_batch).unsqueeze(1)
def _mix_batch_collate(self, output, batch):
batch_size = len(batch)
lam, use_cutmix = self._params_per_batch()
if use_cutmix:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
for i in range(batch_size):
j = batch_size - i - 1
mixed = batch[i][0]
if lam != 1.:
if use_cutmix:
mixed = mixed.copy() # don't want to modify the original while iterating
mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
else:
mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
np.rint(mixed, out=mixed)
output[i] += torch.from_numpy(mixed.astype(np.uint8))
return lam
def __call__(self, batch, _=None):
batch_size = len(batch)
assert batch_size % 2 == 0, 'Batch size should be even when using this'
half = 'half' in self.mode
if half:
batch_size //= 2
output = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
if self.mode == 'elem' or self.mode == 'half':
lam = self._mix_elem_collate(output, batch, half=half)
elif self.mode == 'pair':
lam = self._mix_pair_collate(output, batch)
else:
lam = self._mix_batch_collate(output, batch)
target = torch.tensor([b[1] for b in batch], dtype=torch.int64)
target = mixup_target(target, self.num_classes, lam, self.label_smoothing)
target = target[:batch_size]
return output, target
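# Illustrative DataLoader wiring (not part of the original module), assuming the
# dataset yields (uint8 CHW numpy image, int label) samples as produced by
# timm's prefetcher transforms. An even batch size is required, hence the even
# default plus drop_last=True.
def _make_mixup_loader(dataset, batch_size=64, num_classes=1000):
    collate = FastCollateMixup(mixup_alpha=0.8, cutmix_alpha=1.0, num_classes=num_classes)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, collate_fn=collate, drop_last=True)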
# ---- pytorch-image-models/timm/data/mixup.py ----
""" Tensorflow Preprocessing Adapter
Allows use of Tensorflow preprocessing pipeline in PyTorch Transform
Copyright of original Tensorflow code below.
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet preprocessing for MnasNet."""
import tensorflow.compat.v1 as tf
import numpy as np
IMAGE_SIZE = 224
CROP_PADDING = 32
tf.compat.v1.disable_eager_execution()
def distorted_bounding_box_crop(image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image_bytes: `Tensor` of binary image data.
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image
      must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
cropped image `Tensor`
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
shape = tf.image.extract_jpeg_shape(image_bytes)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def _at_least_x_are_equal(a, b, x):
"""At least `x` of `a` and `b` `Tensors` are equal."""
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x)
def _decode_and_random_crop(image_bytes, image_size, resize_method):
"""Make a random crop of image_size."""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = distorted_bounding_box_crop(
image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4, 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=10,
scope=None)
original_shape = tf.image.extract_jpeg_shape(image_bytes)
bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
image = tf.cond(
bad,
      lambda: _decode_and_center_crop(image_bytes, image_size, resize_method),
lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0])
return image
def _decode_and_center_crop(image_bytes, image_size, resize_method):
"""Crops to center of image with padding then scales image_size."""
shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + CROP_PADDING)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = tf.image.resize([image], [image_size, image_size], resize_method)[0]
return image
def _flip(image):
"""Random horizontal image flip."""
image = tf.image.random_flip_left_right(image)
return image
def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
interpolation: image interpolation method
Returns:
A preprocessed image `Tensor`.
"""
resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR
image = _decode_and_random_crop(image_bytes, image_size, resize_method)
image = _flip(image)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
interpolation: image interpolation method
Returns:
A preprocessed image `Tensor`.
"""
resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR
image = _decode_and_center_crop(image_bytes, image_size, resize_method)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_image(image_bytes,
is_training=False,
use_bfloat16=False,
image_size=IMAGE_SIZE,
interpolation='bicubic'):
"""Preprocesses the given image.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
is_training: `bool` for whether the preprocessing is for training.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
interpolation: image interpolation method
Returns:
A preprocessed image `Tensor` with value range of [0, 255].
"""
if is_training:
return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation)
else:
return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation)
class TfPreprocessTransform:
def __init__(self, is_training=False, size=224, interpolation='bicubic'):
self.is_training = is_training
self.size = size[0] if isinstance(size, tuple) else size
self.interpolation = interpolation
self._image_bytes = None
self.process_image = self._build_tf_graph()
self.sess = None
def _build_tf_graph(self):
with tf.device('/cpu:0'):
self._image_bytes = tf.placeholder(
shape=[],
dtype=tf.string,
)
img = preprocess_image(
self._image_bytes, self.is_training, False, self.size, self.interpolation)
return img
def __call__(self, image_bytes):
if self.sess is None:
self.sess = tf.Session()
img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes})
img = img.round().clip(0, 255).astype(np.uint8)
if img.ndim < 3:
img = np.expand_dims(img, axis=-1)
img = np.rollaxis(img, 2) # HWC to CHW
return img
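def _tf_preprocess_example(jpeg_path, size=224):
    # Illustrative sketch (not part of the original module): the transform
    # consumes raw JPEG bytes (not a decoded PIL image) and returns a CHW
    # uint8 numpy array ready for a prefetcher-style pipeline.
    with open(jpeg_path, 'rb') as f:
        image_bytes = f.read()
    transform = TfPreprocessTransform(is_training=False, size=size)
    return transform(image_bytes)  # np.uint8, shape (3, size, size)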
# ---- pytorch-image-models/timm/data/tf_preprocessing.py ----
""" Conv2d w/ Same Padding
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple, Optional
from .config import is_exportable, is_scriptable
from .padding import pad_same, pad_same_arg, get_padding_value
_USE_EXPORT_CONV = False
def conv2d_same(
x,
weight: torch.Tensor,
bias: Optional[torch.Tensor] = None,
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
dilation: Tuple[int, int] = (1, 1),
groups: int = 1,
):
x = pad_same(x, weight.shape[-2:], stride, dilation)
return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
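# Quick illustrative check (not part of the original module): 'SAME' padding
# keeps the output spatial size at ceil(input_size / stride), as in TensorFlow.
def _conv2d_same_check():
    x = torch.randn(1, 3, 7, 7)
    w = torch.randn(8, 3, 3, 3)
    out = conv2d_same(x, w, stride=(2, 2))
    assert out.shape[-2:] == (4, 4)  # ceil(7 / 2) == 4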
class Conv2dSame(nn.Conv2d):
""" Tensorflow like 'SAME' convolution wrapper for 2D convolutions
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
):
super(Conv2dSame, self).__init__(
in_channels, out_channels, kernel_size,
stride, 0, dilation, groups, bias,
)
def forward(self, x):
return conv2d_same(
x, self.weight, self.bias,
self.stride, self.padding, self.dilation, self.groups,
)
class Conv2dSameExport(nn.Conv2d):
""" ONNX export friendly Tensorflow like 'SAME' convolution wrapper for 2D convolutions
NOTE: This does not currently work with torch.jit.script
"""
# pylint: disable=unused-argument
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
):
super(Conv2dSameExport, self).__init__(
in_channels, out_channels, kernel_size,
stride, 0, dilation, groups, bias,
)
self.pad = None
self.pad_input_size = (0, 0)
def forward(self, x):
input_size = x.size()[-2:]
if self.pad is None:
pad_arg = pad_same_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation)
self.pad = nn.ZeroPad2d(pad_arg)
self.pad_input_size = input_size
x = self.pad(x)
return F.conv2d(
x, self.weight, self.bias,
self.stride, self.padding, self.dilation, self.groups,
)
def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
padding = kwargs.pop('padding', '')
kwargs.setdefault('bias', False)
padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
if is_dynamic:
if _USE_EXPORT_CONV and is_exportable():
# older PyTorch ver needed this to export same padding reasonably
assert not is_scriptable() # Conv2DSameExport does not work with jit
return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs)
else:
return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
else:
return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
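# Illustrative usage (not part of the original module): string padding 'same'
# resolves to static padding when possible; with stride > 1 it falls back to
# the dynamic Conv2dSame wrapper above.
def _same_pad_conv_example():
    conv = create_conv2d_pad(3, 8, kernel_size=3, stride=2, padding='same')
    return conv(torch.randn(1, 3, 7, 7))  # output spatial size: ceil(7 / 2) == 4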
# ---- pytorch-image-models/timm/layers/conv2d_same.py ----
""" Global Response Normalization Module
Based on the GRN layer presented in
`ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808
This implementation
* works for both NCHW and NHWC tensor layouts
* uses affine param names matching existing torch norm layers
* slightly improves eager mode performance via fused addcmul
Hacked together by / Copyright 2023 Ross Wightman
"""
import torch
from torch import nn as nn
class GlobalResponseNorm(nn.Module):
""" Global Response Normalization layer
"""
def __init__(self, dim, eps=1e-6, channels_last=True):
super().__init__()
self.eps = eps
if channels_last:
self.spatial_dim = (1, 2)
self.channel_dim = -1
self.wb_shape = (1, 1, 1, -1)
else:
self.spatial_dim = (2, 3)
self.channel_dim = 1
self.wb_shape = (1, -1, 1, 1)
self.weight = nn.Parameter(torch.zeros(dim))
self.bias = nn.Parameter(torch.zeros(dim))
def forward(self, x):
x_g = x.norm(p=2, dim=self.spatial_dim, keepdim=True)
x_n = x_g / (x_g.mean(dim=self.channel_dim, keepdim=True) + self.eps)
return x + torch.addcmul(self.bias.view(self.wb_shape), self.weight.view(self.wb_shape), x * x_n)
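# Illustrative check (not part of the original module): because weight and bias
# are zero-initialized, GRN starts out as an identity mapping in either layout.
def _grn_identity_check():
    grn = GlobalResponseNorm(8, channels_last=True)
    x = torch.randn(2, 4, 4, 8)  # NHWC
    assert torch.allclose(grn(x), x)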
# ---- pytorch-image-models/timm/layers/grn.py ----
""" Image to Patch Embedding using Conv2d
A convolution based approach to patchifying a 2D image w/ embedding projection.
Based on code in:
* https://github.com/google-research/vision_transformer
* https://github.com/google-research/big_vision/tree/main/big_vision
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
from typing import Callable, List, Optional, Tuple, Union
import torch
from torch import nn as nn
import torch.nn.functional as F
from .format import Format, nchw_to
from .helpers import to_2tuple
from .trace_utils import _assert
_logger = logging.getLogger(__name__)
class PatchEmbed(nn.Module):
""" 2D Image to Patch Embedding
"""
output_fmt: Format
dynamic_img_pad: torch.jit.Final[bool]
def __init__(
self,
img_size: Optional[int] = 224,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
norm_layer: Optional[Callable] = None,
flatten: bool = True,
output_fmt: Optional[str] = None,
bias: bool = True,
strict_img_size: bool = True,
dynamic_img_pad: bool = False,
):
super().__init__()
self.patch_size = to_2tuple(patch_size)
if img_size is not None:
self.img_size = to_2tuple(img_size)
self.grid_size = tuple([s // p for s, p in zip(self.img_size, self.patch_size)])
self.num_patches = self.grid_size[0] * self.grid_size[1]
else:
self.img_size = None
self.grid_size = None
self.num_patches = None
if output_fmt is not None:
self.flatten = False
self.output_fmt = Format(output_fmt)
else:
# flatten spatial dim and transpose to channels last, kept for bwd compat
self.flatten = flatten
self.output_fmt = Format.NCHW
self.strict_img_size = strict_img_size
self.dynamic_img_pad = dynamic_img_pad
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
B, C, H, W = x.shape
if self.img_size is not None:
if self.strict_img_size:
_assert(H == self.img_size[0], f"Input height ({H}) doesn't match model ({self.img_size[0]}).")
_assert(W == self.img_size[1], f"Input width ({W}) doesn't match model ({self.img_size[1]}).")
elif not self.dynamic_img_pad:
_assert(
H % self.patch_size[0] == 0,
f"Input height ({H}) should be divisible by patch size ({self.patch_size[0]})."
)
_assert(
W % self.patch_size[1] == 0,
f"Input width ({W}) should be divisible by patch size ({self.patch_size[1]})."
)
if self.dynamic_img_pad:
pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0]
pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1]
x = F.pad(x, (0, pad_w, 0, pad_h))
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # NCHW -> NLC
elif self.output_fmt != Format.NCHW:
x = nchw_to(x, self.output_fmt)
x = self.norm(x)
return x
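# Illustrative usage (not part of the original module): a 224x224 image with
# 16x16 patches yields a 14x14 grid, i.e. 196 tokens of width embed_dim.
def _patch_embed_example():
    embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
    tokens = embed(torch.randn(2, 3, 224, 224))
    assert tokens.shape == (2, 196, 768)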
class PatchEmbedWithSize(PatchEmbed):
""" 2D Image to Patch Embedding
"""
output_fmt: Format
def __init__(
self,
img_size: Optional[int] = 224,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
norm_layer: Optional[Callable] = None,
flatten: bool = True,
output_fmt: Optional[str] = None,
bias: bool = True,
):
super().__init__(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=norm_layer,
flatten=flatten,
output_fmt=output_fmt,
bias=bias,
)
def forward(self, x) -> Tuple[torch.Tensor, List[int]]:
B, C, H, W = x.shape
if self.img_size is not None:
_assert(H % self.patch_size[0] == 0, f"Input image height ({H}) must be divisible by patch size ({self.patch_size[0]}).")
_assert(W % self.patch_size[1] == 0, f"Input image width ({W}) must be divisible by patch size ({self.patch_size[1]}).")
x = self.proj(x)
grid_size = x.shape[-2:]
if self.flatten:
x = x.flatten(2).transpose(1, 2) # NCHW -> NLC
elif self.output_fmt != Format.NCHW:
x = nchw_to(x, self.output_fmt)
x = self.norm(x)
return x, grid_size
def resample_patch_embed(
patch_embed,
new_size: List[int],
interpolation: str = 'bicubic',
antialias: bool = True,
verbose: bool = False,
):
"""Resample the weights of the patch embedding kernel to target resolution.
We resample the patch embedding kernel by approximately inverting the effect
of patch resizing.
Code based on:
https://github.com/google-research/big_vision/blob/b00544b81f8694488d5f36295aeb7972f3755ffe/big_vision/models/proj/flexi/vit.py
    With this resizing, we can, for example, load a B/8 filter into a B/16 model
    and, on a 2x larger input image, the result will match.
Args:
patch_embed: original parameter to be resized.
        new_size (tuple(int, int)): target shape (height, width) only.
interpolation (str): interpolation for resize
antialias (bool): use anti-aliasing filter in resize
verbose (bool): log operation
Returns:
Resized patch embedding kernel.
"""
import numpy as np
try:
import functorch
vmap = functorch.vmap
except ImportError:
if hasattr(torch, 'vmap'):
vmap = torch.vmap
else:
assert False, "functorch or a version of torch with vmap is required for FlexiViT resizing."
assert len(patch_embed.shape) == 4, "Four dimensions expected"
assert len(new_size) == 2, "New shape should only be hw"
old_size = patch_embed.shape[-2:]
if tuple(old_size) == tuple(new_size):
return patch_embed
if verbose:
_logger.info(f"Resize patch embedding {patch_embed.shape} to {new_size}, w/ {interpolation} interpolation.")
def resize(x_np, _new_size):
x_tf = torch.Tensor(x_np)[None, None, ...]
x_upsampled = F.interpolate(
x_tf, size=_new_size, mode=interpolation, antialias=antialias)[0, 0, ...].numpy()
return x_upsampled
def get_resize_mat(_old_size, _new_size):
mat = []
for i in range(np.prod(_old_size)):
basis_vec = np.zeros(_old_size)
basis_vec[np.unravel_index(i, _old_size)] = 1.
mat.append(resize(basis_vec, _new_size).reshape(-1))
return np.stack(mat).T
resize_mat = get_resize_mat(old_size, new_size)
resize_mat_pinv = torch.tensor(np.linalg.pinv(resize_mat.T), device=patch_embed.device)
def resample_kernel(kernel):
resampled_kernel = resize_mat_pinv @ kernel.reshape(-1)
return resampled_kernel.reshape(new_size)
v_resample_kernel = vmap(vmap(resample_kernel, 0, 0), 1, 1)
orig_dtype = patch_embed.dtype
patch_embed = patch_embed.float()
patch_embed = v_resample_kernel(patch_embed)
patch_embed = patch_embed.to(orig_dtype)
return patch_embed
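# Illustrative usage (not part of the original module, requires torch.vmap or
# functorch): resize a ViT patch projection kernel from 16x16 to 8x8, e.g. when
# loading a /16 checkpoint into a /8 model, FlexiViT-style.
def _resample_example():
    kernel = torch.randn(768, 3, 16, 16)
    resized = resample_patch_embed(kernel, [8, 8])
    assert resized.shape == (768, 3, 8, 8)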
# def divs(n, m=None):
# m = m or n // 2
# if m == 1:
# return [1]
# if n % m == 0:
# return [m] + divs(n, m - 1)
# return divs(n, m - 1)
#
#
# class FlexiPatchEmbed(nn.Module):
# """ 2D Image to Patch Embedding w/ Flexible Patch sizes (FlexiViT)
# FIXME WIP
# """
# def __init__(
# self,
# img_size=240,
# patch_size=16,
# in_chans=3,
# embed_dim=768,
# base_img_size=240,
# base_patch_size=32,
# norm_layer=None,
# flatten=True,
# bias=True,
# ):
# super().__init__()
# self.img_size = to_2tuple(img_size)
# self.patch_size = to_2tuple(patch_size)
# self.num_patches = 0
#
# # full range for 240 = (5, 6, 8, 10, 12, 14, 15, 16, 20, 24, 30, 40, 48)
# self.seqhw = (6, 8, 10, 12, 14, 15, 16, 20, 24, 30)
#
# self.base_img_size = to_2tuple(base_img_size)
# self.base_patch_size = to_2tuple(base_patch_size)
# self.base_grid_size = tuple([i // p for i, p in zip(self.base_img_size, self.base_patch_size)])
# self.base_num_patches = self.base_grid_size[0] * self.base_grid_size[1]
#
# self.flatten = flatten
# self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=bias)
# self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
#
# def forward(self, x):
# B, C, H, W = x.shape
#
# if self.patch_size == self.base_patch_size:
# weight = self.proj.weight
# else:
# weight = resample_patch_embed(self.proj.weight, self.patch_size)
# patch_size = self.patch_size
# x = F.conv2d(x, weight, bias=self.proj.bias, stride=patch_size)
# if self.flatten:
# x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
# x = self.norm(x)
# return x
# ---- pytorch-image-models/timm/layers/patch_embed.py ----
from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel
from .binary_cross_entropy import BinaryCrossEntropy
from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from .jsd import JsdCrossEntropy
# ---- pytorch-image-models/timm/loss/__init__.py ----
import os
import pkgutil
from copy import deepcopy
from torch import nn as nn
from timm.layers import Conv2dSame, BatchNormAct2d, Linear
__all__ = ['extract_layer', 'set_layer', 'adapt_model_from_string', 'adapt_model_from_file']
def extract_layer(model, layer):
    """Return the sub-module at dotted path `layer`, unwrapping a leading 'module.' prefix if present."""
    layer = layer.split('.')
module = model
if hasattr(model, 'module') and layer[0] != 'module':
module = model.module
if not hasattr(model, 'module') and layer[0] == 'module':
layer = layer[1:]
for l in layer:
if hasattr(module, l):
if not l.isdigit():
module = getattr(module, l)
else:
module = module[int(l)]
else:
return module
return module
def set_layer(model, layer, val):
    """Assign `val` as the sub-module at dotted path `layer`, handling 'module.' prefixes and numeric indices."""
    layer = layer.split('.')
module = model
if hasattr(model, 'module') and layer[0] != 'module':
module = model.module
lst_index = 0
module2 = module
for l in layer:
if hasattr(module2, l):
if not l.isdigit():
module2 = getattr(module2, l)
else:
module2 = module2[int(l)]
lst_index += 1
lst_index -= 1
for l in layer[:lst_index]:
if not l.isdigit():
module = getattr(module, l)
else:
module = module[int(l)]
l = layer[lst_index]
setattr(module, l, val)
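# Illustrative usage (not part of the original module): dotted-path access lets
# callers swap layers in place; here we assume the model exposes its classifier
# as 'fc' (a hypothetical example, not a guarantee of any particular model).
def _swap_fc_example(model, num_classes):
    old_fc = extract_layer(model, 'fc')
    set_layer(model, 'fc', nn.Linear(old_fc.in_features, num_classes))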
def adapt_model_from_string(parent_module, model_string):
    """Rebuild `parent_module` with pruned channel counts parsed from a '***'-separated shape string."""
separator = '***'
state_dict = {}
lst_shape = model_string.split(separator)
for k in lst_shape:
k = k.split(':')
key = k[0]
shape = k[1][1:-1].split(',')
if shape[0] != '':
state_dict[key] = [int(i) for i in shape]
new_module = deepcopy(parent_module)
for n, m in parent_module.named_modules():
old_module = extract_layer(parent_module, n)
if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame):
if isinstance(old_module, Conv2dSame):
conv = Conv2dSame
else:
conv = nn.Conv2d
s = state_dict[n + '.weight']
in_channels = s[1]
out_channels = s[0]
g = 1
if old_module.groups > 1:
in_channels = out_channels
g = in_channels
new_conv = conv(
in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size,
bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation,
groups=g, stride=old_module.stride)
set_layer(new_module, n, new_conv)
elif isinstance(old_module, BatchNormAct2d):
new_bn = BatchNormAct2d(
state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum,
affine=old_module.affine, track_running_stats=True)
new_bn.drop = old_module.drop
new_bn.act = old_module.act
set_layer(new_module, n, new_bn)
elif isinstance(old_module, nn.BatchNorm2d):
new_bn = nn.BatchNorm2d(
num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum,
affine=old_module.affine, track_running_stats=True)
set_layer(new_module, n, new_bn)
elif isinstance(old_module, nn.Linear):
# FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer?
num_features = state_dict[n + '.weight'][1]
new_fc = Linear(
in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None)
set_layer(new_module, n, new_fc)
if hasattr(new_module, 'num_features'):
new_module.num_features = num_features
new_module.eval()
parent_module.eval()
return new_module
def adapt_model_from_file(parent_module, model_variant):
adapt_data = pkgutil.get_data(__name__, os.path.join('_pruned', model_variant + '.txt'))
return adapt_model_from_string(parent_module, adapt_data.decode('utf-8').strip())
# ---- pytorch-image-models/timm/models/_prune.py ----
"""PyTorch CspNet
A PyTorch implementation of Cross Stage Partial Networks including:
* CSPResNet50
* CSPResNeXt50
* CSPDarkNet53
* and DarkNet53 for good measure
Based on paper `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929
Reference impl via darknet cfg files at https://github.com/WongKinYiu/CrossStagePartialNetworks
Hacked together by / Copyright 2020 Ross Wightman
"""
from dataclasses import dataclass, asdict, replace
from functools import partial
from typing import Any, Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ClassifierHead, ConvNormAct, ConvNormActAa, DropPath, get_attn, create_act_layer, make_divisible
from ._builder import build_model_with_cfg
from ._manipulate import named_apply, MATCH_PREV_GROUP
from ._registry import register_model, generate_default_cfgs
__all__ = ['CspNet'] # model_registry will add each entrypoint fn to this
@dataclass
class CspStemCfg:
out_chs: Union[int, Tuple[int, ...]] = 32
stride: Union[int, Tuple[int, ...]] = 2
kernel_size: int = 3
padding: Union[int, str] = ''
pool: Optional[str] = ''
def _pad_arg(x, n):
# pads an argument tuple to specified n by padding with last value
if not isinstance(x, (tuple, list)):
x = (x,)
curr_n = len(x)
pad_n = n - curr_n
if pad_n <= 0:
return x[:n]
return tuple(x + (x[-1],) * pad_n)
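# Illustrative behaviour (not part of the original module):
#   _pad_arg(2, 4)      -> (2, 2, 2, 2)   scalar broadcast to length n
#   _pad_arg((1, 2), 4) -> (1, 2, 2, 2)   right-padded with the last value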
@dataclass
class CspStagesCfg:
depth: Tuple[int, ...] = (3, 3, 5, 2) # block depth (number of block repeats in stages)
out_chs: Tuple[int, ...] = (128, 256, 512, 1024) # number of output channels for blocks in stage
stride: Union[int, Tuple[int, ...]] = 2 # stride of stage
groups: Union[int, Tuple[int, ...]] = 1 # num kxk conv groups
block_ratio: Union[float, Tuple[float, ...]] = 1.0
bottle_ratio: Union[float, Tuple[float, ...]] = 1. # bottleneck-ratio of blocks in stage
avg_down: Union[bool, Tuple[bool, ...]] = False
attn_layer: Optional[Union[str, Tuple[str, ...]]] = None
attn_kwargs: Optional[Union[Dict, Tuple[Dict]]] = None
    stage_type: Union[str, Tuple[str]] = 'csp'  # stage type ('csp', 'cs3', 'dark')
    block_type: Union[str, Tuple[str]] = 'bottle'  # block type for stages ('bottle', 'dark')
# cross-stage only
expand_ratio: Union[float, Tuple[float, ...]] = 1.0
cross_linear: Union[bool, Tuple[bool, ...]] = False
down_growth: Union[bool, Tuple[bool, ...]] = False
def __post_init__(self):
n = len(self.depth)
assert len(self.out_chs) == n
self.stride = _pad_arg(self.stride, n)
self.groups = _pad_arg(self.groups, n)
self.block_ratio = _pad_arg(self.block_ratio, n)
self.bottle_ratio = _pad_arg(self.bottle_ratio, n)
self.avg_down = _pad_arg(self.avg_down, n)
self.attn_layer = _pad_arg(self.attn_layer, n)
self.attn_kwargs = _pad_arg(self.attn_kwargs, n)
self.stage_type = _pad_arg(self.stage_type, n)
self.block_type = _pad_arg(self.block_type, n)
self.expand_ratio = _pad_arg(self.expand_ratio, n)
self.cross_linear = _pad_arg(self.cross_linear, n)
self.down_growth = _pad_arg(self.down_growth, n)
@dataclass
class CspModelCfg:
stem: CspStemCfg
stages: CspStagesCfg
zero_init_last: bool = True # zero init last weight (usually bn) in residual path
act_layer: str = 'leaky_relu'
norm_layer: str = 'batchnorm'
aa_layer: Optional[str] = None # FIXME support string factory for this
def _cs3_cfg(
width_multiplier=1.0,
depth_multiplier=1.0,
avg_down=False,
act_layer='silu',
focus=False,
attn_layer=None,
attn_kwargs=None,
bottle_ratio=1.0,
block_type='dark',
):
if focus:
stem_cfg = CspStemCfg(
out_chs=make_divisible(64 * width_multiplier),
kernel_size=6, stride=2, padding=2, pool='')
else:
stem_cfg = CspStemCfg(
out_chs=tuple([make_divisible(c * width_multiplier) for c in (32, 64)]),
kernel_size=3, stride=2, pool='')
return CspModelCfg(
stem=stem_cfg,
stages=CspStagesCfg(
out_chs=tuple([make_divisible(c * width_multiplier) for c in (128, 256, 512, 1024)]),
depth=tuple([int(d * depth_multiplier) for d in (3, 6, 9, 3)]),
stride=2,
bottle_ratio=bottle_ratio,
block_ratio=0.5,
avg_down=avg_down,
attn_layer=attn_layer,
attn_kwargs=attn_kwargs,
stage_type='cs3',
block_type=block_type,
),
act_layer=act_layer,
)
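# Illustrative usage (not part of the original module): a half-width,
# half-depth CS3 config; channel counts pass through make_divisible.
def _cs3_cfg_example():
    cfg = _cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5)
    assert cfg.stages.out_chs == (64, 128, 256, 512)
    assert cfg.stages.depth == (1, 3, 4, 1)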
class BottleneckBlock(nn.Module):
""" ResNe(X)t Bottleneck Block
"""
def __init__(
self,
in_chs,
out_chs,
dilation=1,
bottle_ratio=0.25,
groups=1,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_last=False,
attn_layer=None,
drop_block=None,
drop_path=0.
):
super(BottleneckBlock, self).__init__()
mid_chs = int(round(out_chs * bottle_ratio))
ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
attn_last = attn_layer is not None and attn_last
attn_first = attn_layer is not None and not attn_last
self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=1, **ckwargs)
self.conv2 = ConvNormAct(
mid_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups,
drop_layer=drop_block, **ckwargs)
self.attn2 = attn_layer(mid_chs, act_layer=act_layer) if attn_first else nn.Identity()
self.conv3 = ConvNormAct(mid_chs, out_chs, kernel_size=1, apply_act=False, **ckwargs)
self.attn3 = attn_layer(out_chs, act_layer=act_layer) if attn_last else nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path else nn.Identity()
self.act3 = create_act_layer(act_layer)
def zero_init_last(self):
nn.init.zeros_(self.conv3.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
x = self.attn2(x)
x = self.conv3(x)
x = self.attn3(x)
x = self.drop_path(x) + shortcut
# FIXME partial shortcut needed if first block handled as per original, not used for my current impl
#x[:, :shortcut.size(1)] += shortcut
x = self.act3(x)
return x
class DarkBlock(nn.Module):
""" DarkNet Block
"""
def __init__(
self,
in_chs,
out_chs,
dilation=1,
bottle_ratio=0.5,
groups=1,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
drop_block=None,
drop_path=0.
):
super(DarkBlock, self).__init__()
mid_chs = int(round(out_chs * bottle_ratio))
ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=1, **ckwargs)
self.attn = attn_layer(mid_chs, act_layer=act_layer) if attn_layer is not None else nn.Identity()
self.conv2 = ConvNormAct(
mid_chs, out_chs, kernel_size=3, dilation=dilation, groups=groups,
drop_layer=drop_block, **ckwargs)
self.drop_path = DropPath(drop_path) if drop_path else nn.Identity()
def zero_init_last(self):
nn.init.zeros_(self.conv2.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.attn(x)
x = self.conv2(x)
x = self.drop_path(x) + shortcut
return x
class EdgeBlock(nn.Module):
""" EdgeResidual / Fused-MBConv / MobileNetV1-like 3x3 + 1x1 block (w/ activated output)
"""
def __init__(
self,
in_chs,
out_chs,
dilation=1,
bottle_ratio=0.5,
groups=1,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
drop_block=None,
drop_path=0.
):
super(EdgeBlock, self).__init__()
mid_chs = int(round(out_chs * bottle_ratio))
ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
self.conv1 = ConvNormAct(
in_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups,
drop_layer=drop_block, **ckwargs)
self.attn = attn_layer(mid_chs, act_layer=act_layer) if attn_layer is not None else nn.Identity()
self.conv2 = ConvNormAct(mid_chs, out_chs, kernel_size=1, **ckwargs)
self.drop_path = DropPath(drop_path) if drop_path else nn.Identity()
def zero_init_last(self):
nn.init.zeros_(self.conv2.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.attn(x)
x = self.conv2(x)
x = self.drop_path(x) + shortcut
return x
class CrossStage(nn.Module):
"""Cross Stage."""
def __init__(
self,
in_chs,
out_chs,
stride,
dilation,
depth,
block_ratio=1.,
bottle_ratio=1.,
expand_ratio=1.,
groups=1,
first_dilation=None,
avg_down=False,
down_growth=False,
cross_linear=False,
block_dpr=None,
block_fn=BottleneckBlock,
**block_kwargs,
):
super(CrossStage, self).__init__()
first_dilation = first_dilation or dilation
down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels
self.expand_chs = exp_chs = int(round(out_chs * expand_ratio))
block_out_chs = int(round(out_chs * block_ratio))
conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer'))
aa_layer = block_kwargs.pop('aa_layer', None)
if stride != 1 or first_dilation != dilation:
if avg_down:
self.conv_down = nn.Sequential(
nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling
ConvNormActAa(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs)
)
else:
self.conv_down = ConvNormActAa(
in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups,
aa_layer=aa_layer, **conv_kwargs)
prev_chs = down_chs
else:
self.conv_down = nn.Identity()
prev_chs = in_chs
        # FIXME this 1x1 expansion is pushed down into the cross and block paths in the darknet cfgs. There
        # is also a special case for the first stage in some of the models that results in an uneven split
        # across the two paths. I did it this way for simplicity for now.
self.conv_exp = ConvNormAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs)
prev_chs = exp_chs // 2 # output of conv_exp is always split in two
self.blocks = nn.Sequential()
for i in range(depth):
self.blocks.add_module(str(i), block_fn(
in_chs=prev_chs,
out_chs=block_out_chs,
dilation=dilation,
bottle_ratio=bottle_ratio,
groups=groups,
drop_path=block_dpr[i] if block_dpr is not None else 0.,
**block_kwargs,
))
prev_chs = block_out_chs
# transition convs
self.conv_transition_b = ConvNormAct(prev_chs, exp_chs // 2, kernel_size=1, **conv_kwargs)
self.conv_transition = ConvNormAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs)
def forward(self, x):
x = self.conv_down(x)
x = self.conv_exp(x)
xs, xb = x.split(self.expand_chs // 2, dim=1)
xb = self.blocks(xb)
xb = self.conv_transition_b(xb).contiguous()
out = self.conv_transition(torch.cat([xs, xb], dim=1))
return out
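# Illustrative sketch (assumption): with expand_ratio=2.0 the 1x1 expansion doubles
# out_chs before the channel split, giving equal halves for the cross (xs) and block
# (xb) paths. The helper name and concrete sizes are hypothetical.
def _demo_cross_stage():
    stage = CrossStage(
        in_chs=64, out_chs=128, stride=2, dilation=1, depth=2,
        expand_ratio=2., act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d)
    y = stage(torch.randn(2, 64, 32, 32))
    assert y.shape == (2, 128, 16, 16)  # stride-2 downsample, out_chs channels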
class CrossStage3(nn.Module):
"""Cross Stage 3.
Similar to CrossStage, but with only one transition conv for the output.
"""
def __init__(
self,
in_chs,
out_chs,
stride,
dilation,
depth,
block_ratio=1.,
bottle_ratio=1.,
expand_ratio=1.,
groups=1,
first_dilation=None,
avg_down=False,
down_growth=False,
cross_linear=False,
block_dpr=None,
block_fn=BottleneckBlock,
**block_kwargs,
):
super(CrossStage3, self).__init__()
first_dilation = first_dilation or dilation
down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels
self.expand_chs = exp_chs = int(round(out_chs * expand_ratio))
block_out_chs = int(round(out_chs * block_ratio))
conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer'))
aa_layer = block_kwargs.pop('aa_layer', None)
if stride != 1 or first_dilation != dilation:
if avg_down:
self.conv_down = nn.Sequential(
nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling
ConvNormActAa(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs)
)
else:
self.conv_down = ConvNormActAa(
in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups,
aa_layer=aa_layer, **conv_kwargs)
prev_chs = down_chs
else:
            self.conv_down = nn.Identity()  # no downsample; keeps forward() unconditional like CrossStage
prev_chs = in_chs
# expansion conv
self.conv_exp = ConvNormAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs)
prev_chs = exp_chs // 2 # expanded output is split in 2 for blocks and cross stage
self.blocks = nn.Sequential()
for i in range(depth):
self.blocks.add_module(str(i), block_fn(
in_chs=prev_chs,
out_chs=block_out_chs,
dilation=dilation,
bottle_ratio=bottle_ratio,
groups=groups,
drop_path=block_dpr[i] if block_dpr is not None else 0.,
**block_kwargs,
))
prev_chs = block_out_chs
# transition convs
self.conv_transition = ConvNormAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs)
def forward(self, x):
x = self.conv_down(x)
x = self.conv_exp(x)
x1, x2 = x.split(self.expand_chs // 2, dim=1)
x1 = self.blocks(x1)
out = self.conv_transition(torch.cat([x1, x2], dim=1))
return out
class DarkStage(nn.Module):
"""DarkNet stage."""
def __init__(
self,
in_chs,
out_chs,
stride,
dilation,
depth,
block_ratio=1.,
bottle_ratio=1.,
groups=1,
first_dilation=None,
avg_down=False,
block_fn=BottleneckBlock,
block_dpr=None,
**block_kwargs,
):
super(DarkStage, self).__init__()
first_dilation = first_dilation or dilation
conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer'))
aa_layer = block_kwargs.pop('aa_layer', None)
if avg_down:
self.conv_down = nn.Sequential(
nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling
ConvNormActAa(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs)
)
else:
self.conv_down = ConvNormActAa(
in_chs, out_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups,
aa_layer=aa_layer, **conv_kwargs)
prev_chs = out_chs
block_out_chs = int(round(out_chs * block_ratio))
self.blocks = nn.Sequential()
for i in range(depth):
self.blocks.add_module(str(i), block_fn(
in_chs=prev_chs,
out_chs=block_out_chs,
dilation=dilation,
bottle_ratio=bottle_ratio,
groups=groups,
drop_path=block_dpr[i] if block_dpr is not None else 0.,
**block_kwargs
))
prev_chs = block_out_chs
def forward(self, x):
x = self.conv_down(x)
x = self.blocks(x)
return x
def create_csp_stem(
in_chans=3,
out_chs=32,
kernel_size=3,
stride=2,
pool='',
padding='',
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
aa_layer=None,
):
stem = nn.Sequential()
feature_info = []
if not isinstance(out_chs, (tuple, list)):
out_chs = [out_chs]
stem_depth = len(out_chs)
assert stem_depth
assert stride in (1, 2, 4)
prev_feat = None
prev_chs = in_chans
last_idx = stem_depth - 1
stem_stride = 1
for i, chs in enumerate(out_chs):
conv_name = f'conv{i + 1}'
conv_stride = 2 if (i == 0 and stride > 1) or (i == last_idx and stride > 2 and not pool) else 1
if conv_stride > 1 and prev_feat is not None:
feature_info.append(prev_feat)
stem.add_module(conv_name, ConvNormAct(
prev_chs, chs, kernel_size,
stride=conv_stride,
padding=padding if i == 0 else '',
act_layer=act_layer,
norm_layer=norm_layer,
))
stem_stride *= conv_stride
prev_chs = chs
prev_feat = dict(num_chs=prev_chs, reduction=stem_stride, module='.'.join(['stem', conv_name]))
if pool:
assert stride > 2
if prev_feat is not None:
feature_info.append(prev_feat)
if aa_layer is not None:
stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
stem.add_module('aa', aa_layer(channels=prev_chs, stride=2))
pool_name = 'aa'
else:
stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
pool_name = 'pool'
stem_stride *= 2
prev_feat = dict(num_chs=prev_chs, reduction=stem_stride, module='.'.join(['stem', pool_name]))
feature_info.append(prev_feat)
return stem, feature_info
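# Illustrative sketch (assumption): a two-conv stem with stride=2 strides only the
# first conv, so overall reduction is 2. The helper below is hypothetical.
def _demo_csp_stem():
    stem, feat_info = create_csp_stem(in_chans=3, out_chs=(32, 64), kernel_size=3, stride=2)
    y = stem(torch.randn(1, 3, 64, 64))
    assert y.shape == (1, 64, 32, 32)
    assert feat_info[-1]['reduction'] == 2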
def _get_stage_fn(stage_args):
stage_type = stage_args.pop('stage_type')
assert stage_type in ('dark', 'csp', 'cs3')
if stage_type == 'dark':
stage_args.pop('expand_ratio', None)
stage_args.pop('cross_linear', None)
stage_args.pop('down_growth', None)
stage_fn = DarkStage
elif stage_type == 'csp':
stage_fn = CrossStage
else:
stage_fn = CrossStage3
return stage_fn, stage_args
def _get_block_fn(stage_args):
block_type = stage_args.pop('block_type')
assert block_type in ('dark', 'edge', 'bottle')
if block_type == 'dark':
return DarkBlock, stage_args
elif block_type == 'edge':
return EdgeBlock, stage_args
else:
return BottleneckBlock, stage_args
def _get_attn_fn(stage_args):
attn_layer = stage_args.pop('attn_layer')
attn_kwargs = stage_args.pop('attn_kwargs', None) or {}
if attn_layer is not None:
attn_layer = get_attn(attn_layer)
if attn_kwargs:
attn_layer = partial(attn_layer, **attn_kwargs)
return attn_layer, stage_args
def create_csp_stages(
cfg: CspModelCfg,
drop_path_rate: float,
output_stride: int,
stem_feat: Dict[str, Any],
):
cfg_dict = asdict(cfg.stages)
num_stages = len(cfg.stages.depth)
cfg_dict['block_dpr'] = [None] * num_stages if not drop_path_rate else \
[x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.stages.depth)).split(cfg.stages.depth)]
    per_stage_args = [dict(zip(cfg_dict.keys(), values)) for values in zip(*cfg_dict.values())]
block_kwargs = dict(
act_layer=cfg.act_layer,
norm_layer=cfg.norm_layer,
)
dilation = 1
net_stride = stem_feat['reduction']
prev_chs = stem_feat['num_chs']
prev_feat = stem_feat
feature_info = []
stages = []
    for stage_idx, stage_args in enumerate(per_stage_args):
stage_fn, stage_args = _get_stage_fn(stage_args)
block_fn, stage_args = _get_block_fn(stage_args)
attn_fn, stage_args = _get_attn_fn(stage_args)
stride = stage_args.pop('stride')
if stride != 1 and prev_feat:
feature_info.append(prev_feat)
if net_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
net_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
stages += [stage_fn(
prev_chs,
**stage_args,
stride=stride,
first_dilation=first_dilation,
dilation=dilation,
block_fn=block_fn,
aa_layer=cfg.aa_layer,
attn_layer=attn_fn, # will be passed through stage as block_kwargs
**block_kwargs,
)]
prev_chs = stage_args['out_chs']
prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')
feature_info.append(prev_feat)
return nn.Sequential(*stages), feature_info
class CspNet(nn.Module):
"""Cross Stage Partial base model.
Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929
Ref Impl: https://github.com/WongKinYiu/CrossStagePartialNetworks
NOTE: There are differences in the way I handle the 1x1 'expansion' conv in this impl vs the
darknet impl. I did it this way for simplicity and less special cases.
"""
def __init__(
self,
cfg: CspModelCfg,
in_chans=3,
num_classes=1000,
output_stride=32,
global_pool='avg',
drop_rate=0.,
drop_path_rate=0.,
zero_init_last=True,
**kwargs,
):
"""
Args:
cfg (CspModelCfg): Model architecture configuration
in_chans (int): Number of input channels (default: 3)
num_classes (int): Number of classifier classes (default: 1000)
output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32)
global_pool (str): Global pooling type (default: 'avg')
drop_rate (float): Dropout rate (default: 0.)
drop_path_rate (float): Stochastic depth drop-path rate (default: 0.)
zero_init_last (bool): Zero-init last weight of residual path
kwargs (dict): Extra kwargs overlayed onto cfg
"""
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
assert output_stride in (8, 16, 32)
cfg = replace(cfg, **kwargs) # overlay kwargs onto cfg
layer_args = dict(
act_layer=cfg.act_layer,
norm_layer=cfg.norm_layer,
aa_layer=cfg.aa_layer
)
self.feature_info = []
# Construct the stem
self.stem, stem_feat_info = create_csp_stem(in_chans, **asdict(cfg.stem), **layer_args)
self.feature_info.extend(stem_feat_info[:-1])
# Construct the stages
self.stages, stage_feat_info = create_csp_stages(
cfg,
drop_path_rate=drop_path_rate,
output_stride=output_stride,
stem_feat=stem_feat_info[-1],
)
prev_chs = stage_feat_info[-1]['num_chs']
self.feature_info.extend(stage_feat_info)
# Construct the head
self.num_features = prev_chs
self.head = ClassifierHead(
in_features=prev_chs, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate)
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^stages\.(\d+)\..*transition', MATCH_PREV_GROUP), # map to last block in stage
(r'^stages\.(\d+)', (0,)),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _init_weights(module, name, zero_init_last=False):
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif zero_init_last and hasattr(module, 'zero_init_last'):
module.zero_init_last()
model_cfgs = dict(
cspresnet50=CspModelCfg(
stem=CspStemCfg(out_chs=64, kernel_size=7, stride=4, pool='max'),
stages=CspStagesCfg(
depth=(3, 3, 5, 2),
out_chs=(128, 256, 512, 1024),
stride=(1, 2),
expand_ratio=2.,
bottle_ratio=0.5,
cross_linear=True,
),
),
cspresnet50d=CspModelCfg(
stem=CspStemCfg(out_chs=(32, 32, 64), kernel_size=3, stride=4, pool='max'),
stages=CspStagesCfg(
depth=(3, 3, 5, 2),
out_chs=(128, 256, 512, 1024),
stride=(1,) + (2,),
expand_ratio=2.,
bottle_ratio=0.5,
block_ratio=1.,
cross_linear=True,
),
),
cspresnet50w=CspModelCfg(
stem=CspStemCfg(out_chs=(32, 32, 64), kernel_size=3, stride=4, pool='max'),
stages=CspStagesCfg(
depth=(3, 3, 5, 2),
out_chs=(256, 512, 1024, 2048),
stride=(1,) + (2,),
expand_ratio=1.,
bottle_ratio=0.25,
block_ratio=0.5,
cross_linear=True,
),
),
cspresnext50=CspModelCfg(
stem=CspStemCfg(out_chs=64, kernel_size=7, stride=4, pool='max'),
stages=CspStagesCfg(
depth=(3, 3, 5, 2),
out_chs=(256, 512, 1024, 2048),
stride=(1,) + (2,),
groups=32,
expand_ratio=1.,
bottle_ratio=1.,
block_ratio=0.5,
cross_linear=True,
),
),
cspdarknet53=CspModelCfg(
stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''),
stages=CspStagesCfg(
depth=(1, 2, 8, 8, 4),
out_chs=(64, 128, 256, 512, 1024),
stride=2,
expand_ratio=(2.,) + (1.,),
bottle_ratio=(0.5,) + (1.,),
block_ratio=(1.,) + (0.5,),
down_growth=True,
block_type='dark',
),
),
darknet17=CspModelCfg(
stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''),
stages=CspStagesCfg(
depth=(1,) * 5,
out_chs=(64, 128, 256, 512, 1024),
stride=(2,),
bottle_ratio=(0.5,),
block_ratio=(1.,),
stage_type='dark',
block_type='dark',
),
),
darknet21=CspModelCfg(
stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''),
stages=CspStagesCfg(
depth=(1, 1, 1, 2, 2),
out_chs=(64, 128, 256, 512, 1024),
stride=(2,),
bottle_ratio=(0.5,),
block_ratio=(1.,),
stage_type='dark',
block_type='dark',
),
),
sedarknet21=CspModelCfg(
stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''),
stages=CspStagesCfg(
depth=(1, 1, 1, 2, 2),
out_chs=(64, 128, 256, 512, 1024),
stride=2,
bottle_ratio=0.5,
block_ratio=1.,
attn_layer='se',
stage_type='dark',
block_type='dark',
),
),
darknet53=CspModelCfg(
stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''),
stages=CspStagesCfg(
depth=(1, 2, 8, 8, 4),
out_chs=(64, 128, 256, 512, 1024),
stride=2,
bottle_ratio=0.5,
block_ratio=1.,
stage_type='dark',
block_type='dark',
),
),
darknetaa53=CspModelCfg(
stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''),
stages=CspStagesCfg(
depth=(1, 2, 8, 8, 4),
out_chs=(64, 128, 256, 512, 1024),
stride=2,
bottle_ratio=0.5,
block_ratio=1.,
avg_down=True,
stage_type='dark',
block_type='dark',
),
),
cs3darknet_s=_cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5),
cs3darknet_m=_cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67),
cs3darknet_l=_cs3_cfg(),
cs3darknet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33),
cs3darknet_focus_s=_cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5, focus=True),
cs3darknet_focus_m=_cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67, focus=True),
cs3darknet_focus_l=_cs3_cfg(focus=True),
cs3darknet_focus_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, focus=True),
cs3sedarknet_l=_cs3_cfg(attn_layer='se', attn_kwargs=dict(rd_ratio=.25)),
cs3sedarknet_x=_cs3_cfg(attn_layer='se', width_multiplier=1.25, depth_multiplier=1.33),
cs3sedarknet_xdw=CspModelCfg(
stem=CspStemCfg(out_chs=(32, 64), kernel_size=3, stride=2, pool=''),
stages=CspStagesCfg(
depth=(3, 6, 12, 4),
out_chs=(256, 512, 1024, 2048),
stride=2,
groups=(1, 1, 256, 512),
bottle_ratio=0.5,
block_ratio=0.5,
attn_layer='se',
),
act_layer='silu',
),
cs3edgenet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, bottle_ratio=1.5, block_type='edge'),
cs3se_edgenet_x=_cs3_cfg(
width_multiplier=1.25, depth_multiplier=1.33, bottle_ratio=1.5, block_type='edge',
attn_layer='se', attn_kwargs=dict(rd_ratio=.25)),
)
def _create_cspnet(variant, pretrained=False, **kwargs):
if variant.startswith('darknet') or variant.startswith('cspdarknet'):
# NOTE: DarkNet is one of few models with stride==1 features w/ 6 out_indices [0..5]
default_out_indices = (0, 1, 2, 3, 4, 5)
else:
default_out_indices = (0, 1, 2, 3, 4)
out_indices = kwargs.pop('out_indices', default_out_indices)
return build_model_with_cfg(
CspNet, variant, pretrained,
model_cfg=model_cfgs[variant],
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8),
'crop_pct': 0.887, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'cspresnet50.ra_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth'),
'cspresnet50d.untrained': _cfg(),
'cspresnet50w.untrained': _cfg(),
'cspresnext50.ra_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth',
),
'cspdarknet53.ra_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth'),
'darknet17.untrained': _cfg(),
'darknet21.untrained': _cfg(),
'sedarknet21.untrained': _cfg(),
'darknet53.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/darknet53_256_c2ns-3aeff817.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0),
'darknetaa53.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/darknetaa53_c2ns-5c28ec8a.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'cs3darknet_s.untrained': _cfg(interpolation='bicubic'),
'cs3darknet_m.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_m_c2ns-43f06604.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95,
),
'cs3darknet_l.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_l_c2ns-16220c5d.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95),
'cs3darknet_x.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_x_c2ns-4e4490aa.pth',
interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'cs3darknet_focus_s.untrained': _cfg(interpolation='bicubic'),
'cs3darknet_focus_m.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_focus_m_c2ns-e23bed41.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95),
'cs3darknet_focus_l.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_focus_l_c2ns-65ef8888.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95),
'cs3darknet_focus_x.untrained': _cfg(interpolation='bicubic'),
'cs3sedarknet_l.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3sedarknet_l_c2ns-e8d1dc13.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95),
'cs3sedarknet_x.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3sedarknet_x_c2ns-b4d0abc0.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0),
'cs3sedarknet_xdw.untrained': _cfg(interpolation='bicubic'),
'cs3edgenet_x.c2_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3edgenet_x_c2-2e1610a9.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0),
'cs3se_edgenet_x.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3se_edgenet_x_c2ns-76f8e3ac.pth',
interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0),
})
@register_model
def cspresnet50(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cspresnet50', pretrained=pretrained, **kwargs)
@register_model
def cspresnet50d(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cspresnet50d', pretrained=pretrained, **kwargs)
@register_model
def cspresnet50w(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cspresnet50w', pretrained=pretrained, **kwargs)
@register_model
def cspresnext50(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cspresnext50', pretrained=pretrained, **kwargs)
@register_model
def cspdarknet53(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cspdarknet53', pretrained=pretrained, **kwargs)
@register_model
def darknet17(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('darknet17', pretrained=pretrained, **kwargs)
@register_model
def darknet21(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('darknet21', pretrained=pretrained, **kwargs)
@register_model
def sedarknet21(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('sedarknet21', pretrained=pretrained, **kwargs)
@register_model
def darknet53(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('darknet53', pretrained=pretrained, **kwargs)
@register_model
def darknetaa53(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('darknetaa53', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_s(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_s', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_m(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_m', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_l(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_l', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_x(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_x', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_focus_s(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_focus_s', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_focus_m(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_focus_m', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_focus_l(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_focus_l', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_focus_x(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_focus_x', pretrained=pretrained, **kwargs)
@register_model
def cs3sedarknet_l(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3sedarknet_l', pretrained=pretrained, **kwargs)
@register_model
def cs3sedarknet_x(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3sedarknet_x', pretrained=pretrained, **kwargs)
@register_model
def cs3sedarknet_xdw(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3sedarknet_xdw', pretrained=pretrained, **kwargs)
@register_model
def cs3edgenet_x(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3edgenet_x', pretrained=pretrained, **kwargs)
@register_model
def cs3se_edgenet_x(pretrained=False, **kwargs) -> CspNet:
    return _create_cspnet('cs3se_edgenet_x', pretrained=pretrained, **kwargs)
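# Usage sketch (assumption: a timm install with the entry points above registered).
# Any of the @register_model functions can be reached through timm's factory, e.g.:
#
#   import timm
#   model = timm.create_model('cspresnet50', pretrained=False)
#   logits = model(torch.randn(1, 3, 256, 256))  # (1, 1000) per the default cfg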
"file_path": "pytorch-image-models/timm/models/cspnet.py",
"repo_id": "pytorch-image-models",
"token_count": 19954
} | 200 |
""" FocalNet
As described in `Focal Modulation Networks` - https://arxiv.org/abs/2203.11926
Significant modifications and refactoring from the original impl at https://github.com/microsoft/FocalNet
This impl is/has:
* fully convolutional, NCHW tensor layout throughout, seemed to have minimal performance impact but more flexible
* re-ordered downsample / layer so that striding always at beginning of layer (stage)
* no input size constraints or input resolution/H/W tracking through the model
* torchscript fixed and a number of quirks cleaned up
* feature extraction support via `features_only=True`
"""
# --------------------------------------------------------
# FocalNets -- Focal Modulation Networks
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Jianwei Yang ([email protected])
# --------------------------------------------------------
from functools import partial
from typing import Callable, Optional, Tuple
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import Mlp, DropPath, LayerNorm2d, trunc_normal_, ClassifierHead, NormMlpClassifierHead
from ._builder import build_model_with_cfg
from ._manipulate import named_apply
from ._registry import generate_default_cfgs, register_model
__all__ = ['FocalNet']
class FocalModulation(nn.Module):
def __init__(
self,
dim: int,
focal_window,
focal_level: int,
focal_factor: int = 2,
bias: bool = True,
use_post_norm: bool = False,
normalize_modulator: bool = False,
proj_drop: float = 0.,
norm_layer: Callable = LayerNorm2d,
):
super().__init__()
self.dim = dim
self.focal_window = focal_window
self.focal_level = focal_level
self.focal_factor = focal_factor
self.use_post_norm = use_post_norm
self.normalize_modulator = normalize_modulator
self.input_split = [dim, dim, self.focal_level + 1]
self.f = nn.Conv2d(dim, 2 * dim + (self.focal_level + 1), kernel_size=1, bias=bias)
self.h = nn.Conv2d(dim, dim, kernel_size=1, bias=bias)
self.act = nn.GELU()
self.proj = nn.Conv2d(dim, dim, kernel_size=1)
self.proj_drop = nn.Dropout(proj_drop)
self.focal_layers = nn.ModuleList()
self.kernel_sizes = []
for k in range(self.focal_level):
kernel_size = self.focal_factor * k + self.focal_window
self.focal_layers.append(nn.Sequential(
nn.Conv2d(dim, dim, kernel_size=kernel_size, groups=dim, padding=kernel_size // 2, bias=False),
nn.GELU(),
))
self.kernel_sizes.append(kernel_size)
self.norm = norm_layer(dim) if self.use_post_norm else nn.Identity()
def forward(self, x):
# pre linear projection
x = self.f(x)
q, ctx, gates = torch.split(x, self.input_split, 1)
        # context aggregation
ctx_all = 0
for l, focal_layer in enumerate(self.focal_layers):
ctx = focal_layer(ctx)
ctx_all = ctx_all + ctx * gates[:, l:l + 1]
ctx_global = self.act(ctx.mean((2, 3), keepdim=True))
ctx_all = ctx_all + ctx_global * gates[:, self.focal_level:]
# normalize context
if self.normalize_modulator:
ctx_all = ctx_all / (self.focal_level + 1)
# focal modulation
x_out = q * self.h(ctx_all)
x_out = self.norm(x_out)
# post linear projection
x_out = self.proj(x_out)
x_out = self.proj_drop(x_out)
return x_out
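# Illustrative sketch (not part of the original module): FocalModulation preserves
# the NCHW shape; the gated depth-wise contexts modulate the query path q via h(ctx).
# The helper name and tensor sizes below are assumptions for demonstration.
def _demo_focal_modulation():
    mod = FocalModulation(dim=32, focal_window=3, focal_level=2)
    x = torch.randn(2, 32, 14, 14)
    assert mod(x).shape == x.shape  # q * h(ctx_all) keeps (B, dim, H, W)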
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma.view(1, -1, 1, 1)
return x.mul_(gamma) if self.inplace else x * gamma
class FocalNetBlock(nn.Module):
""" Focal Modulation Network Block.
"""
def __init__(
self,
dim: int,
mlp_ratio: float = 4.,
focal_level: int = 1,
focal_window: int = 3,
use_post_norm: bool = False,
use_post_norm_in_modulation: bool = False,
normalize_modulator: bool = False,
layerscale_value: float = 1e-4,
proj_drop: float = 0.,
drop_path: float = 0.,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm2d,
):
"""
Args:
dim: Number of input channels.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
focal_level: Number of focal levels.
focal_window: Focal window size at first focal level.
use_post_norm: Whether to use layer norm after modulation.
use_post_norm_in_modulation: Whether to use layer norm in modulation.
layerscale_value: Initial layerscale value.
proj_drop: Dropout rate.
drop_path: Stochastic depth rate.
act_layer: Activation layer.
norm_layer: Normalization layer.
"""
super().__init__()
self.dim = dim
self.mlp_ratio = mlp_ratio
self.focal_window = focal_window
self.focal_level = focal_level
self.use_post_norm = use_post_norm
self.norm1 = norm_layer(dim) if not use_post_norm else nn.Identity()
self.modulation = FocalModulation(
dim,
focal_window=focal_window,
focal_level=self.focal_level,
use_post_norm=use_post_norm_in_modulation,
normalize_modulator=normalize_modulator,
proj_drop=proj_drop,
norm_layer=norm_layer,
)
self.norm1_post = norm_layer(dim) if use_post_norm else nn.Identity()
self.ls1 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim) if not use_post_norm else nn.Identity()
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
use_conv=True,
)
self.norm2_post = norm_layer(dim) if use_post_norm else nn.Identity()
self.ls2 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
# Focal Modulation
x = self.norm1(x)
x = self.modulation(x)
x = self.norm1_post(x)
x = shortcut + self.drop_path1(self.ls1(x))
# FFN
x = x + self.drop_path2(self.ls2(self.norm2_post(self.mlp(self.norm2(x)))))
return x
class FocalNetStage(nn.Module):
""" A basic Focal Transformer layer for one stage.
"""
def __init__(
self,
dim: int,
out_dim: int,
depth: int,
mlp_ratio: float = 4.,
downsample: bool = True,
focal_level: int = 1,
focal_window: int = 1,
use_overlap_down: bool = False,
use_post_norm: bool = False,
use_post_norm_in_modulation: bool = False,
normalize_modulator: bool = False,
layerscale_value: float = 1e-4,
proj_drop: float = 0.,
drop_path: float = 0.,
norm_layer: Callable = LayerNorm2d,
):
"""
Args:
dim: Number of input channels.
out_dim: Number of output channels.
depth: Number of blocks.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
downsample: Downsample layer at start of the layer.
focal_level: Number of focal levels
focal_window: Focal window size at first focal level
            use_overlap_down: Use overlapped convolution in downsample layer.
use_post_norm: Whether to use layer norm after modulation.
use_post_norm_in_modulation: Whether to use layer norm in modulation.
layerscale_value: Initial layerscale value
proj_drop: Dropout rate for projections.
drop_path: Stochastic depth rate.
norm_layer: Normalization layer.
"""
super().__init__()
self.dim = dim
self.depth = depth
self.grad_checkpointing = False
if downsample:
self.downsample = Downsample(
in_chs=dim,
out_chs=out_dim,
stride=2,
overlap=use_overlap_down,
norm_layer=norm_layer,
)
else:
self.downsample = nn.Identity()
# build blocks
self.blocks = nn.ModuleList([
FocalNetBlock(
dim=out_dim,
mlp_ratio=mlp_ratio,
focal_level=focal_level,
focal_window=focal_window,
use_post_norm=use_post_norm,
use_post_norm_in_modulation=use_post_norm_in_modulation,
normalize_modulator=normalize_modulator,
layerscale_value=layerscale_value,
proj_drop=proj_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
)
for i in range(depth)])
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
def forward(self, x):
x = self.downsample(x)
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
return x
class Downsample(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 4,
overlap: bool = False,
norm_layer: Optional[Callable] = None,
):
"""
Args:
in_chs: Number of input image channels.
out_chs: Number of linear projection output channels.
stride: Downsample stride.
overlap: Use overlapping convolutions if True.
norm_layer: Normalization layer.
"""
super().__init__()
self.stride = stride
padding = 0
kernel_size = stride
if overlap:
assert stride in (2, 4)
if stride == 4:
kernel_size, padding = 7, 2
elif stride == 2:
kernel_size, padding = 3, 1
self.proj = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm = norm_layer(out_chs) if norm_layer is not None else nn.Identity()
def forward(self, x):
x = self.proj(x)
x = self.norm(x)
return x
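# Illustrative sketch (assumption): overlap=True swaps the non-overlapping patchify
# conv for a larger kernel at the same stride (7/stride-4/pad-2 instead of 4/4/0).
def _demo_downsample():
    ds = Downsample(in_chs=3, out_chs=96, stride=4, overlap=True)
    assert ds(torch.randn(1, 3, 224, 224)).shape == (1, 96, 56, 56)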
class FocalNet(nn.Module):
"""" Focal Modulation Networks (FocalNets)
"""
def __init__(
self,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: int = 96,
depths: Tuple[int, ...] = (2, 2, 6, 2),
mlp_ratio: float = 4.,
focal_levels: Tuple[int, ...] = (2, 2, 2, 2),
focal_windows: Tuple[int, ...] = (3, 3, 3, 3),
use_overlap_down: bool = False,
use_post_norm: bool = False,
use_post_norm_in_modulation: bool = False,
normalize_modulator: bool = False,
head_hidden_size: Optional[int] = None,
head_init_scale: float = 1.0,
layerscale_value: Optional[float] = None,
            drop_rate: float = 0.,
            proj_drop_rate: float = 0.,
            drop_path_rate: float = 0.1,
norm_layer: Callable = partial(LayerNorm2d, eps=1e-5),
):
"""
Args:
in_chans: Number of input image channels.
num_classes: Number of classes for classification head.
embed_dim: Patch embedding dimension.
depths: Depth of each Focal Transformer layer.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
focal_levels: How many focal levels at all stages. Note that this excludes the finest-grain level.
focal_windows: The focal window size at all stages.
use_overlap_down: Whether to use convolutional embedding.
            use_post_norm: Whether to use layernorm after modulation (it helps stabilize training of large models)
layerscale_value: Value for layer scale.
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth rate.
norm_layer: Normalization layer.
"""
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.num_features = embed_dim[-1]
self.feature_info = []
self.stem = Downsample(
in_chs=in_chans,
out_chs=embed_dim[0],
overlap=use_overlap_down,
norm_layer=norm_layer,
)
in_dim = embed_dim[0]
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
layers = []
for i_layer in range(self.num_layers):
out_dim = embed_dim[i_layer]
layer = FocalNetStage(
dim=in_dim,
out_dim=out_dim,
depth=depths[i_layer],
mlp_ratio=mlp_ratio,
downsample=i_layer > 0,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_overlap_down=use_overlap_down,
use_post_norm=use_post_norm,
use_post_norm_in_modulation=use_post_norm_in_modulation,
normalize_modulator=normalize_modulator,
layerscale_value=layerscale_value,
proj_drop=proj_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
)
in_dim = out_dim
layers += [layer]
self.feature_info += [dict(num_chs=out_dim, reduction=4 * 2 ** i_layer, module=f'layers.{i_layer}')]
self.layers = nn.Sequential(*layers)
if head_hidden_size:
self.norm = nn.Identity()
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
hidden_size=head_hidden_size,
pool_type=global_pool,
drop_rate=drop_rate,
norm_layer=norm_layer,
)
else:
self.norm = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate
)
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
@torch.jit.ignore
def no_weight_decay(self):
return {''}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=[
(r'^layers\.(\d+)', None),
(r'^norm', (99999,))
] if coarse else [
(r'^layers\.(\d+).downsample', (0,)),
(r'^layers\.(\d+)\.\w+\.(\d+)', None),
(r'^norm', (99999,)),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
for l in self.layers:
l.set_grad_checkpointing(enable=enable)
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.layers(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _init_weights(module, name=None, head_init_scale=1.0):
if isinstance(module, nn.Conv2d):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
if name and 'head.fc' in name:
module.weight.data.mul_(head_init_scale)
module.bias.data.mul_(head_init_scale)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.proj', 'classifier': 'head.fc',
'license': 'mit', **kwargs
}
default_cfgs = generate_default_cfgs({
"focalnet_tiny_srf.ms_in1k": _cfg(
hf_hub_id='timm/'),
"focalnet_small_srf.ms_in1k": _cfg(
hf_hub_id='timm/'),
"focalnet_base_srf.ms_in1k": _cfg(
hf_hub_id='timm/'),
"focalnet_tiny_lrf.ms_in1k": _cfg(
hf_hub_id='timm/'),
"focalnet_small_lrf.ms_in1k": _cfg(
hf_hub_id='timm/'),
"focalnet_base_lrf.ms_in1k": _cfg(
hf_hub_id='timm/'),
"focalnet_large_fl3.ms_in22k": _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842),
"focalnet_large_fl4.ms_in22k": _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842),
"focalnet_xlarge_fl3.ms_in22k": _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842),
"focalnet_xlarge_fl4.ms_in22k": _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842),
"focalnet_huge_fl3.ms_in22k": _cfg(
hf_hub_id='timm/',
num_classes=21842),
"focalnet_huge_fl4.ms_in22k": _cfg(
hf_hub_id='timm/',
num_classes=0),
})
def checkpoint_filter_fn(state_dict, model: FocalNet):
state_dict = state_dict.get('model', state_dict)
if 'stem.proj.weight' in state_dict:
return state_dict
import re
out_dict = {}
dest_dict = model.state_dict()
for k, v in state_dict.items():
k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k)
k = k.replace('patch_embed', 'stem')
k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k)
if 'norm' in k and k not in dest_dict:
k = re.sub(r'norm([0-9])', r'norm\1_post', k)
k = k.replace('ln.', 'norm.')
k = k.replace('head', 'head.fc')
if k in dest_dict and dest_dict[k].numel() == v.numel() and dest_dict[k].shape != v.shape:
v = v.reshape(dest_dict[k].shape)
out_dict[k] = v
return out_dict
def _create_focalnet(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
FocalNet, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs)
return model
@register_model
def focalnet_tiny_srf(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, **kwargs)
return _create_focalnet('focalnet_tiny_srf', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_small_srf(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, **kwargs)
return _create_focalnet('focalnet_small_srf', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_base_srf(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, **kwargs)
return _create_focalnet('focalnet_base_srf', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_tiny_lrf(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs)
return _create_focalnet('focalnet_tiny_lrf', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_small_lrf(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs)
return _create_focalnet('focalnet_small_lrf', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_base_lrf(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, focal_levels=[3, 3, 3, 3], **kwargs)
return _create_focalnet('focalnet_base_lrf', pretrained=pretrained, **model_kwargs)
# FocalNet large+ models
@register_model
def focalnet_large_fl3(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(
depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4,
use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
return _create_focalnet('focalnet_large_fl3', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_large_fl4(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(
depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[4, 4, 4, 4],
use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
return _create_focalnet('focalnet_large_fl4', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_xlarge_fl3(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(
depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4,
use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
return _create_focalnet('focalnet_xlarge_fl3', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_xlarge_fl4(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(
depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[4, 4, 4, 4],
use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
return _create_focalnet('focalnet_xlarge_fl4', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_huge_fl3(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(
depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[3, 3, 3, 3], focal_windows=[3] * 4,
use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
return _create_focalnet('focalnet_huge_fl3', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_huge_fl4(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(
depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[4, 4, 4, 4],
use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
return _create_focalnet('focalnet_huge_fl4', pretrained=pretrained, **model_kwargs)
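# Usage sketch (assumption: a timm install with these variants registered), mirroring
# the feature-extraction note in the module docstring:
#
#   import timm
#   model = timm.create_model('focalnet_tiny_srf', pretrained=False)
#   feat_model = timm.create_model('focalnet_tiny_srf', features_only=True)
#   feats = feat_model(torch.randn(1, 3, 224, 224))  # list of NCHW stage outputs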
"""
Poolformer from MetaFormer is Actually What You Need for Vision https://arxiv.org/abs/2111.11418
IdentityFormer, RandFormer, PoolFormerV2, ConvFormer, and CAFormer
from MetaFormer Baselines for Vision https://arxiv.org/abs/2210.13452
All implemented models support feature extraction and variable input resolution.
Original implementation by Weihao Yu et al.,
adapted for timm by Fredo Guan and Ross Wightman.
Adapted from https://github.com/sail-sg/metaformer, original copyright below
"""
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.jit import Final
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_, DropPath, SelectAdaptivePool2d, GroupNorm1, LayerNorm, LayerNorm2d, Mlp, \
use_fused_attn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['MetaFormer']
class Stem(nn.Module):
"""
Stem implemented by a layer of convolution.
Conv2d params constant across all models.
"""
def __init__(
self,
in_channels,
out_channels,
norm_layer=None,
):
super().__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=7,
stride=4,
padding=2
)
self.norm = norm_layer(out_channels) if norm_layer else nn.Identity()
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
class Downsampling(nn.Module):
"""
Downsampling implemented by a layer of convolution.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
norm_layer=None,
):
super().__init__()
self.norm = norm_layer(in_channels) if norm_layer else nn.Identity()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding
)
def forward(self, x):
x = self.norm(x)
x = self.conv(x)
return x
class Scale(nn.Module):
"""
Scale vector by element multiplications.
"""
def __init__(self, dim, init_value=1.0, trainable=True, use_nchw=True):
super().__init__()
self.shape = (dim, 1, 1) if use_nchw else (dim,)
self.scale = nn.Parameter(init_value * torch.ones(dim), requires_grad=trainable)
def forward(self, x):
return x * self.scale.view(self.shape)
class SquaredReLU(nn.Module):
"""
Squared ReLU: https://arxiv.org/abs/2109.08668
"""
def __init__(self, inplace=False):
super().__init__()
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
return torch.square(self.relu(x))
class StarReLU(nn.Module):
"""
StarReLU: s * relu(x) ** 2 + b
"""
def __init__(
self,
scale_value=1.0,
bias_value=0.0,
scale_learnable=True,
bias_learnable=True,
mode=None,
inplace=False
):
super().__init__()
self.inplace = inplace
self.relu = nn.ReLU(inplace=inplace)
self.scale = nn.Parameter(scale_value * torch.ones(1), requires_grad=scale_learnable)
self.bias = nn.Parameter(bias_value * torch.ones(1), requires_grad=bias_learnable)
def forward(self, x):
return self.scale * self.relu(x) ** 2 + self.bias
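# Quick numeric sketch (assumption): with the default scale=1 and bias=0 this reduces
# to relu(x)**2, i.e. StarReLU(-1) == 0 and StarReLU(2) == 4. Helper is illustrative.
def _demo_star_relu():
    out = StarReLU()(torch.tensor([-1., 2.]))
    assert torch.allclose(out, torch.tensor([0., 4.]))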
class Attention(nn.Module):
"""
Vanilla self-attention from Transformer: https://arxiv.org/abs/1706.03762.
Modified from timm.
"""
fused_attn: Final[bool]
def __init__(
self,
dim,
head_dim=32,
num_heads=None,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
proj_bias=False,
**kwargs
):
super().__init__()
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.num_heads = num_heads if num_heads else dim // head_dim
if self.num_heads == 0:
self.num_heads = 1
self.attention_dim = self.num_heads * self.head_dim
self.qkv = nn.Linear(dim, self.attention_dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(self.attention_dim, dim, bias=proj_bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
# custom norm modules that disable the bias term, since the original model defs
# used a custom norm with a weight term but no bias term.
class GroupNorm1NoBias(GroupNorm1):
def __init__(self, num_channels, **kwargs):
super().__init__(num_channels, **kwargs)
self.eps = kwargs.get('eps', 1e-6)
self.bias = None
class LayerNorm2dNoBias(LayerNorm2d):
def __init__(self, num_channels, **kwargs):
super().__init__(num_channels, **kwargs)
self.eps = kwargs.get('eps', 1e-6)
self.bias = None
class LayerNormNoBias(nn.LayerNorm):
def __init__(self, num_channels, **kwargs):
super().__init__(num_channels, **kwargs)
self.eps = kwargs.get('eps', 1e-6)
self.bias = None
class SepConv(nn.Module):
r"""
Inverted separable convolution from MobileNetV2: https://arxiv.org/abs/1801.04381.
"""
def __init__(
self,
dim,
expansion_ratio=2,
act1_layer=StarReLU,
act2_layer=nn.Identity,
bias=False,
kernel_size=7,
padding=3,
**kwargs
):
super().__init__()
mid_channels = int(expansion_ratio * dim)
self.pwconv1 = nn.Conv2d(dim, mid_channels, kernel_size=1, bias=bias)
self.act1 = act1_layer()
self.dwconv = nn.Conv2d(
mid_channels, mid_channels, kernel_size=kernel_size,
padding=padding, groups=mid_channels, bias=bias) # depthwise conv
self.act2 = act2_layer()
self.pwconv2 = nn.Conv2d(mid_channels, dim, kernel_size=1, bias=bias)
def forward(self, x):
x = self.pwconv1(x)
x = self.act1(x)
x = self.dwconv(x)
x = self.act2(x)
x = self.pwconv2(x)
return x
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer: https://arxiv.org/abs/2111.11418
"""
def __init__(self, pool_size=3, **kwargs):
super().__init__()
self.pool = nn.AvgPool2d(
pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
def forward(self, x):
y = self.pool(x)
return y - x
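# Note (interpretation, not an original comment): returning `y - x` leaves only the
# pooled residual, because the enclosing MetaFormerBlock re-adds the input through
# its own shortcut. On a constant input the mixer output is therefore exactly zero.
def _demo_pooling():
    pool = Pooling(pool_size=3)
    x = torch.ones(1, 8, 6, 6)
    assert torch.allclose(pool(x), torch.zeros_like(x))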
class MlpHead(nn.Module):
""" MLP classification head
"""
def __init__(
self,
dim,
num_classes=1000,
mlp_ratio=4,
act_layer=SquaredReLU,
norm_layer=LayerNorm,
drop_rate=0.,
bias=True
):
super().__init__()
hidden_features = int(mlp_ratio * dim)
self.fc1 = nn.Linear(dim, hidden_features, bias=bias)
self.act = act_layer()
self.norm = norm_layer(hidden_features)
self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias)
self.head_drop = nn.Dropout(drop_rate)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.norm(x)
x = self.head_drop(x)
x = self.fc2(x)
return x
class MetaFormerBlock(nn.Module):
"""
Implementation of one MetaFormer block.
"""
def __init__(
self,
dim,
token_mixer=Pooling,
mlp_act=StarReLU,
mlp_bias=False,
norm_layer=LayerNorm2d,
proj_drop=0.,
drop_path=0.,
use_nchw=True,
layer_scale_init_value=None,
res_scale_init_value=None,
**kwargs
):
super().__init__()
ls_layer = partial(Scale, dim=dim, init_value=layer_scale_init_value, use_nchw=use_nchw)
rs_layer = partial(Scale, dim=dim, init_value=res_scale_init_value, use_nchw=use_nchw)
self.norm1 = norm_layer(dim)
self.token_mixer = token_mixer(dim=dim, proj_drop=proj_drop, **kwargs)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.layer_scale1 = ls_layer() if layer_scale_init_value is not None else nn.Identity()
self.res_scale1 = rs_layer() if res_scale_init_value is not None else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
dim,
int(4 * dim),
act_layer=mlp_act,
bias=mlp_bias,
drop=proj_drop,
use_conv=use_nchw,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.layer_scale2 = ls_layer() if layer_scale_init_value is not None else nn.Identity()
self.res_scale2 = rs_layer() if res_scale_init_value is not None else nn.Identity()
def forward(self, x):
x = self.res_scale1(x) + \
self.layer_scale1(
self.drop_path1(
self.token_mixer(self.norm1(x))
)
)
x = self.res_scale2(x) + \
self.layer_scale2(
self.drop_path2(
self.mlp(self.norm2(x))
)
)
return x
class MetaFormerStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
depth=2,
token_mixer=nn.Identity,
mlp_act=StarReLU,
mlp_bias=False,
downsample_norm=LayerNorm2d,
norm_layer=LayerNorm2d,
proj_drop=0.,
            dp_rates=(0., 0.),
layer_scale_init_value=None,
res_scale_init_value=None,
**kwargs,
):
super().__init__()
self.grad_checkpointing = False
self.use_nchw = not issubclass(token_mixer, Attention)
# don't downsample if in_chs and out_chs are the same
self.downsample = nn.Identity() if in_chs == out_chs else Downsampling(
in_chs,
out_chs,
kernel_size=3,
stride=2,
padding=1,
norm_layer=downsample_norm,
)
self.blocks = nn.Sequential(*[MetaFormerBlock(
dim=out_chs,
token_mixer=token_mixer,
mlp_act=mlp_act,
mlp_bias=mlp_bias,
norm_layer=norm_layer,
proj_drop=proj_drop,
drop_path=dp_rates[i],
layer_scale_init_value=layer_scale_init_value,
res_scale_init_value=res_scale_init_value,
use_nchw=self.use_nchw,
**kwargs,
) for i in range(depth)])
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
def forward(self, x: Tensor):
x = self.downsample(x)
B, C, H, W = x.shape
if not self.use_nchw:
x = x.reshape(B, C, -1).transpose(1, 2)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
if not self.use_nchw:
x = x.transpose(1, 2).reshape(B, C, H, W)
return x
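# Illustrative sketch (assumption): a stage with an Attention token mixer runs its
# blocks in (B, N, C) layout (use_nchw is False) and restores NCHW afterwards; a
# token-wise norm such as LayerNormNoBias must be used in that case.
def _demo_metaformer_stage():
    stage = MetaFormerStage(
        in_chs=64, out_chs=128, depth=1,
        token_mixer=Attention, norm_layer=LayerNormNoBias)
    assert stage(torch.randn(1, 64, 16, 16)).shape == (1, 128, 8, 8)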
class MetaFormer(nn.Module):
r""" MetaFormer
A PyTorch impl of : `MetaFormer Baselines for Vision` -
https://arxiv.org/abs/2210.13452
Args:
in_chans (int): Number of input image channels.
num_classes (int): Number of classes for classification head.
global_pool: Pooling for classifier head.
depths (list or tuple): Number of blocks at each stage.
dims (list or tuple): Feature dimension at each stage.
token_mixers (list, tuple or token_fcn): Token mixer for each stage.
mlp_act: Activation layer for MLP.
mlp_bias (boolean): Enable or disable mlp bias term.
drop_path_rate (float): Stochastic depth rate.
drop_rate (float): Dropout rate.
layer_scale_init_values (list, tuple, float or None): Init value for Layer Scale.
None means not use the layer scale. Form: https://arxiv.org/abs/2103.17239.
res_scale_init_values (list, tuple, float or None): Init value for res Scale on residual connections.
None means not use the res scale. From: https://arxiv.org/abs/2110.09456.
downsample_norm (nn.Module): Norm layer used in stem and downsampling layers.
norm_layers (list, tuple or norm_fcn): Norm layers for each stage.
output_norm: Norm layer before classifier head.
use_mlp_head: Use MLP classification head.
"""
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
depths=(2, 2, 6, 2),
dims=(64, 128, 320, 512),
token_mixers=Pooling,
mlp_act=StarReLU,
mlp_bias=False,
drop_path_rate=0.,
proj_drop_rate=0.,
drop_rate=0.0,
layer_scale_init_values=None,
res_scale_init_values=(None, None, 1.0, 1.0),
downsample_norm=LayerNorm2dNoBias,
norm_layers=LayerNorm2dNoBias,
output_norm=LayerNorm2d,
use_mlp_head=True,
**kwargs,
):
super().__init__()
self.num_classes = num_classes
self.num_features = dims[-1]
self.drop_rate = drop_rate
self.use_mlp_head = use_mlp_head
        # convert everything to lists if they aren't indexable
        if not isinstance(depths, (list, tuple)):
            depths = [depths]  # it means the model has only one stage
        if not isinstance(dims, (list, tuple)):
            dims = [dims]
        self.num_stages = len(depths)  # computed after the conversions above so scalar args also work
if not isinstance(token_mixers, (list, tuple)):
token_mixers = [token_mixers] * self.num_stages
if not isinstance(norm_layers, (list, tuple)):
norm_layers = [norm_layers] * self.num_stages
if not isinstance(layer_scale_init_values, (list, tuple)):
layer_scale_init_values = [layer_scale_init_values] * self.num_stages
if not isinstance(res_scale_init_values, (list, tuple)):
res_scale_init_values = [res_scale_init_values] * self.num_stages
self.grad_checkpointing = False
self.feature_info = []
self.stem = Stem(
in_chans,
dims[0],
norm_layer=downsample_norm
)
stages = []
prev_dim = dims[0]
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
for i in range(self.num_stages):
stages += [MetaFormerStage(
prev_dim,
dims[i],
depth=depths[i],
token_mixer=token_mixers[i],
mlp_act=mlp_act,
mlp_bias=mlp_bias,
proj_drop=proj_drop_rate,
dp_rates=dp_rates[i],
layer_scale_init_value=layer_scale_init_values[i],
res_scale_init_value=res_scale_init_values[i],
downsample_norm=downsample_norm,
norm_layer=norm_layers[i],
**kwargs,
)]
prev_dim = dims[i]
self.feature_info += [dict(num_chs=dims[i], reduction=2, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
# if using MlpHead, dropout is handled by MlpHead
if num_classes > 0:
if self.use_mlp_head:
final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate)
else:
final = nn.Linear(self.num_features, num_classes)
else:
final = nn.Identity()
self.head = nn.Sequential(OrderedDict([
('global_pool', SelectAdaptivePool2d(pool_type=global_pool)),
('norm', output_norm(self.num_features)),
('flatten', nn.Flatten(1) if global_pool else nn.Identity()),
('drop', nn.Dropout(drop_rate) if self.use_mlp_head else nn.Identity()),
('fc', final)
]))
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
for stage in self.stages:
stage.set_grad_checkpointing(enable=enable)
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes=0, global_pool=None):
if global_pool is not None:
self.head.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.head.flatten = nn.Flatten(1) if global_pool else nn.Identity()
if num_classes > 0:
if self.use_mlp_head:
final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate)
else:
final = nn.Linear(self.num_features, num_classes)
else:
final = nn.Identity()
self.head.fc = final
def forward_head(self, x: Tensor, pre_logits: bool = False):
# NOTE nn.Sequential in head broken down since can't call head[:-1](x) in torchscript :(
x = self.head.global_pool(x)
x = self.head.norm(x)
x = self.head.flatten(x)
x = self.head.drop(x)
return x if pre_logits else self.head.fc(x)
def forward_features(self, x: Tensor):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward(self, x: Tensor):
x = self.forward_features(x)
x = self.forward_head(x)
return x
# this works but it's long and breaks backward compatibility with weights from the poolformer-only impl
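# Example remappings performed below (keys are illustrative):
#   poolformer v1: 'network.0.0.mlp.fc1.weight' -> 'stages.0.blocks.0.mlp.fc1.weight'
#   poolformer v1: 'network.1.proj.weight'      -> 'stages.1.downsample.conv.weight'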
def checkpoint_filter_fn(state_dict, model):
if 'stem.conv.weight' in state_dict:
return state_dict
import re
out_dict = {}
is_poolformerv1 = 'network.0.0.mlp.fc1.weight' in state_dict
model_state_dict = model.state_dict()
for k, v in state_dict.items():
if is_poolformerv1:
k = re.sub(r'layer_scale_([0-9]+)', r'layer_scale\1.scale', k)
k = k.replace('network.1', 'downsample_layers.1')
k = k.replace('network.3', 'downsample_layers.2')
k = k.replace('network.5', 'downsample_layers.3')
k = k.replace('network.2', 'network.1')
k = k.replace('network.4', 'network.2')
k = k.replace('network.6', 'network.3')
k = k.replace('network', 'stages')
k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k)
k = k.replace('downsample.proj', 'downsample.conv')
k = k.replace('patch_embed.proj', 'patch_embed.conv')
k = re.sub(r'([0-9]+).([0-9]+)', r'\1.blocks.\2', k)
k = k.replace('stages.0.downsample', 'patch_embed')
k = k.replace('patch_embed', 'stem')
k = k.replace('post_norm', 'norm')
k = k.replace('pre_norm', 'norm')
k = re.sub(r'^head', 'head.fc', k)
k = re.sub(r'^norm', 'head.norm', k)
        if v.shape != model_state_dict[k].shape and v.numel() == model_state_dict[k].numel():
v = v.reshape(model_state_dict[k].shape)
out_dict[k] = v
return out_dict
def _create_metaformer(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (2, 2, 6, 2))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
MetaFormer,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 1.0, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'classifier': 'head.fc', 'first_conv': 'stem.conv',
**kwargs
}
default_cfgs = generate_default_cfgs({
'poolformer_s12.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9),
'poolformer_s24.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9),
'poolformer_s36.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9),
'poolformer_m36.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95),
'poolformer_m48.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95),
'poolformerv2_s12.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_s24.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_s36.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_m36.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_m48.sail_in1k': _cfg(hf_hub_id='timm/'),
'convformer_s18.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s18.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s18.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s18.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s18.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'convformer_s36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'convformer_m36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_m36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_m36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_m36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_m36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'convformer_b36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_b36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_b36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_b36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_b36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_s18.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s18.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s18.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s18.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s18.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_s36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_m36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_m36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_m36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_m36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_m36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_b36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_b36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_b36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_b36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_b36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
})
@register_model
def poolformer_s12(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[2, 2, 6, 2],
dims=[64, 128, 320, 512],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-5,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_s12', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_s24(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[4, 4, 12, 4],
dims=[64, 128, 320, 512],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-5,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_s24', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[64, 128, 320, 512],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-6,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_s36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[96, 192, 384, 768],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-6,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_m36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_m48(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[8, 8, 24, 8],
dims=[96, 192, 384, 768],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-6,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_m48', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_s12(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[2, 2, 6, 2],
dims=[64, 128, 320, 512],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_s12', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_s24(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[4, 4, 12, 4],
dims=[64, 128, 320, 512],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_s24', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[64, 128, 320, 512],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_s36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[96, 192, 384, 768],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_m36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_m48(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[8, 8, 24, 8],
dims=[96, 192, 384, 768],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_m48', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_s18(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 3, 9, 3],
dims=[64, 128, 320, 512],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_s18', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[64, 128, 320, 512],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_s36', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[96, 192, 384, 576],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_m36', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_b36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[128, 256, 512, 768],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_b36', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_s18(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 3, 9, 3],
dims=[64, 128, 320, 512],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_s18', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[64, 128, 320, 512],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_s36', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[96, 192, 384, 576],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_m36', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_b36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[128, 256, 512, 768],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_b36', pretrained=pretrained, **model_kwargs)
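if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the library API): build one of
    # the variants registered above with random weights and run a forward pass.
    model = caformer_s18(pretrained=False, num_classes=10)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 10])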
| pytorch-image-models/timm/models/metaformer.py/0 | {
"file_path": "pytorch-image-models/timm/models/metaformer.py",
"repo_id": "pytorch-image-models",
"token_count": 17521
} | 202 |
""" Res2Net and Res2NeXt
Adapted from Official Pytorch impl at: https://github.com/gasvn/Res2Net/
Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169
"""
import math
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
from .resnet import ResNet
__all__ = []
class Bottle2neck(nn.Module):
""" Res2Net/Res2NeXT Bottleneck
Adapted from https://github.com/gasvn/Res2Net/blob/master/res2net.py
"""
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
cardinality=1,
base_width=26,
scale=4,
dilation=1,
first_dilation=None,
act_layer=nn.ReLU,
norm_layer=None,
attn_layer=None,
**_,
):
super(Bottle2neck, self).__init__()
self.scale = scale
self.is_first = stride > 1 or downsample is not None
self.num_scales = max(1, scale - 1)
width = int(math.floor(planes * (base_width / 64.0))) * cardinality
self.width = width
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False)
self.bn1 = norm_layer(width * scale)
convs = []
bns = []
for i in range(self.num_scales):
convs.append(nn.Conv2d(
width, width, kernel_size=3, stride=stride, padding=first_dilation,
dilation=first_dilation, groups=cardinality, bias=False))
bns.append(norm_layer(width))
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
if self.is_first:
# FIXME this should probably have count_include_pad=False, but hurts original weights
self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
else:
self.pool = None
self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False)
self.bn3 = norm_layer(outplanes)
self.se = attn_layer(outplanes) if attn_layer is not None else None
self.relu = act_layer(inplace=True)
self.downsample = downsample
def zero_init_last(self):
if getattr(self.bn3, 'weight', None) is not None:
nn.init.zeros_(self.bn3.weight)
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
spx = torch.split(out, self.width, 1)
spo = []
sp = spx[0] # redundant, for torchscript
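        # hierarchical "multi-scale" connections: each 3x3 branch processes its own
        # feature split plus the previous branch's output, except in the first block
        # of a stage (stride > 1 / downsample), where the splits are kept separate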
for i, (conv, bn) in enumerate(zip(self.convs, self.bns)):
if i == 0 or self.is_first:
sp = spx[i]
else:
sp = sp + spx[i]
sp = conv(sp)
sp = bn(sp)
sp = self.relu(sp)
spo.append(sp)
if self.scale > 1:
if self.pool is not None: # self.is_first == True, None check for torchscript
spo.append(self.pool(spx[-1]))
else:
spo.append(spx[-1])
out = torch.cat(spo, 1)
out = self.conv3(out)
out = self.bn3(out)
if self.se is not None:
out = self.se(out)
if self.downsample is not None:
shortcut = self.downsample(x)
out += shortcut
out = self.relu(out)
return out
def _create_res2net(variant, pretrained=False, **kwargs):
return build_model_with_cfg(ResNet, variant, pretrained, **kwargs)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'res2net50_26w_4s.in1k': _cfg(hf_hub_id='timm/'),
'res2net50_48w_2s.in1k': _cfg(hf_hub_id='timm/'),
'res2net50_14w_8s.in1k': _cfg(hf_hub_id='timm/'),
'res2net50_26w_6s.in1k': _cfg(hf_hub_id='timm/'),
'res2net50_26w_8s.in1k': _cfg(hf_hub_id='timm/'),
'res2net101_26w_4s.in1k': _cfg(hf_hub_id='timm/'),
'res2next50.in1k': _cfg(hf_hub_id='timm/'),
'res2net50d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'),
'res2net101d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'),
})
@register_model
def res2net50_26w_4s(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Res2Net-50 26w4s model.
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4))
return _create_res2net('res2net50_26w_4s', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net101_26w_4s(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Res2Net-101 26w4s model.
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4))
return _create_res2net('res2net101_26w_4s', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net50_26w_6s(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Res2Net-50 26w6s model.
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6))
return _create_res2net('res2net50_26w_6s', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net50_26w_8s(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Res2Net-50 26w8s model.
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8))
return _create_res2net('res2net50_26w_8s', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net50_48w_2s(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Res2Net-50 48w2s model.
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2))
return _create_res2net('res2net50_48w_2s', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net50_14w_8s(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Res2Net-50 14w8s model.
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8))
return _create_res2net('res2net50_14w_8s', pretrained, **dict(model_args, **kwargs))
@register_model
def res2next50(pretrained=False, **kwargs) -> ResNet:
"""Construct Res2NeXt-50 4s
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4))
return _create_res2net('res2next50', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net50d(pretrained=False, **kwargs) -> ResNet:
"""Construct Res2Net-50
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, stem_type='deep',
avg_down=True, stem_width=32, block_args=dict(scale=4))
return _create_res2net('res2net50d', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net101d(pretrained=False, **kwargs) -> ResNet:
"""Construct Res2Net-50
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, stem_type='deep',
avg_down=True, stem_width=32, block_args=dict(scale=4))
return _create_res2net('res2net101d', pretrained, **dict(model_args, **kwargs))
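if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): build a randomly initialized
    # Res2Net-50 26w4s and run a forward pass to sanity-check output shapes.
    model = res2net50_26w_4s(pretrained=False)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 1000])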
| pytorch-image-models/timm/models/res2net.py/0 | {
"file_path": "pytorch-image-models/timm/models/res2net.py",
"repo_id": "pytorch-image-models",
"token_count": 3659
} | 203 |
"""VGG
Adapted from https://github.com/pytorch/vision 'vgg.py' (BSD-3-Clause) with a few changes for
timm functionality.
Copyright 2021 Ross Wightman
"""
from typing import Union, List, Dict, Any, cast
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ClassifierHead
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._registry import register_model, generate_default_cfgs
__all__ = ['VGG']
cfgs: Dict[str, List[Union[str, int]]] = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
@register_notrace_module # reason: FX can't symbolically trace control flow in forward method
class ConvMlp(nn.Module):
def __init__(
self,
in_features=512,
out_features=4096,
kernel_size=7,
mlp_ratio=1.0,
drop_rate: float = 0.2,
act_layer: nn.Module = None,
conv_layer: nn.Module = None,
):
super(ConvMlp, self).__init__()
self.input_kernel_size = kernel_size
mid_features = int(out_features * mlp_ratio)
self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True)
self.act1 = act_layer(True)
self.drop = nn.Dropout(drop_rate)
self.fc2 = conv_layer(mid_features, out_features, 1, bias=True)
self.act2 = act_layer(True)
def forward(self, x):
if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size:
# keep the input size >= 7x7
output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1]))
x = F.adaptive_avg_pool2d(x, output_size)
x = self.fc1(x)
x = self.act1(x)
x = self.drop(x)
x = self.fc2(x)
x = self.act2(x)
return x
class VGG(nn.Module):
def __init__(
self,
cfg: List[Any],
num_classes: int = 1000,
in_chans: int = 3,
output_stride: int = 32,
mlp_ratio: float = 1.0,
act_layer: nn.Module = nn.ReLU,
conv_layer: nn.Module = nn.Conv2d,
norm_layer: nn.Module = None,
global_pool: str = 'avg',
drop_rate: float = 0.,
) -> None:
super(VGG, self).__init__()
assert output_stride == 32
self.num_classes = num_classes
self.num_features = 4096
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.use_norm = norm_layer is not None
self.feature_info = []
prev_chs = in_chans
net_stride = 1
pool_layer = nn.MaxPool2d
layers: List[nn.Module] = []
for v in cfg:
last_idx = len(layers) - 1
if v == 'M':
self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{last_idx}'))
layers += [pool_layer(kernel_size=2, stride=2)]
net_stride *= 2
else:
v = cast(int, v)
conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1)
if norm_layer is not None:
layers += [conv2d, norm_layer(v), act_layer(inplace=True)]
else:
layers += [conv2d, act_layer(inplace=True)]
prev_chs = v
self.features = nn.Sequential(*layers)
self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}'))
self.pre_logits = ConvMlp(
prev_chs,
self.num_features,
7,
mlp_ratio=mlp_ratio,
drop_rate=drop_rate,
act_layer=act_layer,
conv_layer=conv_layer,
)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
self._initialize_weights()
@torch.jit.ignore
def group_matcher(self, coarse=False):
        # NOTE: for the bn variants this puts BN layers in their own groups, separate from their convs;
        # fixing that properly would take a lot of effort
return dict(stem=r'^features\.0', blocks=r'^features\.(\d+)')
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.head = ClassifierHead(
self.num_features,
self.num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False):
x = self.pre_logits(x)
return x if pre_logits else self.head(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _initialize_weights(self) -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
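# Example remappings performed by _filter_fn below (torchvision-style keys, illustrative):
#   'classifier.0.weight' (4096, 25088) -> 'pre_logits.fc1.weight' reshaped to (4096, 512, 7, 7)
#   'classifier.6.weight'               -> 'head.fc.weight' (shape unchanged)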
def _filter_fn(state_dict):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
k_r = k
k_r = k_r.replace('classifier.0', 'pre_logits.fc1')
k_r = k_r.replace('classifier.3', 'pre_logits.fc2')
k_r = k_r.replace('classifier.6', 'head.fc')
if 'classifier.0.weight' in k:
v = v.reshape(-1, 512, 7, 7)
if 'classifier.3.weight' in k:
v = v.reshape(-1, 4096, 1, 1)
out_dict[k_r] = v
return out_dict
def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG:
cfg = variant.split('_')[0]
# NOTE: VGG is one of few models with stride==1 features w/ 6 out_indices [0..5]
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4, 5))
model = build_model_with_cfg(
VGG,
variant,
pretrained,
model_cfg=cfgs[cfg],
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
pretrained_filter_fn=_filter_fn,
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'features.0', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'vgg11.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg13.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg16.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg19.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg11_bn.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg13_bn.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg16_bn.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg19_bn.tv_in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def vgg11(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 11-layer model (configuration "A") from
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(**kwargs)
return _create_vgg('vgg11', pretrained=pretrained, **model_args)
@register_model
def vgg11_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 11-layer model (configuration "A") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
return _create_vgg('vgg11_bn', pretrained=pretrained, **model_args)
@register_model
def vgg13(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 13-layer model (configuration "B")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(**kwargs)
return _create_vgg('vgg13', pretrained=pretrained, **model_args)
@register_model
def vgg13_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 13-layer model (configuration "B") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args)
@register_model
def vgg16(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 16-layer model (configuration "D")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(**kwargs)
return _create_vgg('vgg16', pretrained=pretrained, **model_args)
@register_model
def vgg16_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 16-layer model (configuration "D") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args)
@register_model
def vgg19(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 19-layer model (configuration "E")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(**kwargs)
return _create_vgg('vgg19', pretrained=pretrained, **model_args)
@register_model
def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 19-layer model (configuration 'E') with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
    return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args)
 | pytorch-image-models/timm/models/vgg.py/0 | {
"file_path": "pytorch-image-models/timm/models/vgg.py",
"repo_id": "pytorch-image-models",
"token_count": 5201
} | 204 |
""" AdamW Optimizer
Impl copied from PyTorch master
NOTE: the builtin torch.optim.AdamW is used by the optimizer factory; this impl only serves
as a Python-based reference and will be removed someday.
"""
import math
import torch
from torch.optim.optimizer import Optimizer
class AdamW(Optimizer):
r"""Implements AdamW algorithm.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=1e-2, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
# Perform stepweight decay
p.data.mul_(1 - group['lr'] * group['weight_decay'])
# Perform optimization step
grad = p.grad
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
step_size = group['lr'] / bias_correction1
p.addcdiv_(exp_avg, denom, value=-step_size)
return loss
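if __name__ == '__main__':
    # Minimal sketch of a single step (illustrative only). With decoupled decay the
    # weight first shrinks by lr * weight_decay, independent of the gradient, and is
    # then updated by the Adam step.
    w = torch.nn.Parameter(torch.ones(3))
    opt = AdamW([w], lr=0.1, weight_decay=0.1)
    loss = (w ** 2).sum()
    loss.backward()
    opt.step()
    print(w.data)  # ~0.89 per entry: 1.0 * (1 - 0.01) decay, minus an Adam step of ~0.1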
| pytorch-image-models/timm/optim/adamw.py/0 | {
"file_path": "pytorch-image-models/timm/optim/adamw.py",
"repo_id": "pytorch-image-models",
"token_count": 2417
} | 205 |
""" Cosine Scheduler
Cosine LR schedule with warmup, cycle/restarts, noise, k-decay.
Hacked together by / Copyright 2021 Ross Wightman
"""
import logging
import math
import numpy as np
import torch
from .scheduler import Scheduler
_logger = logging.getLogger(__name__)
class CosineLRScheduler(Scheduler):
"""
Cosine decay with restarts.
This is described in the paper https://arxiv.org/abs/1608.03983.
Inspiration from
https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py
k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
t_initial: int,
lr_min: float = 0.,
cycle_mul: float = 1.,
cycle_decay: float = 1.,
cycle_limit: int = 1,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=False,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
k_decay=1.0,
initialize=True,
) -> None:
super().__init__(
optimizer,
param_group_field="lr",
t_in_epochs=t_in_epochs,
noise_range_t=noise_range_t,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
initialize=initialize,
)
assert t_initial > 0
assert lr_min >= 0
if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1:
_logger.warning(
"Cosine annealing scheduler will have no effect on the learning "
"rate since t_initial = t_mul = eta_mul = 1.")
self.t_initial = t_initial
self.lr_min = lr_min
self.cycle_mul = cycle_mul
self.cycle_decay = cycle_decay
self.cycle_limit = cycle_limit
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
self.k_decay = k_decay
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t):
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
if self.cycle_mul != 1:
i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul))
t_i = self.cycle_mul ** i * self.t_initial
t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial
else:
i = t // self.t_initial
t_i = self.t_initial
t_curr = t - (self.t_initial * i)
gamma = self.cycle_decay ** i
lr_max_values = [v * gamma for v in self.base_values]
k = self.k_decay
if i < self.cycle_limit:
lrs = [
self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k))
for lr_max in lr_max_values
]
else:
lrs = [self.lr_min for _ in self.base_values]
return lrs
def get_cycle_length(self, cycles=0):
cycles = max(1, cycles or self.cycle_limit)
if self.cycle_mul == 1.0:
return self.t_initial * cycles
else:
return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
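if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): 2 warmup epochs into a 10-epoch cosine
    # decay. Assumes the base Scheduler exposes step(epoch), as used by the timm train loop.
    net = torch.nn.Linear(2, 2)
    opt = torch.optim.SGD(net.parameters(), lr=0.1)
    sched = CosineLRScheduler(opt, t_initial=10, lr_min=1e-5, warmup_t=2, warmup_lr_init=1e-4)
    for epoch in range(10):
        sched.step(epoch)
        print(epoch, opt.param_groups[0]['lr'])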
| pytorch-image-models/timm/scheduler/cosine_lr.py/0 | {
"file_path": "pytorch-image-models/timm/scheduler/cosine_lr.py",
"repo_id": "pytorch-image-models",
"token_count": 2031
} | 206 |
""" Logging helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import logging.handlers
class FormatterNoInfo(logging.Formatter):
def __init__(self, fmt='%(levelname)s: %(message)s'):
logging.Formatter.__init__(self, fmt)
def format(self, record):
if record.levelno == logging.INFO:
return str(record.getMessage())
return logging.Formatter.format(self, record)
def setup_default_logging(default_level=logging.INFO, log_path=''):
console_handler = logging.StreamHandler()
console_handler.setFormatter(FormatterNoInfo())
logging.root.addHandler(console_handler)
logging.root.setLevel(default_level)
if log_path:
file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3)
file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s")
file_handler.setFormatter(file_formatter)
logging.root.addHandler(file_handler)
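if __name__ == '__main__':
    # Minimal usage sketch: INFO records print bare, other levels keep the level prefix.
    setup_default_logging()
    logging.info('plain info message')       # -> plain info message
    logging.warning('something to look at')  # -> WARNING: something to look at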
| pytorch-image-models/timm/utils/log.py/0 | {
"file_path": "pytorch-image-models/timm/utils/log.py",
"repo_id": "pytorch-image-models",
"token_count": 383
} | 207 |
<div align="center">
# Text Generation Inference benchmarking tool
![benchmark](../assets/benchmark.png)
</div>
A lightweight benchmarking tool inspired by [oha](https://github.com/hatoo/oha)
and powered by [tui](https://github.com/tui-rs-revival/ratatui).
## Install
```shell
make install-benchmark
```
## Run
First, start `text-generation-inference`:
```shell
text-generation-launcher --model-id bigscience/bloom-560m
```
Then run the benchmarking tool:
```shell
text-generation-benchmark --tokenizer-name bigscience/bloom-560m
```
| text-generation-inference/benchmark/README.md/0 | {
"file_path": "text-generation-inference/benchmark/README.md",
"repo_id": "text-generation-inference",
"token_count": 187
} | 208 |
import pytest
from text_generation import (
InferenceAPIClient,
InferenceAPIAsyncClient,
Client,
AsyncClient,
)
from text_generation.errors import NotSupportedError, NotFoundError
from text_generation.inference_api import check_model_support, deployed_models
def test_check_model_support(flan_t5_xxl, unsupported_model, fake_model):
assert check_model_support(flan_t5_xxl)
assert not check_model_support(unsupported_model)
with pytest.raises(NotFoundError):
check_model_support(fake_model)
def test_deployed_models():
deployed_models()
def test_client(flan_t5_xxl):
client = InferenceAPIClient(flan_t5_xxl)
assert isinstance(client, Client)
def test_client_unsupported_model(unsupported_model):
with pytest.raises(NotSupportedError):
InferenceAPIClient(unsupported_model)
def test_async_client(flan_t5_xxl):
client = InferenceAPIAsyncClient(flan_t5_xxl)
assert isinstance(client, AsyncClient)
def test_async_client_unsupported_model(unsupported_model):
with pytest.raises(NotSupportedError):
InferenceAPIAsyncClient(unsupported_model)
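if __name__ == '__main__':
    # Usage sketch (illustrative; needs network access and is not part of the test run).
    # The model id and generate() arguments below are example values:
    # client = InferenceAPIClient("google/flan-t5-xxl")
    # print(client.generate("Why is the sky blue?", max_new_tokens=20).generated_text)
    pass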
| text-generation-inference/clients/python/tests/test_inference_api.py/0 | {
"file_path": "text-generation-inference/clients/python/tests/test_inference_api.py",
"repo_id": "text-generation-inference",
"token_count": 411
} | 209 |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.5625,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.14770508,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9287109,
"text": " un"
},
{
"id": 46341,
"logprob": -15.4609375,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.5585938,
"text": "olan"
},
{
"id": 15,
"logprob": -1.4003906,
"text": ","
},
{
"id": 1669,
"logprob": -1.5673828,
"text": " il"
},
{
"id": 11580,
"logprob": -0.94628906,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.703125,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.5732422,
"text": " d'abord"
}
],
"seed": null,
"tokens": [
{
"id": 578,
"logprob": -1.7646484,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.6113281,
"special": false,
"text": " faire"
},
{
"id": 1767,
"logprob": -1.5263672,
"special": false,
"text": " cu"
},
{
"id": 1273,
"logprob": -0.00010049343,
"special": false,
"text": "ire"
},
{
"id": 1486,
"logprob": -1.4707031,
"special": false,
"text": " dans"
},
{
"id": 283,
"logprob": -1.2119141,
"special": false,
"text": " de"
},
{
"id": 40410,
"logprob": -0.11883545,
"special": false,
"text": " l'eau"
},
{
"id": 20226,
"logprob": -0.40844727,
"special": false,
"text": " bou"
},
{
"id": 172483,
"logprob": -0.0037841797,
"special": false,
"text": "illante"
},
{
"id": 2805,
"logprob": -1.0195312,
"special": false,
"text": " sal"
}
]
},
"generated_text": " le faire cuire dans de l'eau bouillante sal"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.53125,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.14770508,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9287109,
"text": " un"
},
{
"id": 46341,
"logprob": -15.4140625,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.5234375,
"text": "olan"
},
{
"id": 15,
"logprob": -1.3613281,
"text": ","
},
{
"id": 1669,
"logprob": -1.5458984,
"text": " il"
},
{
"id": 11580,
"logprob": -0.94189453,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.7011719,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.5732422,
"text": " d'abord"
}
],
"seed": null,
"tokens": [
{
"id": 578,
"logprob": -1.7548828,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.578125,
"special": false,
"text": " faire"
},
{
"id": 1767,
"logprob": -1.5117188,
"special": false,
"text": " cu"
},
{
"id": 1273,
"logprob": -0.00010049343,
"special": false,
"text": "ire"
},
{
"id": 1486,
"logprob": -1.4707031,
"special": false,
"text": " dans"
},
{
"id": 283,
"logprob": -1.1982422,
"special": false,
"text": " de"
},
{
"id": 40410,
"logprob": -0.11004639,
"special": false,
"text": " l'eau"
},
{
"id": 20226,
"logprob": -0.4506836,
"special": false,
"text": " bou"
},
{
"id": 172483,
"logprob": -0.003047943,
"special": false,
"text": "illante"
},
{
"id": 2805,
"logprob": -1.0185547,
"special": false,
"text": " sal"
}
]
},
"generated_text": " le faire cuire dans de l'eau bouillante sal"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.53125,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.14770508,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9287109,
"text": " un"
},
{
"id": 46341,
"logprob": -15.4140625,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.5234375,
"text": "olan"
},
{
"id": 15,
"logprob": -1.3613281,
"text": ","
},
{
"id": 1669,
"logprob": -1.5458984,
"text": " il"
},
{
"id": 11580,
"logprob": -0.94189453,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.7011719,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.5732422,
"text": " d'abord"
}
],
"seed": null,
"tokens": [
{
"id": 578,
"logprob": -1.7548828,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.578125,
"special": false,
"text": " faire"
},
{
"id": 1767,
"logprob": -1.5117188,
"special": false,
"text": " cu"
},
{
"id": 1273,
"logprob": -0.00010049343,
"special": false,
"text": "ire"
},
{
"id": 1486,
"logprob": -1.4707031,
"special": false,
"text": " dans"
},
{
"id": 283,
"logprob": -1.1982422,
"special": false,
"text": " de"
},
{
"id": 40410,
"logprob": -0.11004639,
"special": false,
"text": " l'eau"
},
{
"id": 20226,
"logprob": -0.4506836,
"special": false,
"text": " bou"
},
{
"id": 172483,
"logprob": -0.003047943,
"special": false,
"text": "illante"
},
{
"id": 2805,
"logprob": -1.0185547,
"special": false,
"text": " sal"
}
]
},
"generated_text": " le faire cuire dans de l'eau bouillante sal"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.53125,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.14770508,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9287109,
"text": " un"
},
{
"id": 46341,
"logprob": -15.4140625,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.5234375,
"text": "olan"
},
{
"id": 15,
"logprob": -1.3613281,
"text": ","
},
{
"id": 1669,
"logprob": -1.5458984,
"text": " il"
},
{
"id": 11580,
"logprob": -0.94189453,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.7011719,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.5732422,
"text": " d'abord"
}
],
"seed": null,
"tokens": [
{
"id": 578,
"logprob": -1.7548828,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.578125,
"special": false,
"text": " faire"
},
{
"id": 1767,
"logprob": -1.5117188,
"special": false,
"text": " cu"
},
{
"id": 1273,
"logprob": -0.00010049343,
"special": false,
"text": "ire"
},
{
"id": 1486,
"logprob": -1.4707031,
"special": false,
"text": " dans"
},
{
"id": 283,
"logprob": -1.1982422,
"special": false,
"text": " de"
},
{
"id": 40410,
"logprob": -0.11004639,
"special": false,
"text": " l'eau"
},
{
"id": 20226,
"logprob": -0.4506836,
"special": false,
"text": " bou"
},
{
"id": 172483,
"logprob": -0.003047943,
"special": false,
"text": "illante"
},
{
"id": 2805,
"logprob": -1.0185547,
"special": false,
"text": " sal"
}
]
},
"generated_text": " le faire cuire dans de l'eau bouillante sal"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_load.json",
"repo_id": "text-generation-inference",
"token_count": 7244
} | 210 |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1024,
"logprob": -10.578125,
"text": "name"
},
{
"id": 29901,
"logprob": -3.0332031,
"text": ":"
},
{
"id": 13260,
"logprob": -9.171875,
"text": "dav"
},
{
"id": 333,
"logprob": -0.04257202,
"text": "id"
},
{
"id": 29889,
"logprob": -2.4785156,
"text": "."
},
{
"id": 4876,
"logprob": -10.7890625,
"text": "email"
},
{
"id": 29901,
"logprob": -0.32495117,
"text": ":"
},
{
"id": 259,
"logprob": -9.4921875,
"text": " "
}
],
"seed": null,
"tokens": [
{
"id": 29896,
"logprob": -0.7709961,
"special": false,
"text": "1"
},
{
"id": 29906,
"logprob": -0.33740234,
"special": false,
"text": "2"
},
{
"id": 29941,
"logprob": -0.00995636,
"special": false,
"text": "3"
},
{
"id": 29946,
"logprob": -0.64208984,
"special": false,
"text": "4"
},
{
"id": 29945,
"logprob": -0.4970703,
"special": false,
"text": "5"
},
{
"id": 29953,
"logprob": -0.46533203,
"special": false,
"text": "6"
},
{
"id": 29992,
"logprob": -0.5336914,
"special": false,
"text": "@"
},
{
"id": 21980,
"logprob": -0.5361328,
"special": false,
"text": "gmail"
},
{
"id": 29889,
"logprob": -0.00088739395,
"special": false,
"text": "."
},
{
"id": 510,
"logprob": -0.0022735596,
"special": false,
"text": "com"
}
],
"top_tokens": null
},
"generated_text": "[email protected]"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1024,
"logprob": -10.578125,
"text": "name"
},
{
"id": 29901,
"logprob": -3.03125,
"text": ":"
},
{
"id": 13260,
"logprob": -9.171875,
"text": "dav"
},
{
"id": 333,
"logprob": -0.04244995,
"text": "id"
},
{
"id": 29889,
"logprob": -2.4863281,
"text": "."
},
{
"id": 4876,
"logprob": -10.7890625,
"text": "email"
},
{
"id": 29901,
"logprob": -0.32714844,
"text": ":"
},
{
"id": 259,
"logprob": -9.4921875,
"text": " "
}
],
"seed": null,
"tokens": [
{
"id": 29896,
"logprob": -0.7685547,
"special": false,
"text": "1"
},
{
"id": 29906,
"logprob": -0.33666992,
"special": false,
"text": "2"
},
{
"id": 29941,
"logprob": -0.01008606,
"special": false,
"text": "3"
},
{
"id": 29946,
"logprob": -0.64160156,
"special": false,
"text": "4"
},
{
"id": 29945,
"logprob": -0.5,
"special": false,
"text": "5"
},
{
"id": 29953,
"logprob": -0.46557617,
"special": false,
"text": "6"
},
{
"id": 29992,
"logprob": -0.5341797,
"special": false,
"text": "@"
},
{
"id": 21980,
"logprob": -0.5361328,
"special": false,
"text": "gmail"
},
{
"id": 29889,
"logprob": -0.00088739395,
"special": false,
"text": "."
},
{
"id": 510,
"logprob": -0.0022907257,
"special": false,
"text": "com"
}
],
"top_tokens": null
},
"generated_text": "[email protected]"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1024,
"logprob": -10.578125,
"text": "name"
},
{
"id": 29901,
"logprob": -3.0332031,
"text": ":"
},
{
"id": 13260,
"logprob": -9.171875,
"text": "dav"
},
{
"id": 333,
"logprob": -0.04257202,
"text": "id"
},
{
"id": 29889,
"logprob": -2.4785156,
"text": "."
},
{
"id": 4876,
"logprob": -10.7890625,
"text": "email"
},
{
"id": 29901,
"logprob": -0.32495117,
"text": ":"
},
{
"id": 259,
"logprob": -9.4921875,
"text": " "
}
],
"seed": null,
"tokens": [
{
"id": 29896,
"logprob": -0.7709961,
"special": false,
"text": "1"
},
{
"id": 29906,
"logprob": -0.33740234,
"special": false,
"text": "2"
},
{
"id": 29941,
"logprob": -0.00995636,
"special": false,
"text": "3"
},
{
"id": 29946,
"logprob": -0.64208984,
"special": false,
"text": "4"
},
{
"id": 29945,
"logprob": -0.4970703,
"special": false,
"text": "5"
},
{
"id": 29953,
"logprob": -0.46533203,
"special": false,
"text": "6"
},
{
"id": 29992,
"logprob": -0.5336914,
"special": false,
"text": "@"
},
{
"id": 21980,
"logprob": -0.5361328,
"special": false,
"text": "gmail"
},
{
"id": 29889,
"logprob": -0.00088739395,
"special": false,
"text": "."
},
{
"id": 510,
"logprob": -0.0022735596,
"special": false,
"text": "com"
}
],
"top_tokens": null
},
"generated_text": "[email protected]"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1024,
"logprob": -10.578125,
"text": "name"
},
{
"id": 29901,
"logprob": -3.0332031,
"text": ":"
},
{
"id": 13260,
"logprob": -9.171875,
"text": "dav"
},
{
"id": 333,
"logprob": -0.04257202,
"text": "id"
},
{
"id": 29889,
"logprob": -2.4785156,
"text": "."
},
{
"id": 4876,
"logprob": -10.7890625,
"text": "email"
},
{
"id": 29901,
"logprob": -0.32495117,
"text": ":"
},
{
"id": 259,
"logprob": -9.4921875,
"text": " "
}
],
"seed": null,
"tokens": [
{
"id": 29896,
"logprob": -0.7709961,
"special": false,
"text": "1"
},
{
"id": 29906,
"logprob": -0.33740234,
"special": false,
"text": "2"
},
{
"id": 29941,
"logprob": -0.00995636,
"special": false,
"text": "3"
},
{
"id": 29946,
"logprob": -0.64208984,
"special": false,
"text": "4"
},
{
"id": 29945,
"logprob": -0.4970703,
"special": false,
"text": "5"
},
{
"id": 29953,
"logprob": -0.46533203,
"special": false,
"text": "6"
},
{
"id": 29992,
"logprob": -0.5336914,
"special": false,
"text": "@"
},
{
"id": 21980,
"logprob": -0.5361328,
"special": false,
"text": "gmail"
},
{
"id": 29889,
"logprob": -0.00088739395,
"special": false,
"text": "."
},
{
"id": 510,
"logprob": -0.0022735596,
"special": false,
"text": "com"
}
],
"top_tokens": null
},
"generated_text": "[email protected]"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_load.json",
"repo_id": "text-generation-inference",
"token_count": 6602
} | 211 |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.21875,
"text": " your"
},
{
"id": 12315,
"logprob": -9.9375,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.1015625,
"text": " today"
},
{
"id": 32,
"logprob": -0.15319824,
"text": "?"
},
{
"id": 50279,
"logprob": -0.2614746,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8886719,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.98046875,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2265625,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3479004,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.0117188,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.67871094,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.421875,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7382812,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.051330566,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0390625,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.1054688,
"text": " your"
},
{
"id": 12315,
"logprob": -9.953125,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.0820312,
"text": " today"
},
{
"id": 32,
"logprob": -0.15148926,
"text": "?"
},
{
"id": 50279,
"logprob": -0.27026367,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.88378906,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.9819336,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2421875,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3474121,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.078125,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.69140625,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.4072266,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7041016,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.053375244,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0351562,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.21875,
"text": " your"
},
{
"id": 12315,
"logprob": -9.9375,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.1015625,
"text": " today"
},
{
"id": 32,
"logprob": -0.15319824,
"text": "?"
},
{
"id": 50279,
"logprob": -0.2614746,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8886719,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.98046875,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2265625,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3479004,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.0117188,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.67871094,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.421875,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7382812,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.051330566,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0390625,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.21875,
"text": " your"
},
{
"id": 12315,
"logprob": -9.9375,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.1015625,
"text": " today"
},
{
"id": 32,
"logprob": -0.15319824,
"text": "?"
},
{
"id": 50279,
"logprob": -0.2614746,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8886719,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.98046875,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2265625,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3479004,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.0117188,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.67871094,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.421875,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7382812,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.051330566,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0390625,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox_load.json",
"repo_id": "text-generation-inference",
"token_count": 6308
} | 212 |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 610,
"logprob": null,
"text": "def"
},
{
"id": 1489,
"logprob": -5.2617188,
"text": " print"
},
{
"id": 100,
"logprob": -0.38476562,
"text": "_"
},
{
"id": 7670,
"logprob": -7.640625,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 2284,
"logprob": -0.92626953,
"special": false,
"text": "():"
},
{
"id": 303,
"logprob": -0.40722656,
"special": false,
"text": "\n "
},
{
"id": 1489,
"logprob": -0.27954102,
"special": false,
"text": " print"
},
{
"id": 459,
"logprob": -0.6142578,
"special": false,
"text": "(\""
},
{
"id": 8302,
"logprob": -0.68310547,
"special": false,
"text": "Hello"
},
{
"id": 10914,
"logprob": -1.4570312,
"special": false,
"text": " World"
},
{
"id": 16013,
"logprob": -0.80126953,
"special": false,
"text": "!\")"
},
{
"id": 222,
"logprob": -0.6303711,
"special": false,
"text": "\n"
},
{
"id": 222,
"logprob": -0.23327637,
"special": false,
"text": "\n"
},
{
"id": 610,
"logprob": -1.2304688,
"special": false,
"text": "def"
}
],
"top_tokens": null
},
"generated_text": "():\n print(\"Hello World!\")\n\ndef"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 610,
"logprob": null,
"text": "def"
},
{
"id": 1489,
"logprob": -5.2617188,
"text": " print"
},
{
"id": 100,
"logprob": -0.38476562,
"text": "_"
},
{
"id": 7670,
"logprob": -7.640625,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 2284,
"logprob": -0.92626953,
"special": false,
"text": "():"
},
{
"id": 303,
"logprob": -0.40722656,
"special": false,
"text": "\n "
},
{
"id": 1489,
"logprob": -0.27954102,
"special": false,
"text": " print"
},
{
"id": 459,
"logprob": -0.6142578,
"special": false,
"text": "(\""
},
{
"id": 8302,
"logprob": -0.68310547,
"special": false,
"text": "Hello"
},
{
"id": 10914,
"logprob": -1.4570312,
"special": false,
"text": " World"
},
{
"id": 16013,
"logprob": -0.80126953,
"special": false,
"text": "!\")"
},
{
"id": 222,
"logprob": -0.6303711,
"special": false,
"text": "\n"
},
{
"id": 222,
"logprob": -0.23327637,
"special": false,
"text": "\n"
},
{
"id": 610,
"logprob": -1.2304688,
"special": false,
"text": "def"
}
],
"top_tokens": null
},
"generated_text": "():\n print(\"Hello World!\")\n\ndef"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 610,
"logprob": null,
"text": "def"
},
{
"id": 1489,
"logprob": -5.2617188,
"text": " print"
},
{
"id": 100,
"logprob": -0.38476562,
"text": "_"
},
{
"id": 7670,
"logprob": -7.640625,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 2284,
"logprob": -0.92626953,
"special": false,
"text": "():"
},
{
"id": 303,
"logprob": -0.40722656,
"special": false,
"text": "\n "
},
{
"id": 1489,
"logprob": -0.27954102,
"special": false,
"text": " print"
},
{
"id": 459,
"logprob": -0.6142578,
"special": false,
"text": "(\""
},
{
"id": 8302,
"logprob": -0.68310547,
"special": false,
"text": "Hello"
},
{
"id": 10914,
"logprob": -1.4570312,
"special": false,
"text": " World"
},
{
"id": 16013,
"logprob": -0.80126953,
"special": false,
"text": "!\")"
},
{
"id": 222,
"logprob": -0.6303711,
"special": false,
"text": "\n"
},
{
"id": 222,
"logprob": -0.23327637,
"special": false,
"text": "\n"
},
{
"id": 610,
"logprob": -1.2304688,
"special": false,
"text": "def"
}
],
"top_tokens": null
},
"generated_text": "():\n print(\"Hello World!\")\n\ndef"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 610,
"logprob": null,
"text": "def"
},
{
"id": 1489,
"logprob": -5.2617188,
"text": " print"
},
{
"id": 100,
"logprob": -0.38476562,
"text": "_"
},
{
"id": 7670,
"logprob": -7.640625,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 2284,
"logprob": -0.92626953,
"special": false,
"text": "():"
},
{
"id": 303,
"logprob": -0.40722656,
"special": false,
"text": "\n "
},
{
"id": 1489,
"logprob": -0.27954102,
"special": false,
"text": " print"
},
{
"id": 459,
"logprob": -0.6142578,
"special": false,
"text": "(\""
},
{
"id": 8302,
"logprob": -0.68310547,
"special": false,
"text": "Hello"
},
{
"id": 10914,
"logprob": -1.4570312,
"special": false,
"text": " World"
},
{
"id": 16013,
"logprob": -0.80126953,
"special": false,
"text": "!\")"
},
{
"id": 222,
"logprob": -0.6303711,
"special": false,
"text": "\n"
},
{
"id": 222,
"logprob": -0.23327637,
"special": false,
"text": "\n"
},
{
"id": 610,
"logprob": -1.2304688,
"special": false,
"text": "def"
}
],
"top_tokens": null
},
"generated_text": "():\n print(\"Hello World!\")\n\ndef"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder2/test_flash_starcoder2_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder2/test_flash_starcoder2_load.json",
"repo_id": "text-generation-inference",
"token_count": 5236
} | 213 |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.1953125,
"text": "'s"
},
{
"id": 634,
"logprob": -5.125,
"text": " your"
},
{
"id": 12315,
"logprob": -9.8828125,
"text": " mood"
},
{
"id": 3063,
"logprob": -3.9980469,
"text": " today"
},
{
"id": 32,
"logprob": -0.14672852,
"text": "?"
},
{
"id": 50279,
"logprob": -0.26489258,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8618164,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.9506836,
"special": false,
"text": "'m"
},
{
"id": 7016,
"logprob": -2.1738281,
"special": false,
"text": " sorry"
},
{
"id": 13,
"logprob": -0.0758667,
"special": false,
"text": ","
},
{
"id": 1394,
"logprob": -0.9135742,
"special": false,
"text": "You"
},
{
"id": 452,
"logprob": -1.1445312,
"special": false,
"text": " have"
},
{
"id": 247,
"logprob": -1.4375,
"special": false,
"text": " a"
},
{
"id": 4327,
"logprob": -1.1103516,
"special": false,
"text": " choice"
},
{
"id": 273,
"logprob": -1.0058594,
"special": false,
"text": " of"
},
{
"id": 752,
"logprob": -1.921875,
"special": false,
"text": " what"
}
]
},
"generated_text": "I'm sorry,You have a choice of what"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.1953125,
"text": "'s"
},
{
"id": 634,
"logprob": -5.125,
"text": " your"
},
{
"id": 12315,
"logprob": -9.8828125,
"text": " mood"
},
{
"id": 3063,
"logprob": -3.9980469,
"text": " today"
},
{
"id": 32,
"logprob": -0.14672852,
"text": "?"
},
{
"id": 50279,
"logprob": -0.26489258,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8618164,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.9506836,
"special": false,
"text": "'m"
},
{
"id": 7016,
"logprob": -2.1738281,
"special": false,
"text": " sorry"
},
{
"id": 13,
"logprob": -0.0758667,
"special": false,
"text": ","
},
{
"id": 1394,
"logprob": -0.9135742,
"special": false,
"text": "You"
},
{
"id": 452,
"logprob": -1.1445312,
"special": false,
"text": " have"
},
{
"id": 247,
"logprob": -1.4375,
"special": false,
"text": " a"
},
{
"id": 4327,
"logprob": -1.1103516,
"special": false,
"text": " choice"
},
{
"id": 273,
"logprob": -1.0058594,
"special": false,
"text": " of"
},
{
"id": 752,
"logprob": -1.921875,
"special": false,
"text": " what"
}
]
},
"generated_text": "I'm sorry,You have a choice of what"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.1953125,
"text": "'s"
},
{
"id": 634,
"logprob": -5.125,
"text": " your"
},
{
"id": 12315,
"logprob": -9.8828125,
"text": " mood"
},
{
"id": 3063,
"logprob": -3.9980469,
"text": " today"
},
{
"id": 32,
"logprob": -0.14672852,
"text": "?"
},
{
"id": 50279,
"logprob": -0.26489258,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8618164,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.9506836,
"special": false,
"text": "'m"
},
{
"id": 7016,
"logprob": -2.1738281,
"special": false,
"text": " sorry"
},
{
"id": 13,
"logprob": -0.0758667,
"special": false,
"text": ","
},
{
"id": 1394,
"logprob": -0.9135742,
"special": false,
"text": "You"
},
{
"id": 452,
"logprob": -1.1445312,
"special": false,
"text": " have"
},
{
"id": 247,
"logprob": -1.4375,
"special": false,
"text": " a"
},
{
"id": 4327,
"logprob": -1.1103516,
"special": false,
"text": " choice"
},
{
"id": 273,
"logprob": -1.0058594,
"special": false,
"text": " of"
},
{
"id": 752,
"logprob": -1.921875,
"special": false,
"text": " what"
}
]
},
"generated_text": "I'm sorry,You have a choice of what"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.1953125,
"text": "'s"
},
{
"id": 634,
"logprob": -5.125,
"text": " your"
},
{
"id": 12315,
"logprob": -9.8828125,
"text": " mood"
},
{
"id": 3063,
"logprob": -3.9980469,
"text": " today"
},
{
"id": 32,
"logprob": -0.14672852,
"text": "?"
},
{
"id": 50279,
"logprob": -0.26489258,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8618164,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.9506836,
"special": false,
"text": "'m"
},
{
"id": 7016,
"logprob": -2.1738281,
"special": false,
"text": " sorry"
},
{
"id": 13,
"logprob": -0.0758667,
"special": false,
"text": ","
},
{
"id": 1394,
"logprob": -0.9135742,
"special": false,
"text": "You"
},
{
"id": 452,
"logprob": -1.1445312,
"special": false,
"text": " have"
},
{
"id": 247,
"logprob": -1.4375,
"special": false,
"text": " a"
},
{
"id": 4327,
"logprob": -1.1103516,
"special": false,
"text": " choice"
},
{
"id": 273,
"logprob": -1.0058594,
"special": false,
"text": " of"
},
{
"id": 752,
"logprob": -1.921875,
"special": false,
"text": " what"
}
]
},
"generated_text": "I'm sorry,You have a choice of what"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox_load.json",
"repo_id": "text-generation-inference",
"token_count": 6296
} | 214 |
import pytest
import json
from text_generation.types import GrammarType
@pytest.fixture(scope="module")
def flash_llama_grammar_handle(launcher):
with launcher(
"TinyLlama/TinyLlama-1.1B-Chat-v1.0", num_shard=2, disable_grammar_support=False
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_grammar(flash_llama_grammar_handle):
await flash_llama_grammar_handle.health(300)
return flash_llama_grammar_handle.client
@pytest.mark.asyncio
async def test_flash_llama_grammar(flash_llama_grammar, response_snapshot):
response = await flash_llama_grammar.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_llama_grammar_regex(flash_llama_grammar, response_snapshot):
response = await flash_llama_grammar.generate(
"Whats Googles DNS",
max_new_tokens=10,
decoder_input_details=True,
seed=0,
grammar={
"type": GrammarType.Regex, # "regex"
"value": "((25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.){3}(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)",
},
)
assert response.details.generated_tokens == 10
assert response.generated_text == "42.1.1.101"
assert response == response_snapshot
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_llama_grammar_json(flash_llama_grammar, response_snapshot):
response = await flash_llama_grammar.generate(
"info: david holtz like trees and has two cats. ",
max_new_tokens=100,
decoder_input_details=True,
seed=0,
grammar={
"type": GrammarType.Json, # "json"
"value": json.dumps(
{
"type": "object",
"$id": "https://example.com/person.schema.json",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"title": "Person",
"properties": {
"firstName": {
"type": "string",
"description": "The person'''s first name.",
},
"lastName": {
"type": "string",
"description": "The person'''s last name.",
},
"hobby": {
"description": "The person'''s hobby.",
"type": "string",
},
"numCats": {
"description": "The number of cats the person has.",
"type": "integer",
"minimum": 0,
},
},
"required": ["firstName", "lastName", "hobby", "numCats"],
}
),
},
)
assert response.details.generated_tokens == 30
assert (
response.generated_text
== '{"firstName":"David","hobby":"Trees","lastName":"Holtz","numCats":2}'
)
assert response == response_snapshot
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_llama_grammar_load(
flash_llama_grammar, generate_load, response_snapshot
):
responses = await generate_load(
flash_llama_grammar,
"name: david. email: ",
max_new_tokens=10,
n=4,
stop_sequences=[".com"],
seed=0,
grammar={
"type": GrammarType.Regex, # "regex"
"value": "[\\w-]+@([\\w-]+\\.)+[\\w-]+", # email regex
},
)
assert len(responses) == 4
expected = "[email protected]"
for response in responses:
assert response.generated_text == expected
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
# this is the same as the above test, but only fires off a single request
# this is only to ensure that the parallel and single inference produce the same result
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_llama_grammar_single_load_instance(
flash_llama_grammar, generate_load, response_snapshot
):
response = await flash_llama_grammar.generate(
"name: david. email: ",
max_new_tokens=10,
stop_sequences=[".com"],
seed=0,
grammar={
"type": GrammarType.Regex, # "regex"
"value": "[\\w-]+@([\\w-]+\\.)+[\\w-]+", # email regex
},
)
# assert response.details.generated_tokens == 30
assert response.generated_text == "[email protected]"
assert response == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_grammar_llama.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_grammar_llama.py",
"repo_id": "text-generation-inference",
"token_count": 2366
} | 215 |
import pytest
@pytest.fixture(scope="module")
def mpt_sharded_handle(launcher):
with launcher("mosaicml/mpt-7b", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def mpt_sharded(mpt_sharded_handle):
await mpt_sharded_handle.health(300)
return mpt_sharded_handle.client
@pytest.mark.asyncio
async def test_mpt(mpt_sharded, response_snapshot):
response = await mpt_sharded.generate(
"What is Deep Learning?",
max_new_tokens=17,
decoder_input_details=True,
)
assert response.details.generated_tokens == 17
assert (
response.generated_text
== " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
)
assert response == response_snapshot
@pytest.mark.asyncio
async def test_mpt_load(mpt_sharded, generate_load, response_snapshot):
responses = await generate_load(
mpt_sharded,
"What is Deep Learning?",
max_new_tokens=17,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert (
responses[0].generated_text
== " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
)
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_mpt.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_mpt.py",
"repo_id": "text-generation-inference",
"token_count": 525
} | 216 |
import { get_options, run } from "./common.js";
const reference_latency_ms = 70;
const host = __ENV.HOST || '127.0.0.1:8000';
const max_new_tokens = 50;
function generate_payload(gpt){
const input = gpt["conversations"][0]["value"];
return {"inputs": input, "parameters": {"max_new_tokens": max_new_tokens, "decoder_input_details": true}}
}
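// Example payload shape (illustrative; the actual prompt string comes from the dataset record):
// {"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 50, "decoder_input_details": true}}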
export const options = get_options(reference_latency_ms);
export default function(){
run(host, generate_payload, max_new_tokens);
}
| text-generation-inference/load_tests/tgi.js/0 | {
"file_path": "text-generation-inference/load_tests/tgi.js",
"repo_id": "text-generation-inference",
"token_count": 184
} | 217 |
mod health;
/// Text Generation Inference Webserver
mod infer;
mod queue;
pub mod server;
mod validation;
use infer::{Infer, InferError, InferStreamResponse};
use queue::{Entry, Queue};
use serde::{Deserialize, Serialize};
use tokio::sync::OwnedSemaphorePermit;
use tokio_stream::wrappers::UnboundedReceiverStream;
use utoipa::ToSchema;
use validation::Validation;
/// Type alias for generation responses
pub(crate) type GenerateStreamResponse = (
OwnedSemaphorePermit,
u32, // input_length
UnboundedReceiverStream<Result<InferStreamResponse, InferError>>,
);
#[derive(Clone, Deserialize, ToSchema)]
pub(crate) struct VertexInstance {
#[schema(example = "What is Deep Learning?")]
pub inputs: String,
#[schema(nullable = true, default = "null", example = "null")]
pub parameters: Option<GenerateParameters>,
}
#[derive(Deserialize, ToSchema)]
pub(crate) struct VertexRequest {
#[serde(rename = "instances")]
pub instances: Vec<VertexInstance>,
}
#[derive(Clone, Deserialize, ToSchema, Serialize)]
pub(crate) struct VertexResponse {
pub predictions: Vec<String>,
}
/// Hub type
#[derive(Clone, Debug, Deserialize)]
pub struct HubModelInfo {
#[serde(rename(deserialize = "id"))]
pub model_id: String,
pub sha: Option<String>,
pub pipeline_tag: Option<String>,
}
#[derive(Clone, Deserialize, Default)]
pub struct HubTokenizerConfig {
pub chat_template: Option<String>,
pub completion_template: Option<String>,
#[serde(deserialize_with = "token_serde::deserialize")]
pub bos_token: Option<String>,
#[serde(deserialize_with = "token_serde::deserialize")]
pub eos_token: Option<String>,
}
impl HubTokenizerConfig {
pub fn from_file(filename: &std::path::Path) -> Self {
let content = std::fs::read_to_string(filename).unwrap();
serde_json::from_str(&content).unwrap_or_default()
}
}
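// With the serde attributes below (`tag = "type"`, `content = "value"`), a grammar is
// expected on the wire as a tagged object, e.g. (illustrative):
//   {"type": "regex", "value": "\\d+"}
//   {"type": "json", "value": {"properties": {"location": {"type": "string"}}}}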
#[derive(Clone, Debug, Deserialize, ToSchema)]
#[serde(tag = "type", content = "value")]
pub(crate) enum GrammarType {
/// A string that represents a [JSON Schema](https://json-schema.org/).
///
/// JSON Schema is a declarative language that allows to annotate JSON documents
/// with types and descriptions.
#[serde(rename = "json")]
#[schema(example = json ! ({"properties": {"location":{"type": "string"}}}))]
Json(serde_json::Value),
#[serde(rename = "regex")]
Regex(String),
}
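// `bos_token`/`eos_token` in a tokenizer_config.json may be either a plain string or a
// structured "AddedToken" object; the deserializer below accepts both forms, e.g.
//   "bos_token": "<s>"
//   "bos_token": {"__type": "AddedToken", "content": "<s>", "lstrip": false, ...}
// and yields Some("<s>") in both cases (see the tests at the bottom of this file).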
mod token_serde {
use super::*;
use serde::de;
use serde::Deserializer;
use serde_json::Value;
pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
where
D: Deserializer<'de>,
{
let value = Value::deserialize(deserializer)?;
match value {
Value::String(s) => Ok(Some(s)),
Value::Object(map) => {
if let Some(content) = map.get("content").and_then(|v| v.as_str()) {
Ok(Some(content.to_string()))
} else {
Err(de::Error::custom(
"content key not found in structured token",
))
}
}
_ => Err(de::Error::custom("invalid token format")),
}
}
}
#[derive(Clone, Debug, Serialize, ToSchema)]
pub struct Info {
/// Model info
#[schema(example = "bigscience/blomm-560m")]
pub model_id: String,
#[schema(nullable = true, example = "e985a63cdc139290c5f700ff1929f0b5942cced2")]
pub model_sha: Option<String>,
#[schema(example = "torch.float16")]
pub model_dtype: String,
#[schema(example = "cuda")]
pub model_device_type: String,
#[schema(nullable = true, example = "text-generation")]
pub model_pipeline_tag: Option<String>,
/// Router Parameters
#[schema(example = "128")]
pub max_concurrent_requests: usize,
#[schema(example = "2")]
pub max_best_of: usize,
#[schema(example = "4")]
pub max_stop_sequences: usize,
#[schema(example = "1024")]
pub max_input_length: usize,
#[schema(example = "2048")]
pub max_total_tokens: usize,
#[schema(example = "1.2")]
pub waiting_served_ratio: f32,
#[schema(example = "32000")]
pub max_batch_total_tokens: u32,
#[schema(example = "20")]
pub max_waiting_tokens: usize,
#[schema(nullable = true, example = "null")]
pub max_batch_size: Option<usize>,
#[schema(example = "2")]
pub validation_workers: usize,
/// Router Info
#[schema(example = "0.5.0")]
pub version: &'static str,
#[schema(nullable = true, example = "null")]
pub sha: Option<&'static str>,
#[schema(nullable = true, example = "null")]
pub docker_label: Option<&'static str>,
}
#[derive(Clone, Debug, Deserialize, ToSchema, Default)]
pub(crate) struct GenerateParameters {
#[serde(default)]
#[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 1)]
pub best_of: Option<usize>,
#[serde(default)]
#[schema(
exclusive_minimum = 0.0,
nullable = true,
default = "null",
example = 0.5
)]
pub temperature: Option<f32>,
#[serde(default)]
#[schema(
exclusive_minimum = 0.0,
nullable = true,
default = "null",
example = 1.03
)]
pub repetition_penalty: Option<f32>,
#[serde(default)]
#[schema(
exclusive_minimum = -2.0,
nullable = true,
default = "null",
example = 0.1
)]
pub frequency_penalty: Option<f32>,
#[serde(default)]
#[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 10)]
pub top_k: Option<i32>,
#[serde(default)]
#[schema(
exclusive_minimum = 0.0,
maximum = 1.0,
nullable = true,
default = "null",
example = 0.95
)]
pub top_p: Option<f32>,
#[serde(default)]
#[schema(
exclusive_minimum = 0.0,
maximum = 1.0,
nullable = true,
default = "null",
example = 0.95
)]
pub typical_p: Option<f32>,
#[serde(default)]
#[schema(default = "false", example = true)]
pub do_sample: bool,
#[serde(default = "default_max_new_tokens")]
#[schema(nullable = true, default = "100", example = "20")]
pub max_new_tokens: Option<u32>,
#[serde(default)]
#[schema(nullable = true, default = "null", example = false)]
pub return_full_text: Option<bool>,
#[serde(default)]
#[schema(inline, max_items = 4, example = json ! (["photographer"]))]
pub stop: Vec<String>,
#[serde(default)]
#[schema(nullable = true, default = "null", example = "null")]
pub truncate: Option<usize>,
#[serde(default)]
#[schema(default = "false", example = true)]
pub watermark: bool,
#[serde(default)]
#[schema(default = "true")]
pub details: bool,
#[serde(default)]
#[schema(default = "true")]
pub decoder_input_details: bool,
#[serde(default)]
#[schema(
exclusive_minimum = 0,
nullable = true,
default = "null",
example = "null"
)]
pub seed: Option<u64>,
#[serde(default)]
#[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 5)]
pub top_n_tokens: Option<u32>,
#[serde(default)]
pub grammar: Option<GrammarType>,
}
fn default_max_new_tokens() -> Option<u32> {
Some(100)
}
fn default_parameters() -> GenerateParameters {
GenerateParameters {
best_of: None,
temperature: None,
repetition_penalty: None,
frequency_penalty: None,
top_k: None,
top_p: None,
typical_p: None,
do_sample: true,
max_new_tokens: default_max_new_tokens(),
return_full_text: None,
stop: Vec::new(),
truncate: None,
watermark: false,
details: false,
decoder_input_details: false,
seed: None,
top_n_tokens: None,
grammar: None,
}
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
pub struct CompletionRequest {
/// UNUSED
#[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
/// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
pub model: String,
/// The prompt to generate completions for.
#[schema(example = "What is Deep Learning?")]
pub prompt: String,
/// The maximum number of tokens that can be generated in the chat completion.
#[serde(default)]
#[schema(default = "32")]
pub max_tokens: Option<u32>,
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
/// lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
#[serde(default)]
#[schema(nullable = true, example = 1.0)]
pub temperature: Option<f32>,
/// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
/// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
#[serde(default)]
#[schema(nullable = true, example = 0.95)]
pub top_p: Option<f32>,
#[serde(default = "bool::default")]
pub stream: bool,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
/// The text to append to the prompt. This is useful for completing sentences or generating a paragraph of text.
/// please see the completion_template field in the model's tokenizer_config.json file for completion template.
#[serde(default)]
pub suffix: Option<String>,
#[serde(default)]
pub repetition_penalty: Option<f32>,
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
/// decreasing the model's likelihood to repeat the same line verbatim.
#[serde(default)]
#[schema(example = "1.0")]
pub frequency_penalty: Option<f32>,
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct Completion {
pub id: String,
pub object: String,
#[schema(example = "1706270835")]
pub created: u64,
#[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
pub model: String,
pub system_fingerprint: String,
pub choices: Vec<CompletionComplete>,
pub usage: Usage,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct CompletionComplete {
pub index: u32,
pub text: String,
pub logprobs: Option<Vec<f32>>,
pub finish_reason: String,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletion {
pub id: String,
pub object: String,
#[schema(example = "1706270835")]
pub created: u64,
#[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
pub model: String,
pub system_fingerprint: String,
pub choices: Vec<ChatCompletionComplete>,
pub usage: Usage,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionComplete {
pub index: u32,
pub message: Message,
pub logprobs: Option<ChatCompletionLogprobs>,
pub finish_reason: String,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionLogprobs {
content: Vec<ChatCompletionLogprob>,
}
impl From<(Token, Vec<Token>)> for ChatCompletionLogprobs {
fn from(value: (Token, Vec<Token>)) -> Self {
let (token, top_tokens) = value;
Self {
content: vec![ChatCompletionLogprob {
token: token.text,
logprob: token.logprob,
top_logprobs: top_tokens
.into_iter()
.map(|t| ChatCompletionTopLogprob {
token: t.text,
logprob: t.logprob,
})
.collect(),
}],
}
}
}
impl From<(Vec<Token>, Vec<Vec<Token>>)> for ChatCompletionLogprobs {
fn from(value: (Vec<Token>, Vec<Vec<Token>>)) -> Self {
let (tokens, top_tokens) = value;
Self {
content: tokens
.into_iter()
.zip(top_tokens)
.map(|(t, top_t)| ChatCompletionLogprob {
token: t.text,
logprob: t.logprob,
top_logprobs: top_t
.into_iter()
.map(|t| ChatCompletionTopLogprob {
token: t.text,
logprob: t.logprob,
})
.collect(),
})
.collect(),
}
}
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionLogprob {
token: String,
logprob: f32,
top_logprobs: Vec<ChatCompletionTopLogprob>,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionTopLogprob {
token: String,
logprob: f32,
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct Usage {
pub prompt_tokens: u32,
pub completion_tokens: u32,
pub total_tokens: u32,
}
impl ChatCompletion {
pub(crate) fn new(
model: String,
system_fingerprint: String,
output: Option<String>,
created: u64,
details: Details,
return_logprobs: bool,
tool_calls: Option<ToolCall>,
) -> Self {
Self {
id: String::new(),
object: "text_completion".into(),
created,
model,
system_fingerprint,
choices: vec![ChatCompletionComplete {
index: 0,
message: Message {
role: "assistant".into(),
content: output,
name: None,
tool_calls,
},
logprobs: return_logprobs
.then(|| ChatCompletionLogprobs::from((details.tokens, details.top_tokens))),
finish_reason: details.finish_reason.to_string(),
}],
usage: Usage {
prompt_tokens: details.prefill.len() as u32,
completion_tokens: details.generated_tokens,
total_tokens: details.prefill.len() as u32 + details.generated_tokens,
},
}
}
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct CompletionCompleteChunk {
pub id: String,
pub object: String,
pub created: u64,
pub choices: Vec<CompletionComplete>,
pub model: String,
pub system_fingerprint: String,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionChunk {
pub id: String,
pub object: String,
#[schema(example = "1706270978")]
pub created: u64,
#[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
pub model: String,
pub system_fingerprint: String,
pub choices: Vec<ChatCompletionChoice>,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionChoice {
pub index: u32,
pub delta: ChatCompletionDelta,
pub logprobs: Option<ChatCompletionLogprobs>,
pub finish_reason: Option<String>,
}
#[derive(Clone, Debug, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionDelta {
#[schema(example = "user")]
pub role: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[schema(example = "What is Deep Learning?")]
pub content: Option<String>,
// default to None
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tool_calls: Option<DeltaToolCall>,
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
pub(crate) struct DeltaToolCall {
pub index: u32,
pub id: String,
pub r#type: String,
pub function: Function,
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
pub(crate) struct Function {
pub name: Option<String>,
pub arguments: String,
}
#[allow(clippy::too_many_arguments)]
impl ChatCompletionChunk {
pub(crate) fn new(
model: String,
system_fingerprint: String,
delta: Option<String>,
tool_calls: Option<Vec<String>>,
created: u64,
logprobs: Option<ChatCompletionLogprobs>,
finish_reason: Option<String>,
) -> Self {
Self {
id: String::new(),
object: "text_completion".to_string(),
created,
model,
system_fingerprint,
choices: vec![ChatCompletionChoice {
index: 0,
delta: ChatCompletionDelta {
role: "assistant".to_string(),
content: delta,
tool_calls: tool_calls.map(|tc| DeltaToolCall {
index: 0,
id: String::new(),
r#type: "function".to_string(),
function: Function {
name: None,
arguments: tc[0].to_string(),
},
}),
},
logprobs,
finish_reason,
}],
}
}
}
#[derive(Clone, Deserialize, ToSchema, Serialize)]
pub(crate) struct ChatRequest {
#[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
/// [UNUSED] ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
pub model: String,
/// A list of messages comprising the conversation so far.
#[schema(example = "[{\"role\": \"user\", \"content\": \"What is Deep Learning?\"}]")]
pub messages: Vec<Message>,
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
/// decreasing the model's likelihood to repeat the same line verbatim.
#[serde(default)]
#[schema(example = "1.0")]
pub frequency_penalty: Option<f32>,
/// UNUSED
/// Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens
/// (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,
/// the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
/// but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should
/// result in a ban or exclusive selection of the relevant token.
#[serde(default)]
pub logit_bias: Option<Vec<f32>>,
/// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each
/// output token returned in the content of message.
#[serde(default)]
#[schema(example = "false")]
pub logprobs: Option<bool>,
/// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with
/// an associated log probability. logprobs must be set to true if this parameter is used.
#[serde(default)]
#[schema(example = "5")]
pub top_logprobs: Option<u32>,
/// The maximum number of tokens that can be generated in the chat completion.
#[serde(default)]
#[schema(example = "32")]
pub max_tokens: Option<u32>,
/// UNUSED
/// How many chat completion choices to generate for each input message. Note that you will be charged based on the
/// number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
#[serde(default)]
#[schema(nullable = true, example = "2")]
pub n: Option<u32>,
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
/// increasing the model's likelihood to talk about new topics
#[serde(default)]
#[schema(nullable = true, example = 0.1)]
pub presence_penalty: Option<f32>,
/// Up to 4 sequences where the API will stop generating further tokens.
#[serde(default)]
#[schema(nullable = true, example = "null")]
pub stop: Option<Vec<String>>,
#[serde(default = "bool::default")]
pub stream: bool,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
/// lower values like 0.2 will make it more focused and deterministic.
///
/// We generally recommend altering this or `top_p` but not both.
#[serde(default)]
#[schema(nullable = true, example = 1.0)]
pub temperature: Option<f32>,
/// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
/// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
#[serde(default)]
#[schema(nullable = true, example = 0.95)]
pub top_p: Option<f32>,
/// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of
/// functions the model may generate JSON inputs for.
#[serde(default)]
#[schema(nullable = true, example = "null")]
pub tools: Option<Vec<Tool>>,
/// A prompt to be appended before the tools
#[serde(default = "default_tool_prompt")]
#[schema(
nullable = true,
example = "\"Based on the conversation, please choose the most appropriate tool to use: \""
)]
pub tool_prompt: Option<String>,
/// A specific tool to use. If not provided, the model will default to use any of the tools provided in the tools parameter.
#[serde(default)]
#[schema(nullable = true, example = "null")]
#[serde(deserialize_with = "deserialize_tool_choice::deserialize")]
pub tool_choice: Option<ToolType>,
}
fn default_tool_prompt() -> Option<String> {
Some(
"\nBased on the conversation, please choose the most appropriate tool to use: ".to_string(),
)
}
#[derive(Clone, Deserialize, ToSchema, Serialize)]
enum ToolType {
FunctionName(String),
OneOf,
}
/// Deserialize the tool choice from the JSON input or from the function name ("none" is allowed but mapped to None)
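/// Accepted inputs and the resulting `ToolType` (the function name is illustrative):
///   "none"                                -> None
///   "auto"                                -> Some(ToolType::OneOf)
///   "get_weather"                         -> Some(ToolType::FunctionName("get_weather"))
///   {"function": {"name": "get_weather"}} -> Some(ToolType::FunctionName("get_weather"))
///   null                                  -> Some(ToolType::OneOf)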
mod deserialize_tool_choice {
use super::*;
use serde::de;
use serde::Deserializer;
use serde_json::Value;
pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<ToolType>, D::Error>
where
D: Deserializer<'de>,
{
let value = Value::deserialize(deserializer)?;
match value {
Value::String(s) => match s.as_str() {
"none" => Ok(None),
"auto" => Ok(Some(ToolType::OneOf)),
_ => Ok(Some(ToolType::FunctionName(s))),
},
Value::Object(map) => {
if let Some(content) = map
.get("function")
.and_then(|v| v.get("name"))
.and_then(|v| v.as_str())
{
Ok(Some(ToolType::FunctionName(content.to_string())))
} else {
Err(de::Error::custom("function key not found in tool choice"))
}
}
Value::Null => Ok(Some(ToolType::OneOf)),
_ => Err(de::Error::custom("invalid token format")),
}
}
}
#[derive(Debug, Deserialize, Serialize, ToSchema)]
pub struct Tools {
#[serde(flatten)]
functions_map: FunctionsMap,
properties: Properties,
}
#[derive(Debug, Serialize, Deserialize)]
struct FunctionsMap {
#[serde(rename = "$functions")]
functions: std::collections::HashMap<String, serde_json::Value>,
}
#[derive(Debug, Serialize, Deserialize)]
struct FunctionRef {
#[serde(rename = "$ref")]
ref_path: String,
}
#[derive(Debug, Serialize, Deserialize)]
struct Properties {
#[serde(serialize_with = "serialize_function")]
function: Vec<FunctionRef>,
}
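// `serialize_function` below emits the refs wrapped in an "anyOf" object, so a
// `Properties` holding one `FunctionRef { ref_path: "#/$functions/f" }` (illustrative)
// serializes as: {"function": {"anyOf": [{"$ref": "#/$functions/f"}]}}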
fn serialize_function<S>(functions: &Vec<FunctionRef>, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
use serde::ser::SerializeStruct;
let mut state = serializer.serialize_struct("Function", 1)?;
state.serialize_field("anyOf", functions)?;
state.end()
}
#[derive(Clone, Debug, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct FunctionDefinition {
#[serde(default)]
pub description: Option<String>,
pub name: String,
pub parameters: serde_json::Value,
}
#[derive(Clone, Debug, Deserialize, Serialize, ToSchema)]
pub(crate) struct Tool {
// The type of the tool. Currently, only 'function' is supported.
#[schema(example = "function")]
pub r#type: String,
    // The function definition for the tool.
pub function: FunctionDefinition,
}
#[derive(Clone, Serialize, Deserialize)]
pub(crate) struct ChatTemplateInputs<'a> {
messages: Vec<Message>,
bos_token: Option<&'a str>,
eos_token: Option<&'a str>,
add_generation_prompt: bool,
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Default, Debug)]
pub(crate) struct ToolCall {
pub id: u32,
pub r#type: String,
pub function: FunctionDefinition,
}
#[derive(Clone, Deserialize, ToSchema, Serialize)]
pub(crate) struct Message {
#[schema(example = "user")]
pub role: String,
#[serde(skip_serializing_if = "Option::is_none")]
#[schema(example = "My name is David and I")]
pub content: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[schema(example = "\"David\"")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tool_calls: Option<ToolCall>,
}
#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct GenerateRequest {
#[schema(example = "My name is Olivier and I")]
pub inputs: String,
#[serde(default = "default_parameters")]
pub parameters: GenerateParameters,
}
#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct CompatGenerateRequest {
#[schema(example = "My name is Olivier and I")]
pub inputs: String,
#[serde(default = "default_parameters")]
pub parameters: GenerateParameters,
#[serde(default)]
#[schema(default = "false")]
pub stream: bool,
}
impl From<CompatGenerateRequest> for GenerateRequest {
fn from(req: CompatGenerateRequest) -> Self {
Self {
inputs: req.inputs,
parameters: req.parameters,
}
}
}
#[derive(Debug, Serialize, ToSchema)]
pub struct PrefillToken {
#[schema(example = 0)]
id: u32,
#[schema(example = "test")]
text: String,
#[schema(nullable = true, example = - 0.34)]
logprob: f32,
}
#[derive(Debug, Serialize, ToSchema, Clone)]
pub struct Token {
#[schema(example = 0)]
id: u32,
#[schema(example = "test")]
text: String,
#[schema(nullable = true, example = - 0.34)]
logprob: f32,
#[schema(example = "false")]
special: bool,
}
#[derive(Debug, Serialize, ToSchema)]
pub struct SimpleToken {
#[schema(example = 0)]
id: u32,
#[schema(example = "test")]
text: String,
#[schema(example = 0)]
start: usize,
#[schema(example = 2)]
stop: usize,
}
#[derive(Serialize, ToSchema)]
#[serde(rename_all(serialize = "snake_case"))]
#[schema(example = "Length")]
pub(crate) enum FinishReason {
#[schema(rename = "length")]
Length,
#[serde(rename = "eos_token")]
#[schema(rename = "eos_token")]
EndOfSequenceToken,
#[schema(rename = "stop_sequence")]
StopSequence,
}
impl std::fmt::Display for FinishReason {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
FinishReason::Length => write!(f, "length"),
FinishReason::EndOfSequenceToken => write!(f, "eos_token"),
FinishReason::StopSequence => write!(f, "stop_sequence"),
}
}
}
#[derive(Serialize, ToSchema)]
pub(crate) struct BestOfSequence {
#[schema(example = "test")]
pub generated_text: String,
#[schema(example = "length")]
pub finish_reason: FinishReason,
#[schema(example = 1)]
pub generated_tokens: u32,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
pub prefill: Vec<PrefillToken>,
pub tokens: Vec<Token>,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub top_tokens: Vec<Vec<Token>>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct Details {
#[schema(example = "length")]
pub finish_reason: FinishReason,
#[schema(example = 1)]
pub generated_tokens: u32,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
pub prefill: Vec<PrefillToken>,
pub tokens: Vec<Token>,
#[serde(skip_serializing_if = "Option::is_none")]
pub best_of_sequences: Option<Vec<BestOfSequence>>,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub top_tokens: Vec<Vec<Token>>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct GenerateResponse {
#[schema(example = "test")]
pub generated_text: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub details: Option<Details>,
}
#[derive(Serialize, ToSchema)]
#[serde(transparent)]
pub(crate) struct TokenizeResponse(Vec<SimpleToken>);
#[derive(Serialize, ToSchema)]
pub(crate) struct StreamDetails {
#[schema(example = "length")]
pub finish_reason: FinishReason,
#[schema(example = 1)]
pub generated_tokens: u32,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct StreamResponse {
pub index: u32,
pub token: Token,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub top_tokens: Vec<Token>,
#[schema(nullable = true, default = "null", example = "test")]
pub generated_text: Option<String>,
#[schema(nullable = true, default = "null")]
pub details: Option<StreamDetails>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct ErrorResponse {
pub error: String,
pub error_type: String,
}
#[cfg(test)]
mod tests {
use super::*;
use tokenizers::Tokenizer;
pub(crate) async fn get_tokenizer() -> Tokenizer {
let api = hf_hub::api::sync::Api::new().unwrap();
let repo = api.model("gpt2".to_string());
let filename = repo.get("tokenizer.json").unwrap();
Tokenizer::from_file(filename).unwrap()
}
#[test]
fn test_hub_nested_tokens_tokenizer_config() {
// this is a subset of the tokenizer.json file
// in this case we expect the tokens to be encoded as simple strings
let json_content = r#"{
"chat_template": "test",
"bos_token": "<ïœbeginâofâsentenceïœ>",
"eos_token": "<ïœendâofâsentenceïœ>"
}"#;
let config: HubTokenizerConfig = serde_json::from_str(json_content).unwrap();
// check that we successfully parsed the tokens
assert_eq!(config.chat_template, Some("test".to_string()));
        assert_eq!(
            config.bos_token,
            Some("<｜begin▁of▁sentence｜>".to_string())
        );
        assert_eq!(config.eos_token, Some("<｜end▁of▁sentence｜>".to_string()));
// in this case we expect the tokens to be encoded as structured tokens
// we want the content of the structured token
let json_content = r#"{
"chat_template": "test",
"bos_token": {
"__type": "AddedToken",
"content": "<ïœbeginâofâsentenceïœ>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
"eos_token": {
"__type": "AddedToken",
"content": "<ïœendâofâsentenceïœ>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
}
}"#;
let config: HubTokenizerConfig = serde_json::from_str(json_content).unwrap();
// check that we successfully parsed the tokens
assert_eq!(config.chat_template, Some("test".to_string()));
        assert_eq!(
            config.bos_token,
            Some("<｜begin▁of▁sentence｜>".to_string())
        );
        assert_eq!(config.eos_token, Some("<｜end▁of▁sentence｜>".to_string()));
}
}
| text-generation-inference/router/src/lib.rs/0 | {
"file_path": "text-generation-inference/router/src/lib.rs",
"repo_id": "text-generation-inference",
"token_count": 13923
} | 218 |
#include <ATen/Dispatch.h>
#include <THC/THCAtomics.cuh>
#include <ATen/ATen.h>
#include <torch/torch.h>
#include <vector>
#include <optional>
/**
* Friendly reminder of how multithreading works in CUDA: https://developer.nvidia.com/blog/even-easier-introduction-cuda
* Check example at https://github.com/thomasw21/LinearTransformers/blob/main/model/attention/fast_weight/fast_weight_cuda.cu
**/
// Available in pytorch main
//#define DISPATCH_CASE_FLOATING_TYPES(...) \
// at::AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
// at::AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
// at::AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
// at::AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) \
/*
* Forward passes
*/
/**
* cast to fp32 if in fp16 + mask + softmax computation in fp32 + cast back to original dtype
**/
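// Numerically stable masked softmax, computed below in three __syncthreads-separated
// phases (max-reduce, exp-sum-reduce, normalize):
//   out_i = exp(x_i - max_j x_j) / sum_j exp(x_j - max_j x_j)  over unmasked positions j,
// with out_i = 0 wherever mask_i is set, or for the whole row if every position is masked.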
template<typename attention_scores_scalar, int64_t min_kv_length_shard_size_per_thread>
__global__ void forward_masked_softmax_kernel(
const torch::PackedTensorAccessor32<attention_scores_scalar, 2, torch::RestrictPtrTraits> attention_scores, // [B, KV]
const torch::PackedTensorAccessor32<bool, 2, torch::RestrictPtrTraits> mask, // [B, KV]
torch::PackedTensorAccessor32<attention_scores_scalar, 2, torch::RestrictPtrTraits> result, // [B, KV]
const int64_t effective_kv_length,
const dim3 blockDim,
const int64_t rows_per_block,
const int64_t kv_length,
const int64_t batch_size
) {
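    // Thread-to-work mapping (illustrative numbers): with kv_length = 10 and
    // min_kv_length_shard_size_per_thread = 4, effective_kv_length = 3, so
    // threadIdx.x = 4 handles row 1 (4 / 3), shard 1 (4 % 3), i.e. kv positions
    // [4, 8) of that row, while shard 2 covers the ragged tail [8, 10).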
const auto row_id = threadIdx.x / effective_kv_length;
const auto effective_kv_length_id = threadIdx.x % effective_kv_length;
const auto kv_length_start = effective_kv_length_id * min_kv_length_shard_size_per_thread;
auto kv_length_end_ = (effective_kv_length_id + 1) * min_kv_length_shard_size_per_thread;
kv_length_end_ = (kv_length_end_ > kv_length) ? kv_length : kv_length_end_;
const auto kv_length_end = kv_length_end_;
const auto batch_id = blockIdx.x * rows_per_block + row_id;
// We need 2 float storage for each row, one for max computation, the other for normalizing exponential
extern __shared__ float temp_storage[];
const auto row_id_mem_offset = row_id * 2;
if (effective_kv_length_id == 0) {
temp_storage[row_id_mem_offset] = -std::numeric_limits<float>::infinity();
temp_storage[row_id_mem_offset + 1] = 0;
}
__syncthreads();
// Compute mask and max
if (batch_id < batch_size) {
float thread_max = -std::numeric_limits<float>::infinity();
for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) {
if (mask[batch_id][kv_length_id] == 0) {
const float candidate = attention_scores[batch_id][kv_length_id];
thread_max = (thread_max < candidate) ? candidate : thread_max;
}
}
if (thread_max != -std::numeric_limits<float>::infinity()) {
// TODO @thomasw21 with more memory we can probably compute a much faster `max-reduce` in parallel O(ln(n)) operations in each memory slot
gpuAtomicMax(&temp_storage[row_id_mem_offset], thread_max);
}
}
__syncthreads();
// Compute exp(elt - max) masked
float exponential[min_kv_length_shard_size_per_thread];
if (batch_id < batch_size) {
float thread_add = 0;
for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) {
if (mask[batch_id][kv_length_id] == 0) {
exponential[kv_length_id - kv_length_start] = std::exp(static_cast<float>(attention_scores[batch_id][kv_length_id]) - temp_storage[row_id_mem_offset]);
thread_add = thread_add + exponential[kv_length_id - kv_length_start];
} else {
exponential[kv_length_id - kv_length_start] = 0.;
}
}
if (thread_add > 0) {
// TODO @thomasw21 with more memory we can probably compute a much faster `sum-reduce` in parallel O(ln(n)) operations in each memory slot
gpuAtomicAdd(&temp_storage[row_id_mem_offset + 1], thread_add);
}
}
__syncthreads();
// Compute softmax
if (batch_id < batch_size) {
// If sum of all exponential is 0, we set the softmax values to 0
if (temp_storage[row_id_mem_offset + 1] == 0.) {
for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) {
result[batch_id][kv_length_id] = 0.;
}
} else {
for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) {
result[batch_id][kv_length_id] = static_cast<attention_scores_scalar>(exponential[kv_length_id - kv_length_start] / temp_storage[row_id_mem_offset + 1]);
}
}
}
}
#define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
std::tuple<at::Tensor, std::optional<std::vector<at::Tensor>>, at::Tensor> forward(
const at::Tensor query,
const at::Tensor key,
const at::Tensor value,
const std::optional<std::vector<at::Tensor>> layer_past,
const at::Tensor attention_mask,
const std::optional<at::Tensor> head_mask,
const float inv_norm_factor,
const int num_heads,
const bool use_cache
) {
auto query_layer = query;
auto key_layer = key;
auto value_layer = value;
if (layer_past) {
const auto past_key = (*layer_past).at(0);
const auto past_value = (*layer_past).at(1);
key_layer = at::cat({past_key, key_layer}, 2);
value_layer = at::cat({past_value, value_layer}, 2);
}
std::optional<std::vector<at::Tensor>> present;
if (use_cache) {
present = {key_layer, value_layer};
} else {
present = {};
}
const auto batch_size = query_layer.size(0);
const auto q_length = query_layer.size(2);
const auto attn_head_size = query_layer.size(3);
const auto batch_size_times_num_heads = batch_size * num_heads;
const auto kv_length = key_layer.size(2);
const auto query_view = query_layer.reshape({batch_size_times_num_heads, q_length, attn_head_size});
auto key_view = key_layer.reshape({batch_size_times_num_heads, kv_length, attn_head_size}).transpose(1, 2);
auto value_view = value_layer.reshape({batch_size_times_num_heads, kv_length, attn_head_size});
auto query_scaled = query_view * inv_norm_factor;
auto attention_scores = at::bmm(query_scaled, key_view);
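    // Shape check (illustrative): [B*H, Q, D] bmm [B*H, D, KV] -> attention_scores [B*H, Q, KV].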
// Computing `optionally_cast_fp16_to_fp32 + masked_fill + softmax + cast_to_intial_dtype`
at::Tensor attention_probs;
if (true) {
// TODO @thomasw21: it's easier to think of attention_scores as 2D tensors
const auto attention_scores_2d = attention_scores.view({batch_size_times_num_heads * q_length, kv_length});
const auto attention_mask_2d = attention_mask.view({batch_size_times_num_heads * q_length, kv_length});
// Custom kernel
attention_probs = at::empty_like(attention_scores_2d);
// Check that inputs and contiguous + cuda tensors
CHECK_INPUT(attention_scores_2d);
CHECK_INPUT(attention_mask_2d);
// TODO @thomas21: change by to this as it's cleaner when pytorch 1.13 comes out
// DISPATCH_CASE_FLOATING_TYPES(attention_scores.scalar_type(), "masked_softmax", [&] {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, attention_scores.scalar_type(), "masked_softmax", [&] {
/*
* Understanding how GPUs work: https://developer.nvidia.com/blog/cuda-refresher-cuda-programming-model/
* A100 specifications: https://images.nvidia.com/aem-dam/en-zz/Solutions/data-center/nvidia-ampere-architecture-whitepaper.pdf
* - SMs: 108
* - TPCs: 56 (What's that?)
* - Memory size: 40 GB
* - L2 Cache size: 40960 KB (shared across all SMs)
* - L1/Shared memory size: 192 KB (shared across all threads within a SM)
* - Max Threads / SM: 2048
* - Max Thread Blocks / SM: 32
*/
/*
     * We should split [batch_size_times_num_heads_block, q_length] into separate blocks and put [batch_size_times_num_heads_block_size, kv_length] in a single block
* with multiple threads as we need to `sync_threads` to run exponential sum.
* We maximise the usage of threads within a single block
*/
// TODO @thomasw21 figure out everything warp related:
// - why do they have to be power of 2
// TODO @thomas21 check why everyone is setting 1024 when officially it's 2048
const auto MAX_THREADS_PER_SM = 1024;
// TODO @thomasw21 figure out how to have longer sequences, currently the maximum is `max_kv_length = MAX_THREADS_PER_SM * MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD`
const auto MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD = 4;
// `effective_kv_length = ceil(kv_length / MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD)`
const auto effective_kv_length = (kv_length - 1)/ MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD + 1;
const auto rows_per_block = MAX_THREADS_PER_SM / effective_kv_length;
const auto num_blocks = (batch_size_times_num_heads * q_length - 1) / rows_per_block + 1;
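            // Worked example (hypothetical shapes): kv_length = 128 gives
            // effective_kv_length = 32, rows_per_block = 1024 / 32 = 32, and
            // num_blocks = ceil(batch_size_times_num_heads * q_length / 32).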
const dim3 gridDim(num_blocks); // Number of blocks that run
const dim3 blockDim(MAX_THREADS_PER_SM); // Number of threads that run per block
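            // Two floats of shared memory per row: presumably one for the
            // running max and one for the exponential sum of the softmax.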
const int shared_mem_forward = rows_per_block * 2 * sizeof(float);
// 192 * 2 ** 10
// const auto MAX_L1_MEMORY = 196608;
// const auto MAX_SMs = 108;
// TORCH_CHECK(batch_size_times_num_heads * q_length <= MAX_L1_MEMORY, "Shared memory exceeds 192KB limitation.");
            // TORCH_CHECK(gridDim.x * gridDim.y * gridDim.z <= MAX_SMs, "A100s only have 108 SMs. Raising because the required number of blocks is bigger.");
            // TORCH_CHECK(blockDim.x * blockDim.y * blockDim.z <= MAX_THREADS_PER_SM, "A100s only have 2048 threads per block. Raising because the requested number of threads is higher.");
forward_masked_softmax_kernel<scalar_t, MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD><<<gridDim, blockDim, shared_mem_forward>>>(
attention_scores_2d.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
attention_mask_2d.packed_accessor32<bool, 2, torch::RestrictPtrTraits>(),
attention_probs.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
effective_kv_length,
blockDim,
rows_per_block,
kv_length,
batch_size_times_num_heads * q_length
);
});
attention_probs = attention_probs.view({batch_size_times_num_heads, q_length, kv_length});
} else {
// Pytorch C++ API
auto input_dtype = attention_scores.scalar_type();
        if (input_dtype == at::ScalarType::Half) {
            attention_scores = attention_scores.to(at::ScalarType::Float);
        }
// TODO @thomasw21 Figure out how to get minimum value
auto attn_weights = attention_scores.masked_fill_(attention_mask, -1e34);
attention_probs = attn_weights.softmax(-1, at::ScalarType::Float).to(input_dtype);
}
auto context_layer = attention_probs.bmm(value_view);
// `_merge_heads`
context_layer = context_layer.view({batch_size, num_heads, q_length, attn_head_size});
context_layer = context_layer.permute({0, 2, 1, 3});
context_layer = context_layer.reshape({batch_size, q_length, attn_head_size * num_heads});
return std::make_tuple(context_layer, present, attention_probs);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def(
"forward",
&forward,
"GPT-Neox attention mechanism forward (CUDA)"
);
}
| text-generation-inference/server/custom_kernels/custom_kernels/fused_attention_cuda.cu/0 | {
"file_path": "text-generation-inference/server/custom_kernels/custom_kernels/fused_attention_cuda.cu",
"repo_id": "text-generation-inference",
"token_count": 5265
} | 219 |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#ifndef _util_cuh
#define _util_cuh
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#if defined(USE_ROCM)
#define cudaUnspecified hipErrorUnknown
#else
#define cudaUnspecified cudaErrorApiFailureBase
#endif
// React to failure on return code != cudaSuccess
#define _cuda_check(fn) \
do { \
{_cuda_err = fn;} \
if (_cuda_err != cudaSuccess) goto _cuda_fail; \
} while(false)
// React to failure on return code == 0
#define _alloc_check(fn) \
do { \
if (!(fn)) { _cuda_err = cudaUnspecified; goto _cuda_fail; } \
else _cuda_err = cudaSuccess; \
} while(false)
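// Usage sketch (assumed convention, inferred from the macro bodies): callers
// declare `cudaError_t _cuda_err;` and provide a `_cuda_fail:` label, e.g.
//     _cuda_check(cudaMalloc(&ptr, bytes));
//     _alloc_check(malloc(bytes));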
#endif
| text-generation-inference/server/exllama_kernels/exllama_kernels/util.cuh/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/util.cuh",
"repo_id": "text-generation-inference",
"token_count": 283
} | 220 |
#ifndef _qdq_6_cuh
#define _qdq_6_cuh
#include "qdq_util.cuh"
#include "../../config.h"
#if QMODE_6BIT == 1
// Not implemented
#else
__forceinline__ __device__ void shuffle_6bit_16
(
uint32_t* q,
int stride
)
{
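    // Intentionally empty: the 6-bit layout is consumed as stored; this stub
    // presumably exists only for interface parity with the other bit widths.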
}
__forceinline__ __device__ void dequant_6bit_16
(
const uint32_t q_0,
const uint32_t q_1,
const uint32_t q_2,
half2 (&dq)[8],
int stride
)
{
half dqh[16];
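    // 16 six-bit values occupy exactly three 32-bit words (96 bits): values
    // 0-4 sit in q_0, 6-9 in q_1 and 11-15 in q_2, while values 5 and 10
    // straddle word boundaries, hence the two-word exb() calls below.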
for (int i = 0; i < 5; i++) dqh[ i] = dq_ns(exb( q_0, i * 6 , 0x3f), 32);
dqh[ 5 ] = dq_ns(exb(q_1, q_0, 30, 0x3f), 32);
for (int i = 0; i < 4; i++) dqh[ 6 + i] = dq_ns(exb( q_1, i * 6 + 4, 0x3f), 32);
dqh[10 ] = dq_ns(exb(q_2, q_1, 28, 0x3f), 32);
for (int i = 0; i < 5; i++) dqh[11 + i] = dq_ns(exb( q_2, i * 6 + 2, 0x3f), 32);
for (int i = 0; i < 8; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]);
}
#endif
#endif
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_6.cuh/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_6.cuh",
"repo_id": "text-generation-inference",
"token_count": 571
} | 221 |
import pytest
import torch
from copy import copy
from transformers import AutoTokenizer
from text_generation_server.pb import generate_pb2
from text_generation_server.models.seq2seq_lm import Seq2SeqLM, Seq2SeqLMBatch
@pytest.fixture(scope="session")
def mt0_small_tokenizer():
tokenizer = AutoTokenizer.from_pretrained(
"bigscience/mt0-small", padding_side="left"
)
tokenizer.bos_token_id = 0
return tokenizer
@pytest.fixture(scope="session")
def default_seq2seq_lm():
return Seq2SeqLM("bigscience/mt0-small")
@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
return generate_pb2.Request(
id=0,
inputs="Test",
prefill_logprobs=True,
truncate=100,
parameters=default_pb_parameters,
stopping_parameters=default_pb_stop_parameters,
)
@pytest.fixture
def default_pb_batch(default_pb_request):
return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)
@pytest.fixture
def default_seq2seq_lm_batch(default_pb_batch, mt0_small_tokenizer):
return Seq2SeqLMBatch.from_pb(
default_pb_batch, mt0_small_tokenizer, torch.float32, torch.device("cpu")
)
@pytest.fixture
def default_multi_requests_seq2seq_lm_batch(default_pb_request, mt0_small_tokenizer):
req_0 = copy(default_pb_request)
req_0.id = 1
req_1 = default_pb_request
req_1.id = 2
req_1.stopping_parameters.max_new_tokens = 5
batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2)
return Seq2SeqLMBatch.from_pb(
batch_pb, mt0_small_tokenizer, torch.float32, torch.device("cpu")
)
def test_batch_from_pb(default_pb_batch, default_seq2seq_lm_batch):
batch = default_seq2seq_lm_batch
sequence_length = len(default_seq2seq_lm_batch.input_ids[0])
assert batch.batch_id == default_pb_batch.id
assert batch.requests == default_pb_batch.requests
assert batch.input_ids.shape == (default_pb_batch.size, sequence_length)
assert batch.input_ids[0][-2] == 4268
assert batch.input_ids[0][-1] == 1
assert torch.all(batch.input_ids[0][:-2] == 0)
assert torch.all(batch.attention_mask[0][-2:] == 1)
assert torch.all(batch.attention_mask[0][:-2] == 0)
assert len(batch.decoder_input_ids) == default_pb_batch.size
assert batch.decoder_attention_mask is None
assert batch.encoder_last_hidden_state is None
assert batch.past_key_values is None
assert batch.input_lengths == [2]
assert batch.decoder_input_lengths == [1]
assert len(batch) == default_pb_batch.size
assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)
assert batch.max_input_length == batch.input_lengths[0]
assert batch.max_decoder_input_length == batch.decoder_input_lengths[0]
def test_batch_concatenate_no_prefill(default_seq2seq_lm_batch):
with pytest.raises(ValueError):
Seq2SeqLMBatch.concatenate([default_seq2seq_lm_batch, default_seq2seq_lm_batch])
def test_seq2seq_lm_batch_type(default_seq2seq_lm):
assert default_seq2seq_lm.batch_type == Seq2SeqLMBatch
def test_seq2seq_lm_generate_token(default_seq2seq_lm, default_seq2seq_lm_batch):
sequence_length = len(default_seq2seq_lm_batch.input_ids[0])
generations, next_batch, _ = default_seq2seq_lm.generate_token(
default_seq2seq_lm_batch
)
assert len(generations) == len(next_batch)
assert isinstance(next_batch, Seq2SeqLMBatch)
assert next_batch.input_ids is None
assert torch.equal(
next_batch.attention_mask, default_seq2seq_lm_batch.attention_mask
)
assert next_batch.input_lengths == default_seq2seq_lm_batch.input_lengths
assert next_batch.max_input_length == default_seq2seq_lm_batch.max_input_length
assert (
next_batch.next_token_choosers == default_seq2seq_lm_batch.next_token_choosers
)
assert next_batch.stopping_criterias == default_seq2seq_lm_batch.stopping_criterias
assert len(next_batch.decoder_input_ids) == len(next_batch)
assert next_batch.all_decoder_input_ids[0][0] == 0
assert next_batch.all_decoder_input_ids[0][1] == 259
assert next_batch.decoder_attention_mask is None
assert next_batch.encoder_last_hidden_state.shape == (1, sequence_length, 512)
assert next_batch.decoder_input_lengths == [2]
assert next_batch.max_decoder_input_length == 2
assert next_batch.past_key_values is not None
assert all(
[p[0].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values]
)
assert all(
[p[1].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values]
)
assert all(
[
p[2].shape == (len(next_batch), 6, sequence_length, 64)
for p in next_batch.past_key_values
]
)
assert all(
[
p[3].shape == (len(next_batch), 6, sequence_length, 64)
for p in next_batch.past_key_values
]
)
assert all([generation.generated_text is None for generation in generations])
assert all([len(generation.prefill_tokens) == 1 for generation in generations])
assert all(
[
token_id.item() == 259
for generation in generations
for token_id in generation.tokens.token_ids
]
)
assert all(
[
token_text == " "
for generation in generations
for token_text in generation.tokens.texts
]
)
assert generations[0].request_id == 0
def test_seq2seq_lm_generate_token_completion(
default_seq2seq_lm, default_seq2seq_lm_batch
):
next_batch = default_seq2seq_lm_batch
for _ in range(6):
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert generations[0].generated_text.text == "a few weeks"
assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id
assert generations[0].generated_text.generated_tokens == 7
def test_seq2seq_lm_generate_token_completion_multi(
default_seq2seq_lm, default_multi_requests_seq2seq_lm_batch
):
next_batch = default_multi_requests_seq2seq_lm_batch
for i in range(4):
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert generations[1].generated_text.text == "a few "
assert (
generations[1].request_id
== default_multi_requests_seq2seq_lm_batch.requests[1].id
)
assert generations[1].generated_text.generated_tokens == 5
next_batch = next_batch.filter([next_batch.requests[0].id])
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert generations[0].generated_text.text == "a few weeks"
assert (
generations[0].request_id
== default_multi_requests_seq2seq_lm_batch.requests[0].id
)
assert generations[0].generated_text.generated_tokens == 7
def test_batch_concatenate(
default_seq2seq_lm,
default_seq2seq_lm_batch,
default_multi_requests_seq2seq_lm_batch,
):
next_batch_0 = default_seq2seq_lm_batch
_, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0)
_, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0)
next_batch_1 = default_multi_requests_seq2seq_lm_batch
_, next_batch_1, _ = default_seq2seq_lm.generate_token(next_batch_1)
# Copy hidden state because it is removed from the concatenated branches
next_batch_0_encoder_last_hidden_state = next_batch_0.encoder_last_hidden_state
next_batch_1_encoder_last_hidden_state = next_batch_1.encoder_last_hidden_state
# Clone past_key_values before concatenating to compare after,
# because they are removed from the concatenated batches
next_batch_0_past_key_values = [
[t.clone() for t in layer] for layer in next_batch_0.past_key_values
]
next_batch_1_past_key_values = [
[t.clone() for t in layer] for layer in next_batch_1.past_key_values
]
next_batch = Seq2SeqLMBatch.concatenate([next_batch_0, next_batch_1])
assert next_batch.batch_id == 0
assert torch.equal(
next_batch.decoder_input_ids[0], next_batch_0.decoder_input_ids[0]
)
assert next_batch.all_decoder_input_ids[1][0] == 0
assert next_batch.all_decoder_input_ids[2][0] == 0
assert torch.equal(
next_batch.decoder_input_ids[1:, -2:], next_batch_1.decoder_input_ids
)
assert torch.all(next_batch.decoder_attention_mask[0, :3] == 1)
assert torch.all(next_batch.decoder_attention_mask[0, 3:] == 0)
assert torch.all(next_batch.decoder_attention_mask[1:, 0] == 0)
assert torch.all(next_batch.decoder_attention_mask[1:, 1:3] == 1)
assert torch.equal(
next_batch.encoder_last_hidden_state[0],
next_batch_0_encoder_last_hidden_state[0, -2:],
)
assert torch.equal(
next_batch.encoder_last_hidden_state[1:],
next_batch_1_encoder_last_hidden_state[:, -2:],
)
assert next_batch.input_lengths == [2, 2, 2]
assert next_batch.decoder_input_lengths == [3, 2, 2]
assert next_batch.max_input_length == 2
assert next_batch.max_decoder_input_length == 3
assert next_batch.requests[0] == next_batch_0.requests[0]
assert next_batch.requests[1:] == next_batch_1.requests
assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers
assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias
assert next_batch.past_key_values is not None
assert all(
[p[0].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
)
assert all(
[p[1].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
)
assert all(
[p[2].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
)
assert all(
[p[3].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
)
for i, past in enumerate(next_batch.past_key_values):
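        # next_batch_0 ran one extra decode step before concatenation, so only
        # its trailing positions line up with the merged cache, hence the
        # asymmetric slicing in the comparisons below.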
assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:, :], past[0][0])
assert torch.equal(
next_batch_1_past_key_values[i][0][:, :, -1:, :], past[0][1:, :, -1:, :]
)
assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:, :], past[1][0])
assert torch.equal(
next_batch_1_past_key_values[i][1][:, :, -1:, :], past[1][1:, :, -1:, :]
)
assert torch.equal(next_batch_0_past_key_values[i][2][0, :, -2:, :], past[2][0])
assert torch.equal(
next_batch_1_past_key_values[i][2][:, :, -2:, :], past[2][1:]
)
assert torch.equal(next_batch_0_past_key_values[i][3][0, :, -2:, :], past[3][0])
assert torch.equal(
next_batch_1_past_key_values[i][3][:, :, -2:, :], past[3][1:]
)
for _ in range(3):
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 3
assert generations[2].generated_text.text == "a few "
assert (
generations[2].request_id
== default_multi_requests_seq2seq_lm_batch.requests[1].id
)
assert generations[2].generated_text.generated_tokens == 5
next_batch = next_batch.filter(
[next_batch.requests[0].id, next_batch.requests[1].id]
)
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert generations[0].generated_text.text == "a few weeks"
assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id
assert generations[0].generated_text.generated_tokens == 7
next_batch = next_batch.filter([next_batch.requests[1].id])
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert generations[0].generated_text.text == "a few weeks"
assert (
generations[0].request_id
== default_multi_requests_seq2seq_lm_batch.requests[0].id
)
assert generations[0].generated_text.generated_tokens == 7
| text-generation-inference/server/tests/models/test_seq2seq_lm.py/0 | {
"file_path": "text-generation-inference/server/tests/models/test_seq2seq_lm.py",
"repo_id": "text-generation-inference",
"token_count": 5483
} | 222 |
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
import os
from shutil import copyfile
from torch import nn
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, List, Tuple
from tokenizers import processors
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
from transformers.utils import logging
from text_generation_server.utils import paged_attention, flash_attn
from text_generation_server.utils.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
PositionRotaryEmbedding,
SpeculativeHead,
get_linear,
FastRMSNorm,
)
GemmaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "tokenizer.model",
"tokenizer_file": "tokenizer.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"hf-internal-testing/llama-tokenizer": "https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer.model",
},
"tokenizer_file": {
"hf-internal-testing/llama-tokenizer": "https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer_config.json",
},
}
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
# fmt: off
DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure \
that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
correct. If you don't know the answer to a question, please don't share false information."""
# fmt: on
class GemmaTokenizerFast(PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
slow_tokenizer_class = GemmaTokenizer
padding_side = "left"
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
clean_up_tokenization_spaces=False,
unk_token="<unk>",
bos_token="<bos>",
eos_token="<eos>",
pad_token="<pad>",
add_bos_token=True,
add_eos_token=False,
use_default_system_prompt=False,
**kwargs,
):
super().__init__(
vocab_file=vocab_file,
tokenizer_file=tokenizer_file,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
add_bos_token=add_bos_token,
add_eos_token=add_eos_token,
use_default_system_prompt=use_default_system_prompt,
**kwargs,
)
self._add_bos_token = add_bos_token
self._add_eos_token = add_eos_token
self.update_post_processor()
self.use_default_system_prompt = use_default_system_prompt
self.vocab_file = vocab_file
@property
def can_save_slow_tokenizer(self) -> bool:
return os.path.isfile(self.vocab_file) if self.vocab_file else False
def update_post_processor(self):
"""
Updates the underlying post processor with the current `bos_token` and `eos_token`.
"""
bos = self.bos_token
bos_token_id = self.bos_token_id
if bos is None and self.add_bos_token:
raise ValueError("add_bos_token = True but bos_token = None")
eos = self.eos_token
eos_token_id = self.eos_token_id
if eos is None and self.add_eos_token:
raise ValueError("add_eos_token = True but eos_token = None")
single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
special_tokens = []
if self.add_bos_token:
special_tokens.append((bos, bos_token_id))
if self.add_eos_token:
special_tokens.append((eos, eos_token_id))
self._tokenizer.post_processor = processors.TemplateProcessing(
single=single, pair=pair, special_tokens=special_tokens
)
@property
def add_eos_token(self):
return self._add_eos_token
@property
def add_bos_token(self):
return self._add_bos_token
@add_eos_token.setter
def add_eos_token(self, value):
self._add_eos_token = value
self.update_post_processor()
@add_bos_token.setter
def add_bos_token(self, value):
self._add_bos_token = value
self.update_post_processor()
def save_vocabulary(
self, save_directory: str, filename_prefix: Optional[str] = None
) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer."
)
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "")
+ VOCAB_FILES_NAMES["vocab_file"],
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
@property
def default_chat_template(self):
raise NotImplementedError
# TODO ArthurZ let's rely on the template processor instead, refactor all fast tokenizers
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
bos_token_id = [self.bos_token_id] if self.add_bos_token else []
eos_token_id = [self.eos_token_id] if self.add_eos_token else []
output = bos_token_id + token_ids_0 + eos_token_id
if token_ids_1 is not None:
output = output + bos_token_id + token_ids_1 + eos_token_id
return output
class GemmaConfig(PretrainedConfig):
def __init__(
self,
vocab_size=256128,
hidden_size=3072,
intermediate_size=24576,
num_hidden_layers=28,
num_attention_heads=16,
num_key_value_heads=16,
head_dim=256,
hidden_act="gelu",
max_position_embeddings=8192,
initializer_range=0.02,
rms_norm_eps=1e-6,
use_cache=True,
pad_token_id=None,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=True,
rope_theta=10000.0,
rope_scaling=None,
attention_bias=False,
attention_dropout=0.0,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.head_dim = head_dim
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
class GemmaFastRMSNorm(FastRMSNorm):
@classmethod
def load(cls, prefix, weights, eps=1e-6):
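        # Gemma checkpoints store the RMSNorm scale as (gamma - 1); adding 1
        # back recovers the effective multiplicative weight.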
weight = weights.get_tensor(f"{prefix}.weight") + 1
return cls(weight, eps)
def load_attention(config, prefix, weights):
if config.num_attention_heads != config.num_key_value_heads:
return _load_gqa(config, prefix, weights)
else:
return TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=False,
)
def _load_gqa(config, prefix: str, weights):
assert config.num_attention_heads % weights.process_group.size() == 0
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
quantize=config.quantize,
dim=0,
)
if config.quantize not in ["gptq", "awq"]:
weight = weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.head_dim
num_heads = config.num_attention_heads // weights.process_group.size()
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
assert list(weight.shape) == [
(num_heads + 2 * num_key_value_heads) * head_size,
config.hidden_size,
], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
return TensorParallelColumnLinear(
get_linear(weight, bias=None, quantize=config.quantize)
)
class FlashGemmaAttention(torch.nn.Module):
def __init__(
self,
prefix: str,
config,
weights,
):
super().__init__()
self.num_heads = config.num_attention_heads
self.head_size = config.head_dim
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,
base=config.rope_theta,
device=weights.device,
)
self.softmax_scale = self.head_size**-0.5
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights)
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=False,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
input_lengths,
max_s,
):
qkv = self.query_key_value(hidden_states)
query, kv = qkv.split(
[
self.head_size * self.num_heads,
2 * self.head_size * self.num_key_value_heads,
],
dim=1,
)
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)
paged_attention.reshape_and_cache(
kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots
)
# output tensor
attn_output = torch.empty_like(query)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
flash_attn.attention(
query,
torch.select(kv, dim=1, index=0),
torch.select(kv, dim=1, index=1),
attn_output,
cu_seqlen_prefill,
max_s,
self.softmax_scale,
)
# Decode
else:
paged_attention.attention(
attn_output,
query,
kv_cache[0],
kv_cache[1],
self.kv_head_mapping,
self.softmax_scale,
block_tables,
input_lengths,
max_s,
)
return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size))
class GemmaMLP(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
act = config.hidden_act
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
# Fuse gate and up proj
self.gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=False,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=False,
)
self.intermediate_size = (
config.intermediate_size // weights.process_group.size()
)
def forward(self, hidden_states):
gate_up_states = self.gate_up_proj(hidden_states)
gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1])
class FlashGemmaLayer(nn.Module):
def __init__(self, layer_id, config, weights):
super().__init__()
prefix = f"model.layers.{layer_id}"
self.self_attn = FlashGemmaAttention(
prefix=f"{prefix}.self_attn", config=config, weights=weights
)
self.mlp = GemmaMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
self.input_layernorm = GemmaFastRMSNorm.load(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = GemmaFastRMSNorm.load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
input_lengths,
max_s,
):
normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
input_lengths,
max_s,
)
# faster post attention rms norm
normed_attn_res_output, attn_res = self.post_attention_layernorm(
attn_output, res
)
mlp_output = self.mlp(normed_attn_res_output)
return mlp_output, attn_res
class FlashGemmaModel(torch.nn.Module):
def __init__(self, config, weights):
super().__init__()
process_group = weights.process_group
self.tp_rank = process_group.rank()
self.tp_world_size = process_group.size()
embed_norm = config.hidden_size**0.5
self.embed_tokens = TensorParallelEmbedding(
prefix="model.embed_tokens", weights=weights
)
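        # Gemma scales token embeddings by sqrt(hidden_size); folding the
        # factor into the weight once avoids a multiply on every forward pass.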
self.embed_tokens.weight *= embed_norm
self.layers = nn.ModuleList(
[
FlashGemmaLayer(
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.norm = GemmaFastRMSNorm.load(
prefix="model.norm", weights=weights, eps=config.rms_norm_eps
)
self.gradient_checkpointing = False
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
input_lengths: torch.Tensor,
max_s: int,
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
# Get rotary cos and sin for this forward
# Avoid to index in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
input_lengths,
max_s,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashGemmaForCausalLM(torch.nn.Module):
def __init__(self, config, weights):
super().__init__()
self.model = FlashGemmaModel(config, weights)
self.lm_head = SpeculativeHead.load(
config,
prefix="model.embed_tokens" if config.tie_word_embeddings else "lm_head",
weights=weights,
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
input_lengths: torch.Tensor,
max_s: int,
lm_head_indices: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
input_lengths,
max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.lm_head(hidden_states)
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 9688
} | 223 |
import torch
import torch.distributed
from mamba_ssm.ops.triton.selective_state_update import selective_state_update
from mamba_ssm.ops.selective_scan_interface import selective_scan_fn
from torch import nn
from typing import Optional, Tuple, Any
from transformers.configuration_utils import PretrainedConfig
import torch.nn.functional as F
from text_generation_server.utils.layers import (
SpeculativeHead,
TensorParallelEmbedding,
FastRMSNorm,
FastLinear,
)
from einops import rearrange
from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
import math
from dataclasses import dataclass
@dataclass
class InferenceParams:
"""Inference parameters that are passed to the main model in order
    to efficiently calculate and store the context during inference."""
max_seqlen: int
max_batch_size: int
conv_states: torch.Tensor
ssm_states: torch.Tensor
seqlen_offset: int
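# Presumed layout, inferred from how MambaBlock indexes these below:
# conv_states is (n_layer, batch, d_inner, d_conv) and ssm_states is
# (n_layer, batch, d_inner, d_state), both preallocated once per batch.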
class MambaConfig(PretrainedConfig):
def __init__(
self,
vocab_size=50280,
d_model=768,
d_state=16,
n_layer=32,
layer_norm_epsilon=1e-5,
tie_word_embeddings=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
expand=2,
dt_rank="auto",
**kwargs,
):
self.vocab_size = vocab_size
self.n_layer = n_layer
self.layer_norm_epsilon = layer_norm_epsilon
self.d_model = d_model
        self.d_inner = d_model * expand
self.d_conv = 4
self.d_state = d_state
self.expand = expand
self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
class MambaBlock(nn.Module):
def __init__(self, prefix, config, weights, layer_id):
super().__init__()
self.layer_id = layer_id
self.in_proj = FastLinear.load(config, f"{prefix}.in_proj", weights, bias=False)
self.x_proj = FastLinear.load(config, f"{prefix}.x_proj", weights, bias=False)
self.dt_proj = FastLinear.load(config, f"{prefix}.dt_proj", weights, bias=True)
self.dt_proj_no_bias = FastLinear.load(
config, f"{prefix}.dt_proj", weights, bias=False
)
self.out_proj = FastLinear.load(
config, f"{prefix}.out_proj", weights, bias=False
)
self.conv1d = FastLinear.load(config, f"{prefix}.conv1d", weights, bias=True)
self.negA = -torch.exp(weights.get_tensor(f"{prefix}.A_log").float())
self.D = weights.get_tensor(f"{prefix}.D")
self.activation = "silu"
self.dt_rank = config.dt_rank
self.d_state = config.d_state
self.d_conv = config.d_conv
self.act = nn.SiLU()
# inference_params
def forward(self, hidden_states: torch.Tensor, inference_params=None):
if inference_params.seqlen_offset > 0:
conv_state = inference_params.conv_states[self.layer_id]
ssm_state = inference_params.ssm_states[self.layer_id]
out, conv_state, ssm_state = self.step(hidden_states, conv_state, ssm_state)
return out, conv_state, ssm_state
_, seqlen, _ = hidden_states.shape
projected_states = self.in_proj(hidden_states).transpose(1, 2)
# assert projected_states.shape == [batch_size, 2 * dstate, seqlen], f"{projected_states.shape} [{batch_size}, {dstate}, {seqlen}]"
x, z = projected_states.chunk(2, dim=1)
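        # F.pad with a negative left pad truncates to (or zero-pads up to) the
        # last d_conv timesteps, caching exactly what causal_conv1d_update
        # consumes during incremental decoding.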
conv_state = F.pad(x, (self.d_conv - seqlen, 0))
x = causal_conv1d_fn(
x=x,
weight=self.conv1d.weight.squeeze(1),
bias=self.conv1d.bias,
activation=self.activation,
)
# We're careful here about the layout, to avoid extra transposes.
# We want dt to have d as the slowest moving dimension
# and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d)
dt, B, C = torch.split(
x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1
)
dt = self.dt_proj.weight @ dt.t()
dt = rearrange(dt, "d (b l) -> b d l", l=seqlen)
B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous()
C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous()
y, last_state = selective_scan_fn(
x,
dt,
self.negA,
B,
C,
self.D.float(),
z=z,
delta_bias=self.dt_proj.bias.float(),
delta_softplus=True,
return_last_state=True,
)
y = rearrange(y, "b d l -> b l d")
attn_outputs = self.out_proj(y)
return attn_outputs, conv_state, last_state
def step(self, hidden_states, conv_state, ssm_state):
xz = self.in_proj(hidden_states.squeeze(1))
x, z = xz.chunk(2, dim=-1) # (B D)
x = causal_conv1d_update(
x,
conv_state,
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.activation,
)
x_db = self.x_proj(x) # (B dt_rank+2*d_state)
dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1)
dt = F.linear(dt, self.dt_proj.weight)
A = self.negA
y = selective_state_update(
ssm_state,
x,
dt,
A,
B,
C,
self.D,
z=z,
dt_bias=self.dt_proj.bias,
dt_softplus=True,
)
out = self.out_proj(y)
return out.unsqueeze(1), conv_state.clone(), ssm_state.clone()
class ResidualBlock(nn.Module):
def __init__(self, prefix, config, weights, layer_id):
super().__init__()
self.mamba_block = MambaBlock(
prefix=f"{prefix}.mixer", config=config, weights=weights, layer_id=layer_id
)
self.layer_norm = FastRMSNorm.load(
prefix=f"{prefix}.norm", weights=weights, eps=config.layer_norm_epsilon
)
def forward(
self,
hidden_states: torch.Tensor,
residual: Optional[torch.Tensor] = None,
inference_params: Optional[Any] = None,
):
residual = (hidden_states + residual) if residual is not None else hidden_states
shape = residual.shape
hidden_states, _ = self.layer_norm(residual.view(-1, shape[-1]))
hidden_states, conv_state, last_ssm_state = self.mamba_block(
hidden_states.view(*shape), inference_params
)
return hidden_states, residual, conv_state, last_ssm_state
class MambaModel(nn.Module):
def __init__(self, config, weights):
super().__init__()
prefix = "backbone"
self.embed_tokens = TensorParallelEmbedding(f"{prefix}.embedding", weights)
self.blocks = nn.ModuleList(
[
ResidualBlock(f"{prefix}.layers.{i}", config, weights, layer_id=i)
for i in range(config.n_layer)
]
)
self.norm_f = FastRMSNorm.load(
f"{prefix}.norm_f", weights, eps=config.layer_norm_epsilon
)
self.lm_head = SpeculativeHead.load(config, f"{prefix}.embedding", weights)
self.config = config
def forward(
self, input_ids: torch.Tensor, inference_params=None, residual=None
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
hidden_states = self.embed_tokens(input_ids)
for i, block in enumerate(self.blocks):
hidden_states, residual, conv_state, ssm_state = block(
hidden_states, residual, inference_params
)
inference_params.conv_states[i].copy_(conv_state)
inference_params.ssm_states[i].copy_(ssm_state)
hidden_states = (
hidden_states + residual if residual is not None else hidden_states
)
hidden_states, _ = self.norm_f(hidden_states.view(-1, hidden_states.size(-1)))
hidden_states = hidden_states.view(residual.shape)
logits, speculative_logits = self.lm_head(hidden_states)
# update the offset for the next inference using these params
inference_params.seqlen_offset += input_ids.size(1)
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/mamba_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/mamba_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 4100
} | 224 |
import math
import torch
from typing import Optional
from transformers.models.gpt2 import GPT2TokenizerFast
from text_generation_server.models.cache_manager import BLOCK_SIZE
from text_generation_server.models.flash_mistral import (
BaseFlashMistral,
set_sliding_window,
)
from text_generation_server.models.custom_modeling.flash_starcoder2_modeling import (
Starcoder2Config,
FlashStarcoder2ForCausalLM,
)
from text_generation_server.utils import (
initialize_torch_distributed,
weight_files,
Weights,
)
# Starcoder2 has the same base as Mistral
class FlashStarcoder2(BaseFlashMistral):
def __init__(
self,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
use_medusa: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
self.process_group, rank, world_size = initialize_torch_distributed()
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
dtype = torch.float16 if dtype is None else dtype
else:
raise NotImplementedError("FlashStarcoder2 is only available on GPU")
tokenizer = GPT2TokenizerFast.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
config = Starcoder2Config.from_pretrained(
model_id, revision=revision, trust_remote_code=trust_remote_code
)
config.quantize = quantize
config.use_medusa = use_medusa
# Set context windows
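        # The window is expressed in paged-attention blocks (rounded up),
        # presumably so the cache manager can cap the blocks kept per sequence.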
if config.sliding_window is not None:
set_sliding_window(
config.sliding_window, math.ceil(config.sliding_window / BLOCK_SIZE)
)
torch.distributed.barrier(group=self.process_group)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
weights = Weights(filenames, device, dtype, process_group=self.process_group)
if config.quantize in ["gptq", "awq"]:
weights._set_gptq_params(model_id, revision)
model = FlashStarcoder2ForCausalLM(config, weights)
self.cuda_graphs = {}
torch.distributed.barrier(group=self.process_group)
super(BaseFlashMistral, self).__init__(
model=model,
tokenizer=tokenizer,
num_layers=len(model.model.layers),
num_kv_heads=model.model.num_key_value_heads,
head_size=model.model.head_size,
dtype=dtype,
device=device,
rank=rank,
world_size=world_size,
sliding_window=config.sliding_window,
)
| text-generation-inference/server/text_generation_server/models/flash_starcoder2.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/flash_starcoder2.py",
"repo_id": "text-generation-inference",
"token_count": 1248
} | 225 |
import os
import torch
import torch.distributed
from torch import nn
from torch.nn import functional as F
from typing import List, Tuple, Optional
from loguru import logger
from functools import lru_cache
HAS_BITS_AND_BYTES = True
try:
import bitsandbytes as bnb
from bitsandbytes.nn import Int8Params, Params4bit
except ImportError:
HAS_BITS_AND_BYTES = False
from accelerate import init_empty_weights
from text_generation_server.utils.gptq.quant_linear import QuantLinear
from text_generation_server.utils.import_utils import IS_CUDA_SYSTEM, IS_ROCM_SYSTEM
from text_generation_server.utils.log import log_once
HAS_AWQ = True
try:
from text_generation_server.utils.awq.quantize.qmodule import WQLinear
except ImportError:
HAS_AWQ = False
try:
major, _minor = torch.cuda.get_device_capability()
except Exception:
major = 1
HAS_EXLLAMA = False
CAN_EXLLAMA = major >= 8 or IS_ROCM_SYSTEM
V2 = os.getenv("EXLLAMA_VERSION", "2") == "2"
# if V2 and int(os.getenv("WORLD_SIZE", "1")) > 1:
# V2 = False
# log_once(
# logger.warning,
# "Disabling exllama v2 and using v1 instead because there are issues when sharding",
# )
if os.getenv("DISABLE_EXLLAMA") == "True":
HAS_EXLLAMA = False
elif CAN_EXLLAMA:
try:
if V2:
from text_generation_server.utils.gptq.exllamav2 import (
QuantLinear as ExllamaQuantLinear,
create_exllama_buffers,
set_device,
)
HAS_EXLLAMA = "2"
else:
from text_generation_server.utils.gptq.exllama import (
Ex4bitLinear as ExllamaQuantLinear,
create_exllama_buffers,
set_device,
)
HAS_EXLLAMA = "1"
except ImportError:
pass
HAS_EETQ = False
try:
from EETQ import quant_weights, w8_a16_gemm
HAS_EETQ = True
except ImportError:
pass
# Monkey patching
@classmethod
def load_layer_norm(cls, prefix, weights, eps):
weight = weights.get_tensor(f"{prefix}.weight")
bias = weights.get_tensor(f"{prefix}.bias")
with init_empty_weights():
ln = cls(weight.shape, eps=eps)
ln.weight = nn.Parameter(weight)
ln.bias = nn.Parameter(bias)
return ln
@classmethod
def load_layer_norm_no_bias(cls, prefix, weights, eps):
weight = weights.get_tensor(f"{prefix}.weight")
with init_empty_weights():
ln = cls(weight.shape, eps=eps)
ln.weight = nn.Parameter(weight)
ln.bias = None
return ln
@classmethod
def load_conv2d(cls, prefix, weights, in_channels, out_channels, kernel_size, stride):
weight = weights.get_tensor(f"{prefix}.weight")
bias = weights.get_tensor(f"{prefix}.bias")
with init_empty_weights():
conv2d = cls(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
)
conv2d.weight = nn.Parameter(weight)
conv2d.bias = nn.Parameter(bias)
return conv2d
@classmethod
def load_conv2d_no_bias(
cls, prefix, weights, in_channels, out_channels, kernel_size, stride
):
weight = weights.get_tensor(f"{prefix}.weight")
with init_empty_weights():
conv2d = cls(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
)
conv2d.weight = nn.Parameter(weight)
conv2d.bias = None
return conv2d
torch.nn.Conv2d.load = load_conv2d
torch.nn.Conv2d.load_no_bias = load_conv2d_no_bias
torch.nn.LayerNorm.load = load_layer_norm
torch.nn.LayerNorm.load_no_bias = load_layer_norm_no_bias
class FastLinear(nn.Module):
def __init__(
self,
weight,
bias,
) -> None:
super().__init__()
self.weight = nn.Parameter(weight)
if bias is not None:
self.bias = nn.Parameter(bias)
else:
self.bias = None
@classmethod
def load(cls, config, prefix: str, weights, bias: bool):
weight = weights.get_tensor(f"{prefix}.weight")
if bias:
bias = weights.get_tensor(f"{prefix}.bias")
else:
bias = None
return cls(weight, bias)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.linear(input, self.weight, self.bias)
class EETQLinear(nn.Module):
def __init__(
self,
weight,
bias,
) -> None:
super().__init__()
device = weight.device
weight = torch.t(weight).contiguous().cpu()
weight, scale = quant_weights(weight, torch.int8, False)
self.weight = weight.cuda(device)
self.scale = scale.cuda(device)
self.bias = bias.cuda(device) if bias is not None else None
def forward(self, input: torch.Tensor) -> torch.Tensor:
output = w8_a16_gemm(input, self.weight, self.scale)
output = output + self.bias if self.bias is not None else output
return output
class Linear8bitLt(nn.Module):
def __init__(
self,
weight,
bias,
has_fp16_weights=True,
memory_efficient_backward=False,
threshold=0.0,
index=None,
):
super().__init__()
assert (
not memory_efficient_backward
), "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
self.state = bnb.MatmulLtState()
self.index = index
# Necessary for stacked layers
self.state.threshold = threshold
self.state.has_fp16_weights = has_fp16_weights
self.state.memory_efficient_backward = memory_efficient_backward
if threshold > 0.0 and not has_fp16_weights:
self.state.use_pool = True
self.weight = Int8Params(
weight.data,
has_fp16_weights=has_fp16_weights,
requires_grad=has_fp16_weights,
)
self.weight.cuda(weight.device)
self.bias = bias
def init_8bit_state(self):
self.state.CB = self.weight.CB
self.state.SCB = self.weight.SCB
self.weight.CB = None
self.weight.SCB = None
def forward(self, x: torch.Tensor):
self.state.is_training = self.training
if self.weight.CB is not None:
self.init_8bit_state()
# weights are cast automatically as Int8Params, but the bias has to be cast manually
if self.bias is not None and self.bias.dtype != x.dtype:
self.bias.data = self.bias.data.to(x.dtype)
out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)
if not self.state.has_fp16_weights:
if self.state.CB is not None and self.state.CxB is not None:
# we converted 8-bit row major to turing/ampere format in the first inference pass
# we no longer need the row-major weight
del self.state.CB
self.weight.data = self.state.CxB
return out
class Linear4bit(nn.Module):
def __init__(self, weight, bias, quant_type):
super().__init__()
self.weight = Params4bit(
weight.data,
requires_grad=False,
compress_statistics=True,
quant_type=quant_type,
)
self.compute_dtype = None
self.weight.cuda(weight.device)
self.bias = bias
def forward(self, x: torch.Tensor):
# weights are cast automatically as Int8Params, but the bias has to be cast manually
if self.bias is not None and self.bias.dtype != x.dtype:
self.bias.data = self.bias.data.to(x.dtype)
if getattr(self.weight, "quant_state", None) is None:
print(
"FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first."
)
inp_dtype = x.dtype
if self.compute_dtype is not None:
x = x.to(self.compute_dtype)
bias = None if self.bias is None else self.bias.to(self.compute_dtype)
out = bnb.matmul_4bit(
x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state
)
out = out.to(inp_dtype)
return out
@lru_cache(1)
def warn_deprecate_bnb():
logger.warning(
"Bitsandbytes 8bit is deprecated, using `eetq` is a drop-in replacement, and has much better performnce"
)
def get_linear(weight, bias, quantize):
if quantize is None:
linear = FastLinear(weight, bias)
elif quantize == "eetq":
if HAS_EETQ:
linear = EETQLinear(weight, bias)
else:
raise ImportError(
"Please install EETQ from https://github.com/NetEase-FuXi/EETQ"
)
elif quantize == "bitsandbytes":
warn_deprecate_bnb()
linear = Linear8bitLt(
weight,
bias,
has_fp16_weights=False,
threshold=6.0,
)
if bias is not None:
linear.bias = nn.Parameter(bias)
elif quantize == "bitsandbytes-fp4":
linear = Linear4bit(
weight,
bias,
quant_type="fp4",
)
elif quantize == "bitsandbytes-nf4":
linear = Linear4bit(
weight,
bias,
quant_type="nf4",
)
elif quantize == "gptq":
try:
qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama = weight
except Exception:
raise NotImplementedError(
f"The passed weight is not `gptq` compatible, loader needs to be updated."
)
if use_exllama:
linear = ExllamaQuantLinear(
qweight, qzeros, scales, g_idx, bias, bits, groupsize
)
else:
linear = QuantLinear(
qweight,
qzeros,
scales,
g_idx,
bias,
bits,
groupsize,
)
elif quantize == "awq":
try:
qweight, qzeros, scales, _, bits, groupsize, _ = weight
except Exception:
raise NotImplementedError(
f"The passed weight is not `awq` compatible, loader needs to be updated."
)
if IS_ROCM_SYSTEM:
raise NotImplementedError(
"AWQ GEMM kernel can't be used on ROCm systems, please use `--quantize gptq` instead "
"to use Exllama/GPTQ kernels for AWQ inference."
)
if not HAS_AWQ:
raise NotImplementedError(
"You do not seem to have awq installed, either install it (cd server && make install-awq), or try using GPTQ `---quantize gptq` a conversion AWQ->GPTQ will happen on the fly"
)
linear = WQLinear(
w_bit=bits,
group_size=groupsize,
qweight=qweight,
qzeros=qzeros,
scales=scales,
bias=bias is not None,
)
else:
raise NotImplementedError(f"Quantization `{quantize}` is not implemented yet.")
return linear
class SuperLayer(nn.Module):
def __init__(self, linear):
super().__init__()
self.linear = linear
def forward(self, x):
return self.linear.forward(x)
class ResBlock(torch.nn.Module):
def __init__(self, config, prefix, weights):
super().__init__()
self.linear = FastLinear.load(
config, prefix=f"{prefix}.linear", weights=weights, bias=True
)
self.act = torch.nn.SiLU()
def forward(self, x):
return x + self.act(self.linear(x))
class MedusaModel(torch.nn.Module):
def __init__(self, config, weights):
super().__init__()
self.heads = torch.nn.ModuleList(
[
MedusaHead(config, prefix=f"{i}", weights=weights)
for i in range(config["medusa_num_heads"])
]
)
def forward(self, x):
speculative_logits = torch.stack([head(x) for head in self.heads], dim=1)
return speculative_logits
class MedusaHead(torch.nn.Module):
def __init__(self, config, prefix, weights):
super().__init__()
self.blocks = torch.nn.ModuleList(
[
ResBlock(config, prefix=f"{prefix}.{i}", weights=weights)
for i in range(config["medusa_num_layers"])
]
)
n = len(self.blocks)
self.out = FastLinear.load(
config, prefix=f"{prefix}.{n}", weights=weights, bias=False
)
def forward(self, x):
for block in self.blocks:
x = block(x)
x = self.out(x)
return x
class SpeculativeHead(nn.Module):
def __init__(self, lm_head, medusa):
super().__init__()
self.lm_head = lm_head
self.medusa = medusa
@staticmethod
def load(config, prefix: str, weights):
lm_head = TensorParallelHead.load(config, prefix, weights)
use_medusa = config.use_medusa
if use_medusa:
from pathlib import Path
from safetensors import safe_open
import json
medusa_config = str(Path(use_medusa) / "config.json")
filename = str(Path(use_medusa) / "medusa_lm_head.safetensors")
with open(medusa_config, "r") as f:
config = json.load(f)
routing = weights.routing
with safe_open(filename, framework="pytorch") as f:
for k in f.keys():
if k in routing:
raise RuntimeError(
f"Key {k} was found in multiple files: {filename} and {routing[k]}"
)
weights.routing[k] = filename
medusa = MedusaModel(config, weights)
else:
medusa = None
return SpeculativeHead(lm_head, medusa)
def forward(
self, input: torch.Tensor
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
logits = self.lm_head(input)
speculative_logits = self.medusa(input) if self.medusa is not None else None
return logits, speculative_logits
class TensorParallelHead(SuperLayer):
def __init__(self, linear, process_group, should_gather: bool):
super().__init__(linear)
self.process_group = process_group
self.should_gather = should_gather
@staticmethod
def load(config, prefix: str, weights):
if weights.process_group.size() > 1:
try:
weight = weights.get_sharded(f"{prefix}.weight", dim=0)
should_gather = True
except AssertionError:
# If the vocab size is not divisible by number of shards
# just load the entire thing.
weight = weights.get_tensor(f"{prefix}.weight")
should_gather = False
else:
weight = weights.get_tensor(f"{prefix}.weight")
should_gather = False
# GPTQ,AWQ,EETQ don't quantize heads (nor embeddings)
if config.quantize in ["gptq", "awq", "eetq"]:
quantize = None
else:
quantize = config.quantize
return TensorParallelHead(
get_linear(weight, bias=None, quantize=quantize),
process_group=weights.process_group,
should_gather=should_gather,
)
def forward(self, input: torch.Tensor) -> torch.Tensor:
if not self.should_gather:
return super().forward(input)
world_size = self.process_group.size()
if len(input.shape) == 2 and isinstance(self.linear, FastLinear):
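            # Fast path: compute the local shard's logits straight into a
            # gather-friendly buffer so a single all_gather_into_tensor call
            # assembles the full vocab dimension; for batch > 1 the buffers
            # are laid out transposed and transposed back at the end.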
out_dim = self.linear.weight.shape[0]
if input.shape[0] == 1:
world_out = input.new_empty(1, out_dim * world_size)
local_out = input.new_empty(1, out_dim)
gather_input = local_out
else:
world_out = input.new_empty(out_dim * world_size, input.shape[0])
gather_input = input.new_empty(out_dim, input.shape[0])
local_out = gather_input.T
torch.mm(input, self.linear.weight.T, out=local_out)
torch.distributed.all_gather_into_tensor(
world_out, gather_input, group=self.process_group
)
if input.shape[0] == 1:
return world_out
return world_out.T
output = super().forward(input)
world_output = [
torch.empty_like(output) for _ in range(self.process_group.size())
]
torch.distributed.all_gather(world_output, output, group=self.process_group)
world_output = torch.cat(world_output, dim=-1)
return world_output
class TensorParallelColumnLinear(SuperLayer):
@classmethod
def load_qkv(cls, config, prefix: str, weights, bias: bool):
"""Specific method when the QKV was joined after the fact"""
weight = weights.get_weights_col_packed_qkv(prefix, quantize=config.quantize)
if bias:
raise NotImplementedError("packed_qkv only implemented for baichuan")
else:
bias = None
linear = get_linear(weight, bias, config.quantize)
return cls(linear)
@classmethod
def load(cls, config, prefix: str, weights, bias: bool):
return cls.load_multi(config, [prefix], weights, bias, dim=0)
@classmethod
def load_multi(cls, config, prefixes: List[str], weights, bias: bool, dim: int):
weight = weights.get_multi_weights_col(
prefixes, quantize=config.quantize, dim=dim
)
if bias:
b = [weights.get_sharded(f"{p}.bias", dim=0) for p in prefixes]
bias = torch.cat(b, dim=dim)
else:
bias = None
linear = get_linear(weight, bias, config.quantize)
return cls(linear)
class TensorParallelRowLinear(SuperLayer):
def __init__(self, linear, process_group):
super().__init__(linear)
self.process_group = process_group
@classmethod
def load(cls, config, prefix: str, weights, bias: bool):
weight = weights.get_multi_weights_row(prefix, quantize=config.quantize)
        if bias and weights.process_group.rank() == 0:
            # Only rank 0 loads the bias: the row-parallel all-reduce sums the
            # partial outputs, so adding the bias on every rank would apply it
            # world_size times.
bias = weights.get_tensor(f"{prefix}.bias")
else:
bias = None
return cls(
get_linear(weight, bias, config.quantize),
process_group=weights.process_group,
)
def forward(self, input: torch.Tensor, reduce: bool = True) -> torch.Tensor:
out = super().forward(input)
if self.process_group.size() > 1 and reduce:
torch.distributed.all_reduce(out, group=self.process_group)
return out
class TensorParallelEmbedding(nn.Module):
def __init__(self, prefix: str, weights, reduce=True):
super().__init__()
weight = weights.get_partial_sharded(f"{prefix}.weight", dim=0)
num_embeddings = weights.get_shape(f"{prefix}.weight")[0]
process_group = weights.process_group
world_size = process_group.size()
rank = process_group.rank()
block_size = (num_embeddings + world_size - 1) // world_size
self.min_id = rank * block_size
self.max_id = min(num_embeddings, (rank + 1) * block_size)
        self.null_idx = weight.shape[0]  # Usually block_size; may be smaller when vocab_size is not evenly divisible.
self.process_group = weights.process_group
self.reduce = reduce
"""Additional 0 entry used for masking"""
self.weight = nn.Parameter(F.pad(weight, (0, 0, 0, 1)))
def forward(self, input: torch.Tensor) -> torch.Tensor:
# default all out of bounds values to `self.null_idx` that will then be mapped to 0
        # translate ids into the local range [0, self.max_id - self.min_id)
input = torch.where(
(self.min_id > input) | (input >= self.max_id),
self.null_idx,
input - self.min_id,
)
out = torch.nn.functional.embedding(input, self.weight)
if self.reduce and self.process_group.size() > 1:
torch.distributed.all_reduce(out, group=self.process_group)
return out
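# Example: with num_embeddings=10 and world_size=4, block_size=3, so rank 1 owns
# ids [3, 6) and null_idx=3 (the zero row appended in __init__). An input id of 7
# is out of range on rank 1, gets remapped to the zero row locally, and the
# all_reduce above restores its true embedding from rank 2, which owns it.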
try:
if IS_CUDA_SYSTEM:
import dropout_layer_norm
elif IS_ROCM_SYSTEM:
from vllm import layernorm_ops
else:
dropout_layer_norm = None
class FastLayerNorm(nn.LayerNorm):
def forward(self, hidden_states, residual=None):
if hidden_states.shape[-1] > 8192 or IS_ROCM_SYSTEM:
if residual is not None:
hidden_states += residual
residual = hidden_states
return super(FastLayerNorm, self).forward(hidden_states), residual
else:
(
normed_hidden_states,
residual,
*rest,
) = dropout_layer_norm.dropout_add_ln_fwd(
hidden_states,
residual,
self.weight,
self.bias,
None,
None,
None,
None,
0.0,
self.eps,
1.0,
0,
None,
False,
False,
)
if residual is None:
residual = hidden_states
return normed_hidden_states, residual
class FastRMSNorm(nn.Module):
def __init__(self, weight: torch.Tensor, eps: float):
super().__init__()
self.weight = nn.Parameter(weight)
self.variance_epsilon = eps
@classmethod
def load(cls, prefix, weights, eps=1e-6):
weight = weights.get_tensor(f"{prefix}.weight")
return cls(weight, eps)
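        # RMSNorm: y = weight * x / sqrt(mean(x^2) + eps); unlike LayerNorm it
        # neither centers the input nor adds a bias term.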
def forward(self, hidden_states, residual=None):
if hidden_states.shape[-1] > 8192:
if residual is not None:
hidden_states += residual
residual = hidden_states
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(
variance + self.variance_epsilon
)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states, residual
elif IS_CUDA_SYSTEM:
# faster post attention rms norm
(
normed_hidden_states,
res,
*rest,
) = dropout_layer_norm.dropout_add_ln_fwd(
hidden_states,
residual,
self.weight,
None,
None,
None,
None,
None,
0.0,
self.variance_epsilon,
1.0,
0,
None,
False,
True, # Activate RMSNorm
)
if res is None:
res = hidden_states
return normed_hidden_states, res
elif IS_ROCM_SYSTEM:
            # We use the vLLM RMSNorm kernel, which can be compiled for RoCm, unlike the Flash Attention ones, which cannot.
if residual is not None:
hidden_states += residual
residual = hidden_states
out = torch.empty_like(hidden_states)
layernorm_ops.rms_norm(
out,
hidden_states,
self.weight.data,
self.variance_epsilon,
)
return out, residual
else:
raise ValueError(
"Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction."
)
except ImportError:
pass
try:
if IS_CUDA_SYSTEM:
from flash_attn.layers.rotary import RotaryEmbedding
import rotary_emb
elif IS_ROCM_SYSTEM:
from vllm import pos_encoding_ops
def _create_inv_freq(dim, base, device):
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim)
)
return inv_freq
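    # inv_freq[i] = 1 / base^(2i / dim) for i in [0, dim/2): the standard RoPE
    # frequency schedule, computed in float32 for precision.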
def _get_rope_config(config):
if os.getenv("ROPE_SCALING", None) is not None:
rope_scaling = {
"type": os.environ["ROPE_SCALING"],
"factor": float(os.environ["ROPE_FACTOR"]),
}
return rope_scaling
return getattr(config, "rope_scaling", None)
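    # Example: setting ROPE_SCALING=dynamic and ROPE_FACTOR=2.0 in the environment
    # takes precedence over any `rope_scaling` entry found on the model config.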
class PositionRotaryEmbedding(nn.Module):
def __init__(self, inv_freq, scaling_factor):
super().__init__()
self.inv_freq = inv_freq
self._seq_len_cached = 0
self._cos_cached = None
self._sin_cached = None
self._cos_k_cached = None
self._sin_k_cached = None
self.scaling_factor = scaling_factor
self.dynamic_args = None
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
cos: torch.Tensor,
sin: torch.Tensor,
):
            # Such control-flow branches may add some overhead.
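            # Both kernels apply, per position, the in-place rotation
            #   x1' = x1 * cos - x2 * sin
            #   x2' = x1 * sin + x2 * cos
            # to the two halves of the rotary dimensions of query and key.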
if IS_CUDA_SYSTEM:
rotary_dim = cos.shape[-1]
q1 = query[..., :rotary_dim]
q2 = query[..., rotary_dim : 2 * rotary_dim]
rotary_emb.apply_rotary(q1, q2, cos, sin, q1, q2, False)
k1 = key[..., :rotary_dim]
k2 = key[..., rotary_dim : 2 * rotary_dim]
rotary_emb.apply_rotary(k1, k2, cos, sin, k1, k2, False)
elif IS_ROCM_SYSTEM:
            # NOTE: On RoCm systems, we use a RoPE implementation adapted from vLLM, which launches a single kernel for both query and key, contrary to the flash-attn implementation used on NVIDIA systems.
            # When compiling the flash-attn rotary kernel on RoCm, hipcc appears unable to unroll loops, resulting in inference even slower than eager mode: https://github.com/pytorch/pytorch/issues/113773
head_size = query.shape[-1]
# Inplace operation, updating query and key.
pos_encoding_ops.rotary_embedding(query, key, head_size, cos, sin, True)
else:
raise ValueError(
"Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction."
)
@classmethod
def static(cls, config, dim, base, device):
inv_freq = _create_inv_freq(dim, base, device)
scaling_factor = None
rope_scaling = _get_rope_config(config)
if rope_scaling is not None:
scaling_factor = rope_scaling["factor"]
if rope_scaling["type"] == "linear":
pass
elif rope_scaling["type"] == "dynamic":
return DynamicPositionRotaryEmbedding(
dim=dim,
max_position_embeddings=config.max_position_embeddings,
base=base,
device=inv_freq.device,
scaling_factor=scaling_factor,
)
elif rope_scaling["type"] == "yarn":
return YarnPositionRotaryEmbedding(
dim=2 * inv_freq.shape[0],
max_position_embeddings=rope_scaling[
"original_max_position_embeddings"
],
base=10000.0,
device=inv_freq.device,
scaling_factor=scaling_factor,
extrapolation_factor=1,
attn_factor=1,
beta_fast=32,
beta_slow=1,
)
else:
raise NotImplementedError(
f"rope scaling type {rope_scaling['type']} is not implemented or invalid"
)
return cls(inv_freq, scaling_factor)
@classmethod
def load(cls, config, prefix, weights):
# XXX: Always load this in float32 !
dtype = weights.dtype
weights.dtype = torch.float32
inv_freq = weights.get_tensor(f"{prefix}.inv_freq")
weights.dtype = dtype
scaling_factor = None
rope_scaling = _get_rope_config(config)
if rope_scaling is not None:
scaling_factor = rope_scaling["factor"]
if rope_scaling["type"] == "linear":
pass
elif rope_scaling["type"] == "dynamic":
return DynamicPositionRotaryEmbedding(
dim=2 * inv_freq.shape[0],
max_position_embeddings=config.max_position_embeddings,
base=10000.0,
device=inv_freq.device,
scaling_factor=scaling_factor,
)
elif rope_scaling["type"] == "yarn":
return YarnPositionRotaryEmbedding(
dim=2 * inv_freq.shape[0],
max_position_embeddings=rope_scaling[
"original_max_position_embeddings"
],
base=10000.0,
device=inv_freq.device,
scaling_factor=scaling_factor,
extrapolation_factor=1,
attn_factor=1,
beta_fast=32,
beta_slow=1,
)
else:
raise NotImplementedError(
f"rope scaling type {rope_scaling['type']} is not implemented or invalid"
)
return cls(inv_freq, scaling_factor)
def _update_cos_sin_cache(self, dtype, device, seqlen):
# Reset the tables if the sequence length has changed,
# or if we're on a new device (possibly due to tracing for instance)
if (
seqlen > self._seq_len_cached
or self._cos_cached.device != device
or self._cos_cached.dtype != dtype
):
self._seq_len_cached = seqlen
t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
if self.scaling_factor is not None:
t /= self.scaling_factor
# Don't do einsum, it converts fp32 to fp16
# freqs = torch.einsum("i,j->ij", t, self.inv_freq)
freqs = torch.outer(t, self.inv_freq.to(device=t.device))
self._cos_cached = torch.cos(freqs).to(dtype)
self._sin_cached = torch.sin(freqs).to(dtype)
def get_cos_sin(
self, position_ids: torch.Tensor, max_s: int, dtype: torch.dtype
):
"""
Return cos and sin for the asked position ids
"""
if IS_ROCM_SYSTEM:
# For RoCm, we always use float cos/sin to avoid a cast.
            # For NVIDIA, for some reason, the flash-attn rotary kernel requires cos/sin and query/key to be of the same dtype: https://github.com/Dao-AILab/flash-attention/blob/017716451d446e464dde9aca3a3c1ed2209caaa9/csrc/rotary/rotary.cpp#L26
            # but it later casts cos/sin to float anyway: https://github.com/Dao-AILab/flash-attention/blob/017716451d446e464dde9aca3a3c1ed2209caaa9/csrc/rotary/rotary_cuda.cu#L29, which looks suboptimal.
dtype = torch.float32
self._update_cos_sin_cache(dtype, position_ids.device, max_s)
cos = torch.index_select(self._cos_cached, 0, position_ids)
sin = torch.index_select(self._sin_cached, 0, position_ids)
        # Note: this unsqueeze is not necessary with the RoCm + vLLM RoPE implementation, but we leave it as is to avoid yet another control-flow branch.
return cos.unsqueeze(1), sin.unsqueeze(1)
class DynamicPositionRotaryEmbedding(PositionRotaryEmbedding):
def __init__(self, dim, max_position_embeddings, base, device, scaling_factor):
inv_freq = _create_inv_freq(dim, base, device)
super().__init__(inv_freq, scaling_factor)
self.dim = dim
self.max_position_embeddings = max_position_embeddings
self.base = base
def _update_cos_sin_cache(self, dtype, device, seqlen):
# Reset the tables if the sequence length has changed,
# or if we're on a new device (possibly due to tracing for instance)
if (
seqlen > self._seq_len_cached
or self._cos_cached.device != device
or self._cos_cached.dtype != dtype
):
if seqlen > self.max_position_embeddings:
newbase = self.base * (
(self.scaling_factor * seqlen / self.max_position_embeddings)
- (self.scaling_factor - 1)
) ** (self.dim / (self.dim - 2))
self.inv_freq = _create_inv_freq(
self.dim, newbase, self.inv_freq.device
)
self._seq_len_cached = seqlen
t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
# Don't do einsum, it converts fp32 to fp16
# freqs = torch.einsum("i,j->ij", t, self.inv_freq)
freqs = torch.outer(t, self.inv_freq.to(device=t.device))
self._cos_cached = torch.cos(freqs).to(dtype)
self._sin_cached = torch.sin(freqs).to(dtype)
# Inverse dim formula to find dim based on number of rotations
import math
def find_correction_dim(
num_rotations, dim, base=10000, max_position_embeddings=2048
):
return (
dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi))
) / (2 * math.log(base))
# Find dim range bounds based on rotations
def find_correction_range(
low_rot, high_rot, dim, base=10000, max_position_embeddings=2048
):
low = math.floor(
find_correction_dim(low_rot, dim, base, max_position_embeddings)
)
high = math.ceil(
find_correction_dim(high_rot, dim, base, max_position_embeddings)
)
return max(low, 0), min(high, dim - 1) # Clamp values just in case
def linear_ramp_mask(min, max, dim):
if min == max:
max += 0.001 # Prevent singularity
linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
ramp_func = torch.clamp(linear_func, 0, 1)
return ramp_func
def get_mscale(scale=1):
if scale <= 1:
return 1.0
return 0.1 * math.log(scale) + 1.0
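    # e.g. get_mscale(8.0) = 0.1 * ln(8) + 1 ≈ 1.208: YaRN's attention magnitude
    # correction for an 8x context extension.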
class YarnPositionRotaryEmbedding(PositionRotaryEmbedding):
def __init__(
self,
dim,
max_position_embeddings,
base,
device,
scaling_factor,
*,
extrapolation_factor,
attn_factor,
beta_fast,
beta_slow,
):
inv_freq = _create_inv_freq(dim, base, device)
super().__init__(inv_freq, scaling_factor)
self.dim = dim
self.max_position_embeddings = max_position_embeddings
self.base = base
self.extrapolation_factor = extrapolation_factor
self.attn_factor = attn_factor
self.beta_fast = beta_fast
self.beta_slow = beta_slow
self.mscale = float(
get_mscale(self.scaling_factor) * self.attn_factor
) # Get n-d magnitude scaling corrected for interpolation
def _update_cos_sin_cache(self, dtype, device, seqlen):
# Reset the tables if the sequence length has changed,
# or if we're on a new device (possibly due to tracing for instance)
if (
seqlen > self._seq_len_cached
or self._cos_cached.device != device
or self._cos_cached.dtype != dtype
):
if seqlen > self.max_position_embeddings:
inv_freq_extrapolation = _create_inv_freq(
self.dim, self.base, self.inv_freq.device
)
freqs = 1.0 / inv_freq_extrapolation
inv_freq_interpolation = 1.0 / (self.scaling_factor * freqs)
low, high = find_correction_range(
self.beta_fast,
self.beta_slow,
self.dim,
self.base,
self.max_position_embeddings,
)
inv_freq_mask = (
1
- linear_ramp_mask(low, high, self.dim // 2).float().to(device)
) * self.extrapolation_factor # Get n-d rotational scaling corrected for extrapolation
inv_freq = (
inv_freq_interpolation * (1 - inv_freq_mask)
+ inv_freq_extrapolation * inv_freq_mask
)
self.inv_freq = inv_freq
self.mscale = float(
get_mscale(self.scaling_factor) * self.attn_factor
) # Get n-d magnitude scaling corrected for interpolation
self._seq_len_cached = seqlen
t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
# Don't do einsum, it converts fp32 to fp16
# freqs = torch.einsum("i,j->ij", t, self.inv_freq)
freqs = torch.outer(t, self.inv_freq.to(device=t.device))
self._cos_cached = (torch.cos(freqs) * self.mscale).to(dtype)
self._sin_cached = (torch.sin(freqs) * self.mscale).to(dtype)
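            # In summary: YaRN blends the interpolated (scaled) and extrapolated
            # (original) frequency schedules with a per-dimension linear ramp, and
            # scales the cos/sin caches by `mscale` to correct attention magnitude.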
except ImportError:
pass
| text-generation-inference/server/text_generation_server/utils/layers.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/layers.py",
"repo_id": "text-generation-inference",
"token_count": 19888
} | 226 |
target
.yarn | tokenizers/bindings/node/.prettierignore/0 | {
"file_path": "tokenizers/bindings/node/.prettierignore",
"repo_id": "tokenizers",
"token_count": 5
} | 227 |
{
"name": "tokenizers-darwin-x64",
"version": "0.13.4-rc1",
"os": [
"darwin"
],
"cpu": [
"x64"
],
"main": "tokenizers.darwin-x64.node",
"files": [
"tokenizers.darwin-x64.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
} | tokenizers/bindings/node/npm/darwin-x64/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/darwin-x64/package.json",
"repo_id": "tokenizers",
"token_count": 268
} | 228 |
{
"name": "tokenizers-win32-ia32-msvc",
"version": "0.13.4-rc1",
"os": [
"win32"
],
"cpu": [
"ia32"
],
"main": "tokenizers.win32-ia32-msvc.node",
"files": [
"tokenizers.win32-ia32-msvc.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
} | tokenizers/bindings/node/npm/win32-ia32-msvc/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/win32-ia32-msvc/package.json",
"repo_id": "tokenizers",
"token_count": 277
} | 229 |
use crate::decoders::Decoder;
use crate::encoding::{JsEncoding, JsTruncationDirection, JsTruncationStrategy};
use crate::models::Model;
use crate::normalizers::Normalizer;
use crate::pre_tokenizers::PreTokenizer;
use crate::processors::Processor;
use crate::tasks::tokenizer::{DecodeBatchTask, DecodeTask, EncodeBatchTask, EncodeTask};
use crate::trainers::Trainer;
use std::collections::HashMap;
use tokenizers::Model as ModelTrait;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use std::sync::{Arc, RwLock};
use tokenizers as tk;
#[napi]
#[derive(Default)]
pub enum PaddingDirection {
#[default]
Left,
Right,
}
impl From<PaddingDirection> for tk::PaddingDirection {
fn from(w: PaddingDirection) -> Self {
match w {
PaddingDirection::Left => tk::PaddingDirection::Left,
PaddingDirection::Right => tk::PaddingDirection::Right,
}
}
}
impl TryFrom<String> for PaddingDirection {
type Error = Error;
fn try_from(w: String) -> Result<Self> {
match w.as_str() {
"left" => Ok(PaddingDirection::Left),
"right" => Ok(PaddingDirection::Right),
s => Err(Error::from_reason(format!(
"{s:?} is not a valid direction"
))),
}
}
}
#[napi(object)]
#[derive(Default)]
pub struct PaddingOptions {
pub max_length: Option<u32>,
pub direction: Option<Either<String, PaddingDirection>>,
pub pad_to_multiple_of: Option<u32>,
pub pad_id: Option<u32>,
pub pad_type_id: Option<u32>,
pub pad_token: Option<String>,
}
impl TryFrom<PaddingOptions> for tk::PaddingParams {
type Error = Error;
fn try_from(value: PaddingOptions) -> Result<Self> {
let direction = match value.direction {
Some(either) => match either {
Either::A(string) => {
let direction: PaddingDirection = string.try_into()?;
direction.into()
}
Either::B(direction) => direction.into(),
},
None => tk::PaddingDirection::Right,
};
Ok(Self {
pad_to_multiple_of: value.pad_to_multiple_of.map(|s| s as usize),
pad_id: value.pad_id.unwrap_or_default(),
pad_type_id: value.pad_type_id.unwrap_or_default(),
pad_token: value.pad_token.unwrap_or("[PAD]".to_string()),
direction,
strategy: match value.max_length {
Some(length) => tk::PaddingStrategy::Fixed(length as usize),
None => tk::PaddingStrategy::BatchLongest,
},
})
}
}
#[napi(object)]
#[derive(Default)]
pub struct EncodeOptions {
pub is_pretokenized: Option<bool>,
pub add_special_tokens: Option<bool>,
}
#[derive(Default)]
struct EncodeOptionsDef {
// TODO
// is_pretokenized: bool,
add_special_tokens: bool,
}
impl From<EncodeOptions> for EncodeOptionsDef {
fn from(value: EncodeOptions) -> Self {
EncodeOptionsDef {
// TODO
// is_pretokenized: value.is_pretokenized.unwrap_or(false),
add_special_tokens: value.add_special_tokens.unwrap_or(true),
}
}
}
#[napi(object)]
#[derive(Default)]
pub struct TruncationOptions {
pub max_length: Option<u32>,
pub strategy: Option<JsTruncationStrategy>,
pub direction: Option<Either<String, JsTruncationDirection>>,
pub stride: Option<u32>,
}
impl TryFrom<TruncationOptions> for tk::TruncationParams {
type Error = Error;
fn try_from(value: TruncationOptions) -> Result<Self> {
let direction = match value.direction {
Some(either) => match either {
Either::A(string) => {
let direction: JsTruncationDirection = string.try_into()?;
direction.into()
}
Either::B(direction) => direction.into(),
},
None => Default::default(),
};
Ok(Self {
max_length: value.max_length.unwrap_or(0) as usize,
strategy: value.strategy.map(|s| s.into()).unwrap_or_default(),
direction,
stride: value.stride.unwrap_or_default() as usize,
})
}
}
#[napi(object)]
pub struct AddedTokenOptions {
pub single_word: Option<bool>,
pub left_strip: Option<bool>,
pub right_strip: Option<bool>,
pub normalized: Option<bool>,
}
#[napi]
#[derive(Clone)]
pub struct AddedToken {
token: tk::AddedToken,
}
#[napi]
impl AddedToken {
#[napi(constructor)]
pub fn from(token: String, is_special: bool, options: Option<AddedTokenOptions>) -> Self {
let mut token = tk::AddedToken::from(token, is_special);
if let Some(options) = options {
if let Some(sw) = options.single_word {
token = token.single_word(sw);
}
if let Some(ls) = options.left_strip {
token = token.lstrip(ls);
}
if let Some(rs) = options.right_strip {
token = token.rstrip(rs);
}
if let Some(n) = options.normalized {
token = token.normalized(n);
}
}
Self { token }
}
#[napi]
pub fn get_content(&self) -> String {
self.token.content.clone()
}
}
impl From<AddedToken> for tk::AddedToken {
fn from(v: AddedToken) -> Self {
v.token
}
}
type RsTokenizer = tk::TokenizerImpl<Model, Normalizer, PreTokenizer, Processor, Decoder>;
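// The tokenizer is wrapped in Arc<RwLock<..>> so that it can be shared with the
// async encode/decode tasks; `running_tasks` below reports the Arc strong count
// as a rough proxy for in-flight work.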
#[napi]
#[derive(Clone)]
pub struct Tokenizer {
pub(crate) tokenizer: Arc<RwLock<RsTokenizer>>,
}
#[napi]
impl Tokenizer {
#[napi(constructor)]
pub fn new(model: &Model) -> Self {
Self {
tokenizer: Arc::new(RwLock::new(tk::TokenizerImpl::new((*model).clone()))),
}
}
#[napi]
pub fn set_pre_tokenizer(&mut self, pre_tokenizer: &PreTokenizer) {
self
.tokenizer
.write()
.unwrap()
.with_pre_tokenizer((*pre_tokenizer).clone());
}
#[napi]
pub fn set_decoder(&mut self, decoder: &Decoder) {
self
.tokenizer
.write()
.unwrap()
.with_decoder((*decoder).clone());
}
#[napi]
pub fn set_model(&mut self, model: &Model) {
self.tokenizer.write().unwrap().with_model((*model).clone());
}
#[napi]
pub fn set_post_processor(&mut self, post_processor: &Processor) {
self
.tokenizer
.write()
.unwrap()
.with_post_processor((*post_processor).clone());
}
#[napi]
pub fn set_normalizer(&mut self, normalizer: &Normalizer) {
self
.tokenizer
.write()
.unwrap()
.with_normalizer((*normalizer).clone());
}
#[napi]
pub fn save(&self, path: String, pretty: Option<bool>) -> Result<()> {
let pretty = pretty.unwrap_or(false);
self
.tokenizer
.read()
.unwrap()
.save(path, pretty)
.map_err(|e| Error::from_reason(format!("{}", e)))
}
#[napi]
pub fn add_added_tokens(&mut self, tokens: Vec<&AddedToken>) -> u32 {
let tokens: Vec<_> = tokens
.into_iter()
.map(|tok| (*tok).clone().into())
.collect();
self.tokenizer.write().unwrap().add_tokens(&tokens) as u32
}
#[napi]
pub fn add_tokens(&mut self, tokens: Vec<String>) -> u32 {
let tokens: Vec<_> = tokens
.into_iter()
.map(|tok| tk::AddedToken::from(tok, false))
.collect();
self.tokenizer.write().unwrap().add_tokens(&tokens) as u32
}
#[napi(ts_return_type = "Promise<JsEncoding>")]
pub fn encode(
&self,
#[napi(ts_arg_type = "InputSequence")] sentence: String,
#[napi(ts_arg_type = "InputSequence | null")] pair: Option<String>,
encode_options: Option<EncodeOptions>,
) -> AsyncTask<EncodeTask<'static>> {
let options: EncodeOptionsDef = encode_options.unwrap_or_default().into();
let input: tk::EncodeInput = match pair {
Some(pair) => (sentence, pair).into(),
None => sentence.into(),
};
AsyncTask::new(EncodeTask {
tokenizer: (*self).clone(),
input: Some(input),
add_special_tokens: options.add_special_tokens,
})
}
#[napi(ts_return_type = "Promise<JsEncoding[]>")]
pub fn encode_batch(
&self,
#[napi(ts_arg_type = "EncodeInput[]")] sentences: Vec<String>,
encode_options: Option<EncodeOptions>,
) -> AsyncTask<EncodeBatchTask<'static>> {
let options: EncodeOptionsDef = encode_options.unwrap_or_default().into();
let inputs: Vec<tk::EncodeInput> = sentences
.into_iter()
.map(|sentence| sentence.into())
.collect();
AsyncTask::new(EncodeBatchTask {
tokenizer: (*self).clone(),
inputs: Some(inputs),
add_special_tokens: options.add_special_tokens,
})
}
#[napi(ts_return_type = "Promise<string>")]
pub fn decode(&self, ids: Vec<u32>, skip_special_tokens: bool) -> AsyncTask<DecodeTask> {
AsyncTask::new(DecodeTask {
tokenizer: (*self).clone(),
ids,
skip_special_tokens,
})
}
#[napi(ts_return_type = "Promise<string[]>")]
pub fn decode_batch(
&self,
ids: Vec<Vec<u32>>,
skip_special_tokens: bool,
) -> AsyncTask<DecodeBatchTask> {
AsyncTask::new(DecodeBatchTask {
tokenizer: (*self).clone(),
ids,
skip_special_tokens,
})
}
#[napi(factory)]
pub fn from_string(s: String) -> Result<Self> {
let tokenizer: tk::tokenizer::TokenizerImpl<
Model,
Normalizer,
PreTokenizer,
Processor,
Decoder,
> = s
.parse()
.map_err(|e| Error::from_reason(format!("{}", e)))?;
Ok(Self {
tokenizer: Arc::new(RwLock::new(tokenizer)),
})
}
#[napi(factory)]
pub fn from_file(file: String) -> Result<Self> {
let tokenizer = tk::tokenizer::TokenizerImpl::from_file(file)
.map_err(|e| Error::from_reason(format!("Error loading from file{}", e)))?;
Ok(Self {
tokenizer: Arc::new(RwLock::new(tokenizer)),
})
}
#[napi]
pub fn add_special_tokens(&mut self, tokens: Vec<String>) {
let tokens: Vec<_> = tokens
.into_iter()
.map(|s| tk::AddedToken::from(s, true))
.collect();
self.tokenizer.write().unwrap().add_special_tokens(&tokens);
}
#[napi]
pub fn set_truncation(
&mut self,
max_length: u32,
options: Option<TruncationOptions>,
) -> Result<()> {
let mut options: tk::TruncationParams = if let Some(options) = options {
options.try_into()?
} else {
Default::default()
};
options.max_length = max_length as usize;
self
.tokenizer
.write()
.unwrap()
.with_truncation(Some(options))
.unwrap();
Ok(())
}
#[napi]
pub fn disable_truncation(&mut self) {
self
.tokenizer
.write()
.unwrap()
.with_truncation(None)
.unwrap();
}
#[napi]
pub fn set_padding(&mut self, options: Option<PaddingOptions>) -> Result<()> {
let options = if let Some(options) = options {
Some(options.try_into()?)
} else {
None
};
self.tokenizer.write().unwrap().with_padding(options);
Ok(())
}
#[napi]
pub fn disable_padding(&mut self) {
self.tokenizer.write().unwrap().with_padding(None);
}
#[napi]
pub fn get_decoder(&self) -> Option<Decoder> {
self.tokenizer.read().unwrap().get_decoder().cloned()
}
#[napi]
pub fn get_normalizer(&self) -> Option<Normalizer> {
self.tokenizer.read().unwrap().get_normalizer().cloned()
}
#[napi]
pub fn get_pre_tokenizer(&self) -> Option<PreTokenizer> {
self.tokenizer.read().unwrap().get_pre_tokenizer().cloned()
}
#[napi]
pub fn get_post_processor(&self) -> Option<Processor> {
self.tokenizer.read().unwrap().get_post_processor().cloned()
}
#[napi]
pub fn get_vocab(&self, with_added_tokens: Option<bool>) -> HashMap<String, u32> {
let with_added_tokens = with_added_tokens.unwrap_or(true);
self.tokenizer.read().unwrap().get_vocab(with_added_tokens)
}
#[napi]
pub fn get_vocab_size(&self, with_added_tokens: Option<bool>) -> u32 {
self.get_vocab(with_added_tokens).len() as u32
}
#[napi]
pub fn id_to_token(&self, id: u32) -> Option<String> {
self.tokenizer.read().unwrap().id_to_token(id)
}
#[napi]
pub fn token_to_id(&self, token: String) -> Option<u32> {
self.tokenizer.read().unwrap().token_to_id(&token)
}
#[napi]
pub fn train(&mut self, files: Vec<String>) -> Result<()> {
let mut trainer: Trainer = self
.tokenizer
.read()
.unwrap()
.get_model()
.model
.as_ref()
.unwrap()
.read()
.unwrap()
.get_trainer()
.into();
self
.tokenizer
.write()
.unwrap()
.train_from_files(&mut trainer, files)
.map_err(|e| Error::from_reason(format!("{}", e)))?;
Ok(())
}
#[napi]
pub fn running_tasks(&self) -> u32 {
std::sync::Arc::strong_count(&self.tokenizer) as u32
}
#[napi]
pub fn post_process(
&self,
encoding: &JsEncoding,
pair: Option<&JsEncoding>,
add_special_tokens: Option<bool>,
) -> Result<JsEncoding> {
let add_special_tokens = add_special_tokens.unwrap_or(true);
Ok(
self
.tokenizer
.read()
.unwrap()
.post_process(
(*encoding).clone().try_into()?,
if let Some(pair) = pair {
Some((*pair).clone().try_into()?)
} else {
None
},
add_special_tokens,
)
.map_err(|e| Error::from_reason(format!("{}", e)))?
.into(),
)
}
}
#[napi(object)]
#[derive(Default)]
pub struct JsFromPretrainedParameters {
pub revision: Option<String>,
pub auth_token: Option<String>,
}
| tokenizers/bindings/node/src/tokenizer.rs/0 | {
"file_path": "tokenizers/bindings/node/src/tokenizer.rs",
"repo_id": "tokenizers",
"token_count": 5701
} | 230 |
import argparse
import glob
from tokenizers import BertWordPieceTokenizer
parser = argparse.ArgumentParser()
parser.add_argument(
"--files",
default=None,
metavar="path",
type=str,
required=True,
help="The files to use as training; accept '**/*.txt' type of patterns \
if enclosed in quotes",
)
parser.add_argument(
"--out",
default="./",
type=str,
help="Path to the output directory, where the files will be saved",
)
parser.add_argument("--name", default="bert-wordpiece", type=str, help="The name of the output vocab files")
args = parser.parse_args()
files = glob.glob(args.files)
if not files:
print(f"File does not exist: {args.files}")
exit(1)
# Initialize an empty tokenizer
tokenizer = BertWordPieceTokenizer(
clean_text=True,
handle_chinese_chars=True,
strip_accents=True,
lowercase=True,
)
# And then train
tokenizer.train(
files,
vocab_size=10000,
min_frequency=2,
show_progress=True,
special_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"],
limit_alphabet=1000,
wordpieces_prefix="##",
)
# Save the files
tokenizer.save_model(args.out, args.name)
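# Example invocation (paths are illustrative):
#   python train_bert_wordpiece.py --files "data/**/*.txt" --out ./vocab --name my-bert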
| tokenizers/bindings/python/examples/train_bert_wordpiece.py/0 | {
"file_path": "tokenizers/bindings/python/examples/train_bert_wordpiece.py",
"repo_id": "tokenizers",
"token_count": 472
} | 231 |
# Generated content DO NOT EDIT
class Model:
"""
Base class for all models
The model represents the actual tokenization algorithm. This is the part that
will contain and manage the learned vocabulary.
This class cannot be constructed directly. Please use one of the concrete models.
"""
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, tokens):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
class BPE(Model):
"""
An implementation of the BPE (Byte-Pair Encoding) algorithm
Args:
vocab (:obj:`Dict[str, int]`, `optional`):
            A dictionary of string keys and their ids :obj:`{"am": 0,...}`
merges (:obj:`List[Tuple[str, str]]`, `optional`):
A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]`
cache_capacity (:obj:`int`, `optional`):
The number of words that the BPE cache can contain. The cache allows
to speed-up the process by keeping the result of the merge operations
for a number of words.
dropout (:obj:`float`, `optional`):
A float between 0 and 1 that represents the BPE dropout to use.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
continuing_subword_prefix (:obj:`str`, `optional`):
The prefix to attach to subword units that don't represent a beginning of word.
end_of_word_suffix (:obj:`str`, `optional`):
The suffix to attach to subword units that represent an end of word.
fuse_unk (:obj:`bool`, `optional`):
Whether to fuse any subsequent unknown tokens into a single one
byte_fallback (:obj:`bool`, `optional`):
Whether to use spm byte-fallback trick (defaults to False)
"""
def __init__(
self,
vocab=None,
merges=None,
cache_capacity=None,
dropout=None,
unk_token=None,
continuing_subword_prefix=None,
end_of_word_suffix=None,
fuse_unk=None,
byte_fallback=False,
):
pass
@staticmethod
def from_file(cls, vocab, merge, **kwargs):
"""
Instantiate a BPE model from the given files.
This method is roughly equivalent to doing::
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
bpe = BPE(vocab, merges)
If you don't need to keep the :obj:`vocab, merges` values lying around,
this method is more optimized than manually calling
:meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE`
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.json` file
merges (:obj:`str`):
The path to a :obj:`merges.txt` file
Returns:
:class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files
"""
pass
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
@staticmethod
def read_file(self, vocab, merges):
"""
Read a :obj:`vocab.json` and a :obj:`merges.txt` files
This method provides a way to read and parse the content of these files,
returning the relevant data structures. If you want to instantiate some BPE models
from memory, this method gives you the expected input from the standard files.
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.json` file
merges (:obj:`str`):
The path to a :obj:`merges.txt` file
Returns:
A :obj:`Tuple` with the vocab and the merges:
The vocabulary and merges loaded into memory
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, tokens):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
class Unigram(Model):
"""
An implementation of the Unigram algorithm
Args:
        vocab (:obj:`List[Tuple[str, float]]`, `optional`):
A list of vocabulary items and their relative score [("am", -0.2442),...]
"""
def __init__(self, vocab, unk_id, byte_fallback):
pass
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, tokens):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
class WordLevel(Model):
"""
An implementation of the WordLevel algorithm
    The simplest tokenizer model: it maps tokens directly to their corresponding ids.
Args:
vocab (:obj:`str`, `optional`):
            A dictionary of string keys and their ids :obj:`{"am": 0,...}`
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
"""
def __init__(self, vocab, unk_token):
pass
@staticmethod
def from_file(vocab, unk_token):
"""
Instantiate a WordLevel model from the given file
This method is roughly equivalent to doing::
vocab = WordLevel.read_file(vocab_filename)
wordlevel = WordLevel(vocab)
If you don't need to keep the :obj:`vocab` values lying around, this method is
more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to
initialize a :class:`~tokenizers.models.WordLevel`
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.json` file
Returns:
:class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file
"""
pass
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
@staticmethod
def read_file(vocab):
"""
Read a :obj:`vocab.json`
This method provides a way to read and parse the content of a vocabulary file,
returning the relevant data structures. If you want to instantiate some WordLevel models
from memory, this method gives you the expected input from the standard files.
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.json` file
Returns:
:obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, tokens):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
class WordPiece(Model):
"""
An implementation of the WordPiece algorithm
Args:
vocab (:obj:`Dict[str, int]`, `optional`):
            A dictionary of string keys and their ids :obj:`{"am": 0,...}`
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
max_input_chars_per_word (:obj:`int`, `optional`):
            The maximum number of characters allowed in a single word.
"""
def __init__(self, vocab, unk_token, max_input_chars_per_word):
pass
@staticmethod
def from_file(vocab, **kwargs):
"""
Instantiate a WordPiece model from the given file
This method is roughly equivalent to doing::
vocab = WordPiece.read_file(vocab_filename)
wordpiece = WordPiece(vocab)
If you don't need to keep the :obj:`vocab` values lying around, this method is
more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to
initialize a :class:`~tokenizers.models.WordPiece`
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.txt` file
Returns:
:class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file
"""
pass
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
@staticmethod
def read_file(vocab):
"""
Read a :obj:`vocab.txt` file
This method provides a way to read and parse the content of a standard `vocab.txt`
file as used by the WordPiece Model, returning the relevant data structures. If you
want to instantiate some WordPiece models from memory, this method gives you the
expected input from the standard files.
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.txt` file
Returns:
:obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, tokens):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
| tokenizers/bindings/python/py_src/tokenizers/models/__init__.pyi/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/models/__init__.pyi",
"repo_id": "tokenizers",
"token_count": 7567
} | 232 |
import tokenizers
from argparse import ArgumentParser
import sentencepiece as spm
from collections import Counter
import json
import os
import datetime
try:
from termcolor import colored
has_color = True
except Exception:
has_color = False
def main():
parser = ArgumentParser("SentencePiece parity checker")
parser.add_argument(
"--input-file",
"-i",
type=str,
required=True,
help="Which files do you want to train from",
)
parser.add_argument(
"--model-file",
"-m",
type=str,
required=False,
default=None,
help="Use a pretrained token file",
)
parser.add_argument(
"--model-prefix",
type=str,
default="spm_parity",
help="Model prefix for spm_train",
)
parser.add_argument(
"--vocab-size",
"-v",
type=int,
default=8000,
help="Vocab size for spm_train",
)
parser.add_argument(
"--verbose",
action="store_true",
help="Verbosity",
)
parser.add_argument(
"--train",
action="store_true",
help="Instead of checking the encoder part, we check the trainer part",
)
parser.add_argument(
"--from-spm",
action="store_true",
help="Directly load the spm file with it's own normalizer",
)
args = parser.parse_args()
trained = False
if args.model_file is None:
spm.SentencePieceTrainer.Train(
f"--input={args.input_file} --model_prefix={args.model_prefix}"
f" --character_coverage=1.0"
f" --max_sentence_length=40000"
f" --num_threads=1"
f" --vocab_size={args.vocab_size}"
)
trained = True
args.model_file = f"{args.model_prefix}.model"
try:
if args.train:
check_train(args)
else:
check_encode(args)
finally:
if trained:
os.remove(f"{args.model_prefix}.model")
os.remove(f"{args.model_prefix}.vocab")
def check_train(args):
sp = spm.SentencePieceProcessor()
sp.Load(args.model_file)
tokenizer = tokenizers.SentencePieceUnigramTokenizer()
tokenizer.train(args.input_file, show_progress=False)
spm_tokens = 0
tokenizer_tokens = 0
with open(args.input_file, "r") as f:
for i, line in enumerate(f):
line = line.strip()
ids = sp.EncodeAsIds(line)
encoded = tokenizer.encode(line)
spm_tokens += len(ids)
tokenizer_tokens += len(encoded.ids)
vocab = [0 for i in range(args.vocab_size)]
spm_vocab = [0 for i in range(args.vocab_size)]
for token, index in tokenizer.get_vocab().items():
vocab[index] = token
for i in range(args.vocab_size):
spm_vocab[i] = sp.id_to_piece(i)
    # 0 is unk in tokenizers; 0, 1, 2 are unk, bos, eos in spm by default.
for i, (token, spm_token) in enumerate(zip(vocab[1:], spm_vocab[3:])):
if token != spm_token:
print(f"First different token is token {i} ({token} != {spm_token})")
break
print(f"Tokenizer used {tokenizer_tokens}, where spm used {spm_tokens}")
    assert tokenizer_tokens < spm_tokens, "Our trainer should be more efficient than the SPM one"
    print("Ok, our trainer is more efficient than the SPM one")
def check_diff(spm_diff, tok_diff, sp, tok):
if spm_diff == list(reversed(tok_diff)):
# AAA -> AA+A vs A+AA case.
return True
elif len(spm_diff) == len(tok_diff) and tok.decode(spm_diff) == tok.decode(tok_diff):
# Second order OK
# Barrich -> Barr + ich vs Bar + rich
return True
spm_reencoded = sp.encode(sp.decode(spm_diff))
tok_reencoded = tok.encode(tok.decode(spm_diff)).ids
if spm_reencoded != spm_diff and spm_reencoded == tok_reencoded:
# Type 3 error.
# Snehagatha ->
# Sne, h, aga, th, a
# Sne, ha, gat, ha
        # Re-encoding the decoded diff with spm does not even recover what spm originally gave us,
        # but it does match the tokenizer output...
return True
return False
def check_details(line, spm_ids, tok_ids, sp, tok):
    # Different tokenizations can be equivalent: AAA -> A + AA vs AA + A.
    # Narrow the comparison down to the diverging span before checking it.
for i, (spm_id, tok_id) in enumerate(zip(spm_ids, tok_ids)):
if spm_id != tok_id:
break
first = i
for i, (spm_id, tok_id) in enumerate(zip(reversed(spm_ids), reversed(tok_ids))):
if spm_id != tok_id:
break
last = len(spm_ids) - i
spm_diff = spm_ids[first:last]
tok_diff = tok_ids[first:last]
if check_diff(spm_diff, tok_diff, sp, tok):
return True
if last - first > 5:
        # We might have two separate problems merged together; attempt to subdivide the disjoint token span into smaller problems
spms = Counter(spm_ids[first:last])
toks = Counter(tok_ids[first:last])
removable_tokens = {spm_ for (spm_, si) in spms.items() if toks.get(spm_, 0) == si}
min_width = 3
for i in range(last - first - min_width):
if all(spm_ids[first + i + j] in removable_tokens for j in range(min_width)):
possible_matches = [
k
for k in range(last - first - min_width)
if tok_ids[first + k : first + k + min_width] == spm_ids[first + i : first + i + min_width]
]
for j in possible_matches:
if check_diff(spm_ids[first : first + i], tok_ids[first : first + j], sp, tok) and check_details(
line,
spm_ids[first + i : last],
tok_ids[first + j : last],
sp,
tok,
):
return True
print(f"Spm: {[tok.decode([spm_ids[i]]) for i in range(first, last)]}")
try:
print(f"Tok: {[tok.decode([tok_ids[i]]) for i in range(first, last)]}")
except Exception:
pass
ok_start = tok.decode(spm_ids[:first])
ok_end = tok.decode(spm_ids[last:])
wrong = tok.decode(spm_ids[first:last])
print()
if has_color:
print(f"{colored(ok_start, 'grey')}{colored(wrong, 'red')}{colored(ok_end, 'grey')}")
else:
print(wrong)
return False
def check_encode(args):
sp = spm.SentencePieceProcessor()
sp.Load(args.model_file)
if args.from_spm:
tok = tokenizers.SentencePieceUnigramTokenizer.from_spm(args.model_file)
else:
vocab = [(sp.id_to_piece(i), sp.get_score(i)) for i in range(sp.piece_size())]
unk_id = sp.unk_id()
tok = tokenizers.SentencePieceUnigramTokenizer(vocab, unk_id)
perfect = 0
imperfect = 0
wrong = 0
now = datetime.datetime.now
spm_total_time = datetime.timedelta(seconds=0)
tok_total_time = datetime.timedelta(seconds=0)
with open(args.input_file, "r", encoding="utf-8-sig") as f:
for i, line in enumerate(f):
line = line.strip()
start = now()
ids = sp.EncodeAsIds(line)
spm_time = now()
encoded = tok.encode(line)
tok_time = now()
spm_total_time += spm_time - start
tok_total_time += tok_time - spm_time
if args.verbose:
if i % 10000 == 0:
print(f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})")
print(f"SPM: {spm_total_time} - TOK: {tok_total_time}")
if ids != encoded.ids:
if check_details(line, ids, encoded.ids, sp, tok):
imperfect += 1
continue
else:
wrong += 1
else:
perfect += 1
assert (
ids == encoded.ids
), f"line {i}: {line} : \n\n{ids}\n{encoded.ids}\n{list(zip(encoded.ids, encoded.tokens))}"
print(f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})")
total = perfect + imperfect + wrong
print(f"Accuracy {perfect * 100 / total:.2f} Slowdown : {tok_total_time/ spm_total_time:.2f}")
if __name__ == "__main__":
main()
| tokenizers/bindings/python/scripts/spm_parity_check.py/0 | {
"file_path": "tokenizers/bindings/python/scripts/spm_parity_check.py",
"repo_id": "tokenizers",
"token_count": 4110
} | 233 |
use tokenizers as tk;
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::types::*;
use super::{
DestroyPtr, PyNormalizedString, PyNormalizedStringRefMut, RefMutContainer, RefMutGuard,
};
use crate::encoding::PyEncoding;
use crate::error::ToPyResult;
use crate::token::PyToken;
use tk::{OffsetReferential, OffsetType, Offsets, PreTokenizedString, Token};
fn split(pretok: &mut PreTokenizedString, func: &PyAny) -> PyResult<()> {
if !func.is_callable() {
Err(exceptions::PyTypeError::new_err(
"`split` expect a callable with the signature: \
`fn(index: int, normalized: NormalizedString) -> List[NormalizedString]`",
))
} else {
ToPyResult(pretok.split(|i, normalized| {
let output = func.call((i, PyNormalizedString::from(normalized)), None)?;
Ok(output
.extract::<Vec<PyNormalizedString>>()?
.into_iter()
.map(tk::NormalizedString::from))
}))
.into()
}
}
fn normalize(pretok: &mut PreTokenizedString, func: &PyAny) -> PyResult<()> {
if !func.is_callable() {
Err(exceptions::PyTypeError::new_err(
"`normalize` expect a callable with the signature: \
`fn(normalized: NormalizedString)`",
))
} else {
ToPyResult(pretok.normalize(|normalized| {
let norm = PyNormalizedStringRefMut::new(normalized);
func.call((norm.get(),), None)?;
Ok(())
}))
.into()
}
}
fn tokenize(pretok: &mut PreTokenizedString, func: &PyAny) -> PyResult<()> {
if !func.is_callable() {
Err(exceptions::PyTypeError::new_err(
"`tokenize` expect a callable with the signature: \
`fn(str) -> List[Token]`",
))
} else {
ToPyResult(pretok.tokenize(|normalized| {
let output = func.call((normalized.get(),), None)?;
Ok(output
.extract::<&PyList>()?
.into_iter()
.map(|obj| Ok(Token::from(obj.extract::<PyToken>()?)))
.collect::<PyResult<Vec<_>>>()?)
}))
.into()
}
}
/// Offset referential selector: offsets can be expressed relative to the original string or to the normalized one.
#[derive(Clone)]
pub struct PyOffsetReferential(OffsetReferential);
impl FromPyObject<'_> for PyOffsetReferential {
fn extract(obj: &PyAny) -> PyResult<Self> {
let s = obj.extract::<&str>()?;
Ok(Self(match s {
"original" => Ok(OffsetReferential::Original),
"normalized" => Ok(OffsetReferential::Normalized),
_ => Err(exceptions::PyValueError::new_err(
"Wrong value for OffsetReferential, expected one of `original, normalized`",
)),
}?))
}
}
#[derive(Clone)]
pub struct PyOffsetType(OffsetType);
impl FromPyObject<'_> for PyOffsetType {
fn extract(obj: &PyAny) -> PyResult<Self> {
let s = obj.extract::<&str>()?;
Ok(Self(match s {
"byte" => Ok(OffsetType::Byte),
"char" => Ok(OffsetType::Char),
_ => Err(exceptions::PyValueError::new_err(
"Wrong value for OffsetType, expected one of `byte, char`",
)),
}?))
}
}
type PySplit = (String, Offsets, Option<Vec<PyToken>>);
fn get_splits(
pretok: &PreTokenizedString,
offset_referential: PyOffsetReferential,
offset_type: PyOffsetType,
) -> Vec<PySplit> {
pretok
.get_splits(offset_referential.0, offset_type.0)
.into_iter()
.map(|(s, o, t)| {
(
s.to_owned(),
o,
t.as_ref()
.map(|tokens| tokens.iter().map(|t| t.clone().into()).collect()),
)
})
.collect()
}
fn to_encoding(
pretok: &PreTokenizedString,
type_id: u32,
word_idx: Option<u32>,
) -> PyResult<PyEncoding> {
Ok(ToPyResult(
pretok
.clone()
.into_encoding(word_idx, type_id, tk::OffsetType::Char),
)
.into_py()?
.into())
}
/// PreTokenizedString
///
/// Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the
/// underlying string, while keeping track of the alignment information (offsets).
///
/// The PreTokenizedString manages what we call `splits`. Each split represents a substring
/// which is a subpart of the original string, with the relevant offsets and tokens.
///
/// When calling one of the methods used to modify the PreTokenizedString (namely one of
/// `split`, `normalize` or `tokenize`), only the `splits` that don't have any associated
/// tokens will get modified.
///
/// Args:
/// sequence: str:
/// The string sequence used to initialize this PreTokenizedString
#[pyclass(module = "tokenizers", name = "PreTokenizedString")]
pub struct PyPreTokenizedString {
pub(crate) pretok: tk::PreTokenizedString,
}
impl From<PreTokenizedString> for PyPreTokenizedString {
fn from(pretok: PreTokenizedString) -> Self {
Self { pretok }
}
}
impl From<PyPreTokenizedString> for PreTokenizedString {
fn from(pretok: PyPreTokenizedString) -> Self {
pretok.pretok
}
}
#[pymethods]
impl PyPreTokenizedString {
#[new]
#[pyo3(text_signature = "(self, sequence)")]
fn new(s: &str) -> Self {
PreTokenizedString::from(s).into()
}
/// Split the PreTokenizedString using the given `func`
///
/// Args:
/// func: Callable[[index, NormalizedString], List[NormalizedString]]:
/// The function used to split each underlying split.
/// It is expected to return a list of `NormalizedString`, that represent the new
/// splits. If the given `NormalizedString` does not need any splitting, we can
/// just return it directly.
/// In order for the offsets to be tracked accurately, any returned `NormalizedString`
/// should come from calling either `.split` or `.slice` on the received one.
#[pyo3(text_signature = "(self, func)")]
fn split(&mut self, func: &PyAny) -> PyResult<()> {
split(&mut self.pretok, func)
}
/// Normalize each split of the `PreTokenizedString` using the given `func`
///
/// Args:
/// func: Callable[[NormalizedString], None]:
/// The function used to normalize each underlying split. This function
/// does not need to return anything, just calling the methods on the provided
/// NormalizedString allow its modification.
#[pyo3(text_signature = "(self, func)")]
fn normalize(&mut self, func: &PyAny) -> PyResult<()> {
normalize(&mut self.pretok, func)
}
/// Tokenize each split of the `PreTokenizedString` using the given `func`
///
/// Args:
/// func: Callable[[str], List[Token]]:
/// The function used to tokenize each underlying split. This function must return
/// a list of Token generated from the input str.
#[pyo3(text_signature = "(self, func)")]
fn tokenize(&mut self, func: &PyAny) -> PyResult<()> {
tokenize(&mut self.pretok, func)
}
/// Return an Encoding generated from this PreTokenizedString
///
/// Args:
/// type_id: int = 0:
/// The type_id to be used on the generated Encoding.
///
/// word_idx: Optional[int] = None:
/// An optional word index to be used for each token of this Encoding. If provided,
/// all the word indices in the generated Encoding will use this value, instead
/// of the one automatically tracked during pre-tokenization.
///
/// Returns:
/// An Encoding
#[pyo3(signature = (type_id = 0, word_idx = None))]
#[pyo3(text_signature = "(self, type_id=0, word_idx=None)")]
fn to_encoding(&self, type_id: u32, word_idx: Option<u32>) -> PyResult<PyEncoding> {
to_encoding(&self.pretok, type_id, word_idx)
}
/// Get the splits currently managed by the PreTokenizedString
///
/// Args:
/// offset_referential: :obj:`str`
/// Whether the returned splits should have offsets expressed relative
/// to the original string, or the normalized one. choices: "original", "normalized".
///
/// offset_type: :obj:`str`
/// Whether the returned splits should have offsets expressed in bytes or chars.
    ///         When slicing a str, we usually want to use chars, which is the default value.
    ///         In some cases it might be interesting to get these offsets expressed in bytes,
    ///         so it is possible to change that here.
    ///         choices: "char", "byte"
///
/// Returns
/// A list of splits
#[pyo3(signature = (
offset_referential = PyOffsetReferential(OffsetReferential::Original),
offset_type = PyOffsetType(OffsetType::Char)
))]
#[pyo3(text_signature = "(self, offset_referential=\"original\", offset_type=\"char\")")]
fn get_splits(
&self,
offset_referential: PyOffsetReferential,
offset_type: PyOffsetType,
) -> Vec<PySplit> {
get_splits(&self.pretok, offset_referential, offset_type)
}
}
#[pyclass(module = "tokenizers", name = "PreTokenizedString")]
#[derive(Clone)]
pub struct PyPreTokenizedStringRefMut {
inner: RefMutContainer<PreTokenizedString>,
}
impl DestroyPtr for PyPreTokenizedStringRefMut {
fn destroy(&mut self) {
self.inner.destroy();
}
}
impl PyPreTokenizedStringRefMut {
pub fn new(pretok: &mut tk::PreTokenizedString) -> RefMutGuard<Self> {
// SAFETY: This is safe because we return a RefMutGuard here.
// The compiler will make sure the &mut stays valid as necessary.
RefMutGuard::new(Self {
inner: RefMutContainer::new(pretok),
})
}
pub fn destroyed_error() -> PyErr {
exceptions::PyException::new_err(
"Cannot use a PreTokenizedStringRefMut outside `pre_tokenize`",
)
}
}
#[pymethods]
impl PyPreTokenizedStringRefMut {
fn split(&mut self, func: &PyAny) -> PyResult<()> {
self.inner
.map_mut(|pretok| split(pretok, func))
.ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)?
}
fn normalize(&mut self, func: &PyAny) -> PyResult<()> {
self.inner
.map_mut(|pretok| normalize(pretok, func))
.ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)?
}
fn tokenize(&mut self, func: &PyAny) -> PyResult<()> {
self.inner
.map_mut(|pretok| tokenize(pretok, func))
.ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)?
}
#[pyo3(signature = (type_id = 0, word_idx = None))]
fn to_encoding(&self, type_id: u32, word_idx: Option<u32>) -> PyResult<PyEncoding> {
self.inner
.map(|pretok| to_encoding(pretok, type_id, word_idx))
.ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)?
}
#[pyo3(signature = (
offset_referential = PyOffsetReferential(OffsetReferential::Original),
offset_type = PyOffsetType(OffsetType::Char)
))]
fn get_splits(
&self,
offset_referential: PyOffsetReferential,
offset_type: PyOffsetType,
) -> PyResult<Vec<PySplit>> {
self.inner
.map(|pretok| get_splits(pretok, offset_referential, offset_type))
.ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)
}
}
| tokenizers/bindings/python/src/utils/pretokenization.rs/0 | {
"file_path": "tokenizers/bindings/python/src/utils/pretokenization.rs",
"repo_id": "tokenizers",
"token_count": 4885
} | 234 |
from tokenizers import Tokenizer
from ..utils import data_dir, doc_wiki_tokenizer
disable_printing = True
original_print = print
def print(*args, **kwargs):
if not disable_printing:
original_print(*args, **kwargs)
class TestQuicktour:
# This method contains everything we don't want to run
@staticmethod
def slow_train():
tokenizer, trainer = TestQuicktour.get_tokenizer_trainer()
# START train
files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
tokenizer.train(files, trainer)
# END train
# START save
tokenizer.save("data/tokenizer-wiki.json")
# END save
@staticmethod
def get_tokenizer_trainer():
# START init_tokenizer
from tokenizers import Tokenizer
from tokenizers.models import BPE
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
# END init_tokenizer
# START init_trainer
from tokenizers.trainers import BpeTrainer
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
# END init_trainer
# START init_pretok
from tokenizers.pre_tokenizers import Whitespace
tokenizer.pre_tokenizer = Whitespace()
# END init_pretok
return tokenizer, trainer
def test_quicktour(self, doc_wiki_tokenizer):
def print(*args, **kwargs):
pass
try:
# START reload_tokenizer
tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")
# END reload_tokenizer
except Exception:
tokenizer = Tokenizer.from_file(doc_wiki_tokenizer)
# START encode
output = tokenizer.encode("Hello, y'all! How are you ð ?")
# END encode
# START print_tokens
print(output.tokens)
# ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
# END print_tokens
assert output.tokens == [
"Hello",
",",
"y",
"'",
"all",
"!",
"How",
"are",
"you",
"[UNK]",
"?",
]
# START print_ids
print(output.ids)
# [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
# END print_ids
assert output.ids == [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
# START print_offsets
print(output.offsets[9])
# (26, 27)
# END print_offsets
assert output.offsets[9] == (26, 27)
# START use_offsets
sentence = "Hello, y'all! How are you ð ?"
sentence[26:27]
# "ð"
# END use_offsets
assert sentence[26:27] == "ð"
# START check_sep
tokenizer.token_to_id("[SEP]")
# 2
# END check_sep
assert tokenizer.token_to_id("[SEP]") == 2
# START init_template_processing
from tokenizers.processors import TemplateProcessing
tokenizer.post_processor = TemplateProcessing(
single="[CLS] $A [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[
("[CLS]", tokenizer.token_to_id("[CLS]")),
("[SEP]", tokenizer.token_to_id("[SEP]")),
],
)
# END init_template_processing
# START print_special_tokens
output = tokenizer.encode("Hello, y'all! How are you ð ?")
print(output.tokens)
# ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"]
# END print_special_tokens
assert output.tokens == [
"[CLS]",
"Hello",
",",
"y",
"'",
"all",
"!",
"How",
"are",
"you",
"[UNK]",
"?",
"[SEP]",
]
# START print_special_tokens_pair
output = tokenizer.encode("Hello, y'all!", "How are you ð ?")
print(output.tokens)
# ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"]
# END print_special_tokens_pair
assert output.tokens == [
"[CLS]",
"Hello",
",",
"y",
"'",
"all",
"!",
"[SEP]",
"How",
"are",
"you",
"[UNK]",
"?",
"[SEP]",
]
# START print_type_ids
print(output.type_ids)
# [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
# END print_type_ids
assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
# START encode_batch
output = tokenizer.encode_batch(["Hello, y'all!", "How are you ð ?"])
# END encode_batch
# START encode_batch_pair
output = tokenizer.encode_batch(
[["Hello, y'all!", "How are you ð ?"], ["Hello to you too!", "I'm fine, thank you!"]]
)
# END encode_batch_pair
# START enable_padding
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
# END enable_padding
# START print_batch_tokens
output = tokenizer.encode_batch(["Hello, y'all!", "How are you ð ?"])
print(output[1].tokens)
# ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
# END print_batch_tokens
assert output[1].tokens == ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
# START print_attention_mask
print(output[1].attention_mask)
# [1, 1, 1, 1, 1, 1, 1, 0]
# END print_attention_mask
assert output[1].attention_mask == [1, 1, 1, 1, 1, 1, 1, 0]
if __name__ == "__main__":
import os
from urllib import request
from zipfile import ZipFile
disable_printing = False
if not os.path.isdir("data/wikitext-103-raw"):
print("Downloading wikitext-103...")
wiki_text, _ = request.urlretrieve(
"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip"
)
with ZipFile(wiki_text, "r") as z:
print("Unzipping in data...")
z.extractall("data")
print("Now training...")
TestQuicktour.slow_train()
| tokenizers/bindings/python/tests/documentation/test_quicktour.py/0 | {
"file_path": "tokenizers/bindings/python/tests/documentation/test_quicktour.py",
"repo_id": "tokenizers",
"token_count": 3290
} | 235 |
# Encoding
<tokenizerslangcontent>
<python>
## Encoding
[[autodoc]] tokenizers.Encoding
- all
- attention_mask
- ids
- n_sequences
- offsets
- overflowing
- sequence_ids
- special_tokens_mask
- tokens
- type_ids
- word_ids
- words
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/api/encoding.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/api/encoding.mdx",
"repo_id": "tokenizers",
"token_count": 190
} | 236 |
from docutils import nodes
import sphinx
from sphinx.locale import _
from conf import rust_version
logger = sphinx.util.logging.getLogger(__name__)
class RustRef:
def __call__(self, name, rawtext, text, lineno, inliner, options={}, content=[]):
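        # Illustrative usage (assumed typical input): the reStructuredText role
        #   :rust_struct:`~tokenizers::tokenizer::Tokenizer`
        # would resolve to
        #   https://docs.rs/tokenizers/<rust_version>/tokenizers/tokenizer/struct.Tokenizer.html
        # and render with the shortened title "Tokenizer".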
doctype = name.split("_")[1]
parts = text.split("::")
if text.startswith("~"):
title = parts[-1]
parts[0] = parts[0][1:]
        else:
            title = text
link = self.base_link()
if doctype == "struct":
l, title = self.make_struct_link(parts, title)
if doctype == "func":
l, title = self.make_func_link(parts, title)
if doctype == "meth":
l, title = self.make_meth_link(parts, title)
if doctype == "trait":
l, title = self.make_trait_link(parts, title)
link += l
node = nodes.reference(internal=False, refuri=link, text=title)
wrapper = nodes.literal(classes=["xref"])
wrapper += node
return [wrapper], []
def base_link(self):
return f"https://docs.rs/tokenizers/{rust_version}"
def make_struct_link(self, parts, title):
link = ""
struct_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/struct.{struct_name}.html"
return link, title
def make_func_link(self, parts, title):
link = ""
fn_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/fn.{fn_name}.html"
return link, title
def make_meth_link(self, parts, title):
meth_name = parts[-1]
if meth_name.endswith("()"):
meth_name = meth_name[:-2]
link, title = self.make_struct_link(parts[:-1], title)
link += f"#method.{meth_name}"
if not title.endswith(")"):
title += "()"
return link, title
def make_trait_link(self, parts, title):
link = ""
trait_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/trait.{trait_name}.html"
return link, title
def setup(app):
app.add_role("rust_struct", RustRef())
app.add_role("rust_func", RustRef())
app.add_role("rust_meth", RustRef())
app.add_role("rust_trait", RustRef())
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| tokenizers/docs/source/_ext/rust_doc.py/0 | {
"file_path": "tokenizers/docs/source/_ext/rust_doc.py",
"repo_id": "tokenizers",
"token_count": 1221
} | 237 |
Tokenizers
====================================================================================================
Fast State-of-the-art tokenizers, optimized for both research and production
`🤗 Tokenizers`_ provides an implementation of today's most used tokenizers, with
a focus on performance and versatility. These tokenizers are also used in
`🤗 Transformers`_.
.. _🤗 Tokenizers: https://github.com/huggingface/tokenizers
.. _🤗 Transformers: https://github.com/huggingface/transformers
Main features:
----------------------------------------------------------------------------------------------------
- Train new vocabularies and tokenize, using today's most used tokenizers.
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for both research and production.
- Full alignment tracking. Even with destructive normalization, it's always possible to get
the part of the original sentence that corresponds to any token.
- Does all the pre-processing: Truncation, Padding, add the special tokens your model needs.
.. toctree::
:maxdepth: 2
:caption: Getting Started
quicktour
installation/main
pipeline
components
.. toctree-tags::
:maxdepth: 3
   :caption: Using 🤗 Tokenizers
:glob:
:python:tutorials/python/*
.. toctree::
:maxdepth: 3
:caption: API Reference
api/reference
.. include:: entities.inc
| tokenizers/docs/source/index.rst/0 | {
"file_path": "tokenizers/docs/source/index.rst",
"repo_id": "tokenizers",
"token_count": 404
} | 238 |
use std::time::{Duration, Instant};
use criterion::black_box;
use tokenizers::{
Decoder, EncodeInput, Model, Normalizer, PostProcessor, PreTokenizer, TokenizerImpl, Trainer,
};
pub fn iter_bench_encode<M, N, PT, PP, D>(
iters: u64,
tokenizer: &TokenizerImpl<M, N, PT, PP, D>,
lines: &[EncodeInput],
) -> Duration
where
M: Model,
N: Normalizer,
PT: PreTokenizer,
PP: PostProcessor,
D: Decoder,
{
let mut duration = Duration::new(0, 0);
let mut line_index: usize = 0;
for _i in 0..iters {
if line_index >= lines.len() {
line_index = 0;
}
let input = lines[line_index].clone();
let start = Instant::now();
let _ = black_box(tokenizer.encode(input, false));
duration = duration.checked_add(start.elapsed()).unwrap();
}
duration
}
pub fn iter_bench_encode_batch<M, N, PT, PP, D>(
iters: u64,
tokenizer: &TokenizerImpl<M, N, PT, PP, D>,
batches: &[Vec<EncodeInput>],
) -> Duration
where
M: Model + Send + Sync,
N: Normalizer + Send + Sync,
PT: PreTokenizer + Send + Sync,
PP: PostProcessor + Send + Sync,
D: Decoder + Send + Sync,
{
let mut duration = Duration::new(0, 0);
let mut batch_index: usize = 0;
for _i in 0..iters {
if batch_index >= batches.len() {
batch_index = 0;
}
let batch = batches[batch_index].clone();
let start = Instant::now();
let _ = black_box(tokenizer.encode_batch(batch, false));
duration = duration.checked_add(start.elapsed()).unwrap();
}
duration
}
pub fn iter_bench_train<T, M, N, PT, PP, D>(
iters: u64,
tokenizer: &mut TokenizerImpl<M, N, PT, PP, D>,
trainer: &mut T,
files: Vec<String>,
) -> Duration
where
T: Trainer<Model = M> + Sync,
M: Model + Send + Sync,
N: Normalizer + Send + Sync,
PT: PreTokenizer + Send + Sync,
PP: PostProcessor + Send + Sync,
D: Decoder + Send + Sync,
{
let mut duration = Duration::new(0, 0);
for _i in 0..iters {
let start = Instant::now();
tokenizer.train_from_files(trainer, files.clone()).unwrap();
duration = duration.checked_add(start.elapsed()).unwrap();
}
duration
}
| tokenizers/tokenizers/benches/common/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/benches/common/mod.rs",
"repo_id": "tokenizers",
"token_count": 964
} | 239 |
// A dependency graph that contains any wasm must all be imported
// asynchronously. This `bootstrap.js` file does the single async import, so
// that no one else needs to worry about it again.
import("./index.js")
.catch(e => console.error("Error importing `index.js`:", e));
| tokenizers/tokenizers/examples/unstable_wasm/www/bootstrap.js/0 | {
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/bootstrap.js",
"repo_id": "tokenizers",
"token_count": 79
} | 240 |
//! [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model.
use std::{iter, mem};
mod model;
mod serialization;
pub mod trainer;
mod word;
type Pair = (u32, u32);
/// Errors that can be encountered while using or constructing a `BPE` model.
#[derive(thiserror::Error, Debug)]
pub enum Error {
/// An error encountered while reading files mainly.
#[error("IoError: {0}")]
Io(#[from] std::io::Error),
/// An error forwarded from Serde, while parsing JSON
#[error("JsonError: {0}")]
JsonError(#[from] serde_json::Error),
/// When the vocab.json file is in the wrong format
#[error("Bad vocabulary json file")]
BadVocabulary,
/// When the merges.txt file is in the wrong format. This error holds the line
/// number of the line that caused the error.
#[error("Merges text file invalid at line {0}")]
BadMerges(usize),
/// If a token found in merges, is not in the vocab
#[error("Token `{0}` out of vocabulary")]
MergeTokenOutOfVocabulary(String),
/// If the provided unk token is out of vocabulary
#[error("Unk token `{0}` not found in the vocabulary")]
UnkTokenOutOfVocabulary(String),
/// Dropout not between 0 and 1.
#[error("Dropout should be between 0 and 1")]
InvalidDropout,
}
/// Provides access to the `FirstLastIterator` to any Iterator
pub(crate) trait WithFirstLastIterator: Iterator + Sized {
fn with_first_and_last(self) -> FirstLastIterator<Self>;
}
impl<I> WithFirstLastIterator for I
where
I: Iterator,
{
fn with_first_and_last(self) -> FirstLastIterator<Self> {
FirstLastIterator {
first: true,
iter: self.peekable(),
}
}
}
/// Provides information about whether an item is the first and/or the last of the iterator
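/// For example (illustrative): `"ab".chars().with_first_and_last()` yields
/// `(true, false, 'a')` and then `(false, true, 'b')`.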
pub(crate) struct FirstLastIterator<I>
where
I: Iterator,
{
first: bool,
iter: iter::Peekable<I>,
}
impl<I> Iterator for FirstLastIterator<I>
where
I: Iterator,
{
/// (is_first, is_last, item)
type Item = (bool, bool, I::Item);
fn next(&mut self) -> Option<Self::Item> {
let first = mem::replace(&mut self.first, false);
self.iter
.next()
.map(|e| (first, self.iter.peek().is_none(), e))
}
}
// Re-export
pub use model::*;
pub use trainer::*;
use word::*;
| tokenizers/tokenizers/src/models/bpe/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/bpe/mod.rs",
"repo_id": "tokenizers",
"token_count": 891
} | 241 |
use super::{super::OrderedVocabIter, WordPiece, WordPieceBuilder};
use serde::{
de::{MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use std::collections::HashSet;
impl Serialize for WordPiece {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut model = serializer.serialize_struct("WordPiece", 5)?;
// Small fields first
model.serialize_field("type", "WordPiece")?;
model.serialize_field("unk_token", &self.unk_token)?;
model.serialize_field("continuing_subword_prefix", &self.continuing_subword_prefix)?;
model.serialize_field("max_input_chars_per_word", &self.max_input_chars_per_word)?;
// Then large ones
let ordered_vocab = OrderedVocabIter::new(&self.vocab_r);
model.serialize_field("vocab", &ordered_vocab)?;
model.end()
}
}
impl<'de> Deserialize<'de> for WordPiece {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_struct(
"WordPiece",
&[
"type",
"unk_token",
"continuing_subword_prefix",
"max_input_chars_per_word",
"vocab",
],
WordPieceVisitor,
)
}
}
struct WordPieceVisitor;
impl<'de> Visitor<'de> for WordPieceVisitor {
type Value = WordPiece;
fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "struct WordPiece")
}
fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error>
where
V: MapAccess<'de>,
{
let mut builder = WordPieceBuilder::new();
let mut missing_fields = vec![
// for retrocompatibility the "type" field is not mandatory
"unk_token",
"continuing_subword_prefix",
"max_input_chars_per_word",
"vocab",
]
.into_iter()
.collect::<HashSet<_>>();
while let Some(key) = map.next_key::<String>()? {
match key.as_ref() {
"unk_token" => builder = builder.unk_token(map.next_value()?),
"continuing_subword_prefix" => {
builder = builder.continuing_subword_prefix(map.next_value()?)
}
"max_input_chars_per_word" => {
builder = builder.max_input_chars_per_word(map.next_value()?)
}
"vocab" => builder = builder.vocab(map.next_value()?),
"type" => match map.next_value()? {
"WordPiece" => {}
u => {
return Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(u),
&"WordPiece",
))
}
},
_ => {}
}
missing_fields.remove::<str>(&key);
}
if !missing_fields.is_empty() {
Err(serde::de::Error::missing_field(
missing_fields.iter().next().unwrap(),
))
} else {
Ok(builder.build().map_err(serde::de::Error::custom)?)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn serde() {
let wp = WordPiece::default();
let wp_s = "{\
\"type\":\"WordPiece\",\
\"unk_token\":\"[UNK]\",\
\"continuing_subword_prefix\":\"##\",\
\"max_input_chars_per_word\":100,\
\"vocab\":{}\
}";
assert_eq!(serde_json::to_string(&wp).unwrap(), wp_s);
assert_eq!(serde_json::from_str::<WordPiece>(wp_s).unwrap(), wp);
}
#[test]
fn deserialization_should_fail() {
let missing_unk = "{\
\"type\":\"WordPiece\",\
\"continuing_subword_prefix\":\"##\",\
\"max_input_chars_per_word\":100,\
\"vocab\":{}\
}";
assert!(serde_json::from_str::<WordPiece>(missing_unk)
.unwrap_err()
.to_string()
.starts_with("missing field `unk_token`"));
let wrong_type = "{\
\"type\":\"WordLevel\",\
\"unk_token\":\"[UNK]\",\
\"vocab\":{}\
}";
assert!(serde_json::from_str::<WordPiece>(wrong_type)
.unwrap_err()
.to_string()
.starts_with("invalid value: string \"WordLevel\", expected WordPiece"));
}
}
| tokenizers/tokenizers/src/models/wordpiece/serialization.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/wordpiece/serialization.rs",
"repo_id": "tokenizers",
"token_count": 2453
} | 242 |
use serde::{Deserialize, Serialize};
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
use unicode_categories::UnicodeCategories;
fn is_punc(x: char) -> bool {
char::is_ascii_punctuation(&x) || x.is_punctuation()
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Punctuation {
#[serde(default = "default_split")]
behavior: SplitDelimiterBehavior,
}
fn default_split() -> SplitDelimiterBehavior {
SplitDelimiterBehavior::Isolated
}
impl Punctuation {
pub fn new(behavior: SplitDelimiterBehavior) -> Self {
Self { behavior }
}
}
impl Default for Punctuation {
fn default() -> Self {
Self::new(SplitDelimiterBehavior::Isolated)
}
}
impl PreTokenizer for Punctuation {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, s| s.split(is_punc, self.behavior))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn punctuation_basic() {
let pretok = Punctuation::default();
let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey friend", (0, 10)),
("!", (10, 11)),
(" How are you", (11, 27)),
("?", (27, 28)),
("!", (28, 29)),
("?", (29, 30)),
]
);
}
#[test]
fn deserialization() {
let punctuation: Punctuation = serde_json::from_str(r#"{"type": "Punctuation"}"#).unwrap();
assert_eq!(punctuation, Punctuation::default());
assert_eq!(
punctuation,
Punctuation::new(SplitDelimiterBehavior::Isolated)
);
}
#[test]
#[should_panic]
fn deserialization_erroneous() {
let _punctuation: Punctuation =
serde_json::from_str(r#"{"type": "WhitespaceSplit"}"#).unwrap();
}
}
| tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs",
"repo_id": "tokenizers",
"token_count": 1102
} | 243 |
use crate::utils::SysRegex;
use crate::{Offsets, Result};
use regex::Regex;
/// Pattern used to split a NormalizedString
pub trait Pattern {
/// Slice the given string in a list of pattern match positions, with
/// a boolean indicating whether this is a match or not.
///
/// This method *must* cover the whole string in its outputs, with
/// contiguous ordered slices.
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>>;
}
impl Pattern for char {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
let is_char = |c: char| -> bool { c == *self };
is_char.find_matches(inside)
}
}
impl Pattern for &str {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if self.is_empty() {
// If we try to find the matches with an empty string, just don't match anything
return Ok(vec![((0, inside.chars().count()), false)]);
}
let re = Regex::new(®ex::escape(self))?;
(&re).find_matches(inside)
}
}
impl Pattern for &String {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
let s: &str = self;
s.find_matches(inside)
}
}
impl Pattern for &Regex {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if inside.is_empty() {
return Ok(vec![((0, 0), false)]);
}
let mut prev = 0;
let mut splits = Vec::with_capacity(inside.len());
for m in self.find_iter(inside) {
if prev != m.start() {
splits.push(((prev, m.start()), false));
}
splits.push(((m.start(), m.end()), true));
prev = m.end();
}
if prev != inside.len() {
splits.push(((prev, inside.len()), false))
}
Ok(splits)
}
}
impl Pattern for &SysRegex {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if inside.is_empty() {
return Ok(vec![((0, 0), false)]);
}
let mut prev = 0;
let mut splits = Vec::with_capacity(inside.len());
for (start, end) in self.find_iter(inside) {
if prev != start {
splits.push(((prev, start), false));
}
splits.push(((start, end), true));
prev = end;
}
if prev != inside.len() {
splits.push(((prev, inside.len()), false))
}
Ok(splits)
}
}
impl<F> Pattern for F
where
F: Fn(char) -> bool,
{
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if inside.is_empty() {
return Ok(vec![((0, 0), false)]);
}
let mut last_offset = 0;
let mut last_seen = 0;
let mut matches = inside
.char_indices()
.flat_map(|(b, c)| {
last_seen = b + c.len_utf8();
if self(c) {
let mut events = Vec::with_capacity(2);
if last_offset < b {
// We need to emit what was before this match
events.push(((last_offset, b), false));
}
events.push(((b, b + c.len_utf8()), true));
last_offset = b + c.len_utf8();
events
} else {
vec![]
}
})
.collect::<Vec<_>>();
// Do not forget the last potential split
if last_seen > last_offset {
matches.push(((last_offset, last_seen), false));
}
Ok(matches)
}
}
/// Invert the `is_match` flags for the wrapped Pattern. This is useful
/// for example when we use a regex that matches words instead of a delimiter,
/// and we want to match the delimiter.
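/// For example (illustrative), `Invert(' ')` applied to `"a b"` flips the char
/// pattern's output into `[((0, 1), true), ((1, 2), false), ((2, 3), true)]`.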
pub struct Invert<P: Pattern>(pub P);
impl<P: Pattern> Pattern for Invert<P> {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
Ok(self
.0
.find_matches(inside)?
.into_iter()
.map(|(offsets, flag)| (offsets, !flag))
.collect())
}
}
#[cfg(test)]
mod tests {
use super::*;
use regex::Regex;
macro_rules! do_test {
($inside: expr, $pattern: expr => @ERROR) => {
assert!($pattern.find_matches($inside).is_err());
};
($inside: expr, $pattern: expr => $result: expr) => {
assert_eq!($pattern.find_matches($inside).unwrap(), $result);
assert_eq!(
Invert($pattern).find_matches($inside).unwrap(),
$result
.into_iter()
.map(|v: (Offsets, bool)| (v.0, !v.1))
.collect::<Vec<_>>()
);
};
}
#[test]
fn char() {
do_test!("aba", 'a' => vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]);
do_test!("bbbba", 'a' => vec![((0, 4), false), ((4, 5), true)]);
do_test!("aabbb", 'a' => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]);
do_test!("", 'a' => vec![((0, 0), false)]);
do_test!("aaa", 'b' => vec![((0, 3), false)]);
}
#[test]
fn str() {
do_test!("aba", "a" => vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]);
do_test!("bbbba", "a" => vec![((0, 4), false), ((4, 5), true)]);
do_test!("aabbb", "a" => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]);
do_test!("aabbb", "ab" => vec![((0, 1), false), ((1, 3), true), ((3, 5), false)]);
do_test!("aabbab", "ab" =>
vec![((0, 1), false), ((1, 3), true), ((3, 4), false), ((4, 6), true)]
);
do_test!("", "" => vec![((0, 0), false)]);
do_test!("aaa", "" => vec![((0, 3), false)]);
do_test!("aaa", "b" => vec![((0, 3), false)]);
}
#[test]
fn functions() {
let is_b = |c| c == 'b';
do_test!("aba", is_b => vec![((0, 1), false), ((1, 2), true), ((2, 3), false)]);
do_test!("aaaab", is_b => vec![((0, 4), false), ((4, 5), true)]);
do_test!("bbaaa", is_b => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]);
do_test!("", is_b => vec![((0, 0), false)]);
do_test!("aaa", is_b => vec![((0, 3), false)]);
}
#[test]
fn regex() {
let is_whitespace = Regex::new(r"\s+").unwrap();
do_test!("a b", &is_whitespace => vec![((0, 1), false), ((1, 4), true), ((4, 5), false)]);
do_test!(" a b ", &is_whitespace =>
vec![((0, 3), true), ((3, 4), false), ((4, 7), true), ((7, 8), false), ((8, 11), true)]
);
do_test!("", &is_whitespace => vec![((0, 0), false)]);
do_test!("ðŸð ð ð ðð ð£ðððð", &is_whitespace =>
vec![((0, 16), false), ((16, 17), true), ((17, 45), false)]
);
do_test!("aaa", &is_whitespace => vec![((0, 3), false)]);
}
#[test]
fn sys_regex() {
let is_whitespace = SysRegex::new(r"\s+").unwrap();
do_test!("a b", &is_whitespace => vec![((0, 1), false), ((1, 4), true), ((4, 5), false)]);
do_test!(" a b ", &is_whitespace =>
vec![((0, 3), true), ((3, 4), false), ((4, 7), true), ((7, 8), false), ((8, 11), true)]
);
do_test!("", &is_whitespace => vec![((0, 0), false)]);
do_test!("ðŸð ð ð ðð ð£ðððð", &is_whitespace =>
vec![((0, 16), false), ((16, 17), true), ((17, 45), false)]
);
do_test!("aaa", &is_whitespace => vec![((0, 3), false)]);
}
}
| tokenizers/tokenizers/src/tokenizer/pattern.rs/0 | {
"file_path": "tokenizers/tokenizers/src/tokenizer/pattern.rs",
"repo_id": "tokenizers",
"token_count": 3903
} | 244 |
#![cfg(feature = "http")]
use tokenizers::{FromPretrainedParameters, Result, Tokenizer};
#[test]
fn test_from_pretrained() -> Result<()> {
let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None)?;
let encoding = tokenizer.encode("Hey there dear friend!", false)?;
assert_eq!(
encoding.get_tokens(),
&["Hey", "there", "dear", "friend", "!"]
);
Ok(())
}
#[test]
fn test_from_pretrained_revision() -> Result<()> {
let tokenizer = Tokenizer::from_pretrained("anthony/tokenizers-test", None)?;
let encoding = tokenizer.encode("Hey there dear friend!", false)?;
assert_eq!(
encoding.get_tokens(),
&["hey", "there", "dear", "friend", "!"]
);
let tokenizer = Tokenizer::from_pretrained(
"anthony/tokenizers-test",
Some(FromPretrainedParameters {
revision: "gpt-2".to_string(),
..Default::default()
}),
)?;
let encoding = tokenizer.encode("Hey there dear friend!", false)?;
assert_eq!(
encoding.get_tokens(),
&["Hey", "Ä there", "Ä dear", "Ä friend", "!"]
);
Ok(())
}
#[test]
fn test_from_pretrained_invalid_model() {
let tokenizer = Tokenizer::from_pretrained("docs?", None);
assert!(tokenizer.is_err());
}
#[test]
fn test_from_pretrained_invalid_revision() {
let tokenizer = Tokenizer::from_pretrained(
"bert-base-cased",
Some(FromPretrainedParameters {
revision: "gpt?".to_string(),
..Default::default()
}),
);
assert!(tokenizer.is_err());
}
| tokenizers/tokenizers/tests/from_pretrained.rs/0 | {
"file_path": "tokenizers/tokenizers/tests/from_pretrained.rs",
"repo_id": "tokenizers",
"token_count": 683
} | 245 |
FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
LABEL maintainer="Hugging Face"
ARG DEBIAN_FRONTEND=noninteractive
# Use login shell to read variables from `~/.profile` (to pass dynamic created variables between RUN commands)
SHELL ["sh", "-lc"]
# The following `ARG` are mainly used to specify the versions explicitly & directly in this docker file, and not meant
# to be used as arguments for docker build (so far).
ARG PYTORCH='2.2.0'
# (not always a valid torch version)
ARG INTEL_TORCH_EXT='2.2.0'
# Example: `cu102`, `cu113`, etc.
ARG CUDA='cu118'
RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg git-lfs
RUN git lfs install
RUN python3 -m pip install --no-cache-dir --upgrade pip
ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
# During the switch to torch 2.2, we need to move the (explicit) torch installation below but keep the tf installation here.
# (otherwise we get `The runner has received a shutdown signal.` whose root cause is unknown but likely disk being full)
RUN python3 -m pip install --no-cache-dir -U tensorflow==2.13 protobuf==3.20.3 tensorflow_text tensorflow_probability
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime]
# RUN python3 -m pip uninstall -y torch torchvision torchaudio && python3 -m pip install --no-cache-dir -U torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
# TODO: Handle these in a python utility script
RUN [ ${#PYTORCH} -gt 0 -a "$PYTORCH" != "pre" ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; echo "export VERSION='$VERSION'" >> ~/.profile
RUN echo torch=$VERSION
# `torchvision` and `torchaudio` should be installed along with `torch`, especially for nightly build.
# Currently, let's just use their latest releases (when `torch` is installed with a release version)
# TODO: We might need to specify proper versions that work with a specific torch version (especially for past CI).
RUN [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA
RUN python3 -m pip uninstall -y flax jax
RUN python3 -m pip install --no-cache-dir intel_extension_for_pytorch==$INTEL_TORCH_EXT -f https://developer.intel.com/ipex-whl-stable-cpu
RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract
RUN python3 -m pip install -U "itsdangerous<2.1.0"
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/peft@main#egg=peft
# For bettertransformer
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/optimum@main#egg=optimum
# For video model testing
RUN python3 -m pip install --no-cache-dir decord av==9.2.0
# For `dinat` model
RUN python3 -m pip install --no-cache-dir 'natten<0.15.0' -f https://shi-labs.com/natten/wheels/$CUDA/
# For `nougat` tokenizer
RUN python3 -m pip install --no-cache-dir python-Levenshtein
# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop
| transformers/docker/transformers-all-latest-gpu/Dockerfile/0 | {
"file_path": "transformers/docker/transformers-all-latest-gpu/Dockerfile",
"repo_id": "transformers",
"token_count": 1166
} | 246 |
### Translating the Transformers documentation into your language
As part of our mission to democratize machine learning, we'd love to make the Transformers library available in many more languages! Follow the steps below if you want to help translate the documentation into your language 🙏.
**🗞️ Open an issue**
To get started, navigate to the [Issues](https://github.com/huggingface/transformers/issues) page of this repo and check if anyone else has opened an issue for your language. If not, open a new issue by selecting the "Translation template" from the "New issue" button.
Once an issue exists, post a comment to indicate which chapters you'd like to work on, and we'll add your name to the list.
**🍴 Fork the repository**
First, you'll need to [fork the Transformers repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). You can do this by clicking on the **Fork** button on the top-right corner of this repo's page.
Once you've forked the repo, you'll want to get the files on your local machine for editing. You can do that by cloning the fork with Git as follows:
```bash
git clone https://github.com/YOUR-USERNAME/transformers.git
```
**📋 Copy-paste the English version with a new language code**
The documentation files are in one leading directory:
- [`docs/source`](https://github.com/huggingface/transformers/tree/main/docs/source): All the documentation materials are organized here by language.
You'll only need to copy the files in the [`docs/source/en`](https://github.com/huggingface/transformers/tree/main/docs/source/en) directory, so first navigate to your fork of the repo and run the following:
```bash
cd ~/path/to/transformers/docs
cp -r source/en source/LANG-ID
```
Here, `LANG-ID` should be one of the ISO 639-1 or ISO 639-2 language codes -- see [here](https://www.loc.gov/standards/iso639-2/php/code_list.php) for a handy table.
**✍️ Start translating**
The fun part comes - translating the text!
The first thing we recommend is translating the part of the `_toctree.yml` file that corresponds to your doc chapter. This file is used to render the table of contents on the website.
> 🙋 If the `_toctree.yml` file doesn't yet exist for your language, you can create one by copy-pasting from the English version and deleting the sections unrelated to your chapter. Just make sure it exists in the `docs/source/LANG-ID/` directory!
The fields you should add are `local` (with the name of the file containing the translation; e.g. `autoclass_tutorial`), and `title` (with the title of the doc in your language; e.g. `Load pretrained instances with an AutoClass`) -- as a reference, here is the `_toctree.yml` for [English](https://github.com/huggingface/transformers/blob/main/docs/source/en/_toctree.yml):
```yaml
- sections:
- local: pipeline_tutorial # Do not change this! Use the same name for your .md file
title: Pipelines for inference # Translate this!
...
title: Tutorials # Translate this!
```
Once you have translated the `_toctree.yml` file, you can start translating the [MDX](https://mdxjs.com/) files associated with your docs chapter.
> 🙋 If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/transformers/issues) and tag @stevhliu and @MKhalusova.
| transformers/docs/TRANSLATING.md/0 | {
"file_path": "transformers/docs/TRANSLATING.md",
"repo_id": "transformers",
"token_count": 948
} | 247 |
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Checks on a Pull Request
When you open a pull request on 🤗 Transformers, a whole series of checks is run to make sure the patch you are adding is not breaking anything existing. Those checks are of four types:
- regular tests
- documentation build
- code and documentation style
- general repository consistency
In this document, we will try to explain what those various checks are and the reason behind them, as well as how to debug them locally if one of them fails on your PR.
Note that, ideally, they require you to have a dev install:
```bash
pip install transformers[dev]
```
or for an editable install:
```bash
pip install -e .[dev]
```
inside the Transformers repo. Since the number of optional dependencies of Transformers has grown a lot, you might not manage to get all of them. If the dev install fails, make sure to install the deep learning framework you are working with (PyTorch, TensorFlow and/or Flax), then run:
```bash
pip install transformers[quality]
```
or for an editable install:
```bash
pip install -e .[quality]
```
## Tests
All the jobs that begin with `ci/circleci: run_tests_` run parts of the Transformers testing suite. Each of those jobs focuses on a part of the library in a certain environment: for instance, `ci/circleci: run_tests_pipelines_tf` runs the pipelines test in an environment where only TensorFlow is installed.
Note that only part of the test suite is run each time, to avoid running tests when there is no real change in the modules they are testing: a utility is run to determine the differences in the library between before and after the PR (what GitHub shows you in the "Files changes" tab) and to pick the tests impacted by that diff. That utility can be run locally with:
```bash
python utils/tests_fetcher.py
```
from the root of the Transformers repo. It will:
1. Check for each file in the diff whether the changes are in the code or only in comments or docstrings. Only the files with real code changes are kept.
2. Build an internal map that gives, for each file of the source code of the library, all the files it recursively impacts. Module A is said to impact module B if module B imports module A. For the recursive impact, we need a chain of modules going from module A to module B in which each module imports the previous one.
3. Apply this map to the files gathered in step 1, which gives us the list of model files impacted by the PR.
4. Map each of those files to their corresponding test file(s) and get the list of tests to run.
When executing the script locally, you should get the results of steps 1, 3 and 4 printed and thus know which tests are run. The script will also create a file named `test_list.txt`, which contains the list of tests to run, and you can run them locally with the following command:
```bash
python -m pytest -n 8 --dist=loadfile -rA -s $(cat test_list.txt)
```
FÌr den Fall, dass Ihnen etwas entgangen ist, wird die komplette Testreihe ebenfalls tÀglich ausgefÌhrt.
## Dokumentation erstellen
Der Job `build_pr_documentation` erstellt und generiert eine Vorschau der Dokumentation, um sicherzustellen, dass alles in Ordnung ist, wenn Ihr PR zusammengefÃŒhrt wird. Ein Bot fÃŒgt einen Link zur Vorschau der Dokumentation zu Ihrem PR hinzu. Alle Ãnderungen, die Sie an dem PR vornehmen, werden automatisch in der Vorschau aktualisiert. Wenn die Dokumentation nicht erstellt werden kann, klicken Sie auf **Details** neben dem fehlgeschlagenen Auftrag, um zu sehen, wo der Fehler liegt. Oft ist der Fehler so einfach wie eine fehlende Datei im `toctree`.
Wenn Sie daran interessiert sind, die Dokumentation lokal zu erstellen oder in der Vorschau anzusehen, werfen Sie einen Blick in die [`README.md`](https://github.com/huggingface/transformers/tree/main/docs) im Ordner docs.
## Code und Dokumentationsstil
Die Formatierung des Codes erfolgt fÌr alle Quelldateien, die Beispiele und die Tests mit `black` und `ruff`. Wir haben auch ein benutzerdefiniertes Tool, das sich um die Formatierung von docstrings und `rst`-Dateien kÌmmert (`utils/style_doc.py`), sowie um die Reihenfolge der Lazy-Importe, die in den Transformers `__init__.py`-Dateien durchgefÌhrt werden (`utils/custom_init_isort.py`). All dies können Sie starten, indem Sie Folgendes ausfÌhren
```bash
make style
```
The CI checks that these have been applied inside the `ci/circleci: check_code_quality` check. It also runs `ruff`, which takes a basic look at your code and will complain if it finds an undefined variable, or one that is not used. To run that check locally, use
```bash
make quality
```
This can take a lot of time. To run the same thing on only the files you modified in the current branch, run
```bash
make fixup
```
This last command will also run all the additional checks for the repository consistency. Let's have a look at them.
## Repository consistency
This regroups all the tests to make sure your PR leaves the repository in a good state. You can run that check locally by executing the following:
```bash
make repo-consistency
```
This checks that:
- All objects added to the init are documented (performed by `utils/check_repo.py`)
- All `__init__.py` files have the same content in their two sections (performed by `utils/check_inits.py`)
- All code identified as a copy from another module is consistent with the original (performed by `utils/check_copies.py`)
- All configuration classes have at least one valid checkpoint mentioned in their docstrings (performed by `utils/check_config_docstrings.py`)
- All configuration classes only contain attributes that are used in the corresponding modeling files (performed by `utils/check_config_attributes.py`)
- The translations of the READMEs and the index of the doc have the same model list as the main README (performed by `utils/check_copies.py`)
- The auto-generated tables in the documentation are up to date (performed by `utils/check_table.py`)
- The library has all objects available even if not all optional dependencies are installed (performed by `utils/check_dummies.py`)
Should this check fail, the first two items require manual fixing; the last four can be fixed automatically for you by running the command
```bash
make fix-copies
```
Additional checks concern PRs that add new models, mainly that:
- All models added are in an Auto-mapping (performed by `utils/check_repo.py`)
<!-- TODO Sylvain, add a check that makes sure the common tests are implemented.-->
- All models are properly tested (performed by `utils/check_repo.py`)
<!-- TODO Sylvain, add the following
- All models are added to the main README, inside the main doc
- All checkpoints used actually exist on the Hub
-->
### Check copies
Since the Transformers library is very opinionated with respect to model code, and each model should be fully implemented in a single file without relying on other models, we have added a mechanism that checks whether a copy of the code of a layer of a given model stays consistent with the original. This way, when there is a bug fix, we can see all the other impacted models and choose to trickle down the modification or break the copy.
<Tip>
If a file is a full copy of another file, you should register it in the constant `FULL_COPIES` of `utils/check_copies.py`.
</Tip>
This mechanism relies on comments of the form `# Copied from xxx`. The `xxx` should contain the whole path to the class or function being copied below. For instance, `RobertaSelfOutput` is a direct copy of the `BertSelfOutput` class, so you can see [here](https://github.com/huggingface/transformers/blob/2bd7a27a671fd1d98059124024f580f8f5c0f3b5/src/transformers/models/roberta/modeling_roberta.py#L289) that it has a comment:
```py
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
```
Note that instead of applying this to a whole class, you can apply it to the relevant methods being copied from. For instance, [here](https://github.com/huggingface/transformers/blob/2bd7a27a671fd1d98059124024f580f8f5c0f3b5/src/transformers/models/roberta/modeling_roberta.py#L598) you can see how `RobertaPreTrainedModel._init_weights` is copied from the same method in `BertPreTrainedModel` with the comment:
```py
# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
```
Sometimes the copy is exactly the same except for names: for instance, in `RobertaAttention` we use `RobertaSelfAttention` instead of `BertSelfAttention`, but other than that, the code is exactly the same. This is why `# Copied from` supports simple string replacements with the following syntax: `Copied from xxx with foo->bar`. This means the code is copied with all instances of `foo` being replaced by `bar`. You can see how it's used [here](https://github.com/huggingface/transformers/blob/2bd7a27a671fd1d98059124024f580f8f5c0f3b5/src/transformers/models/roberta/modeling_roberta.py#L304C1-L304C86) in `RobertaAttention` with the comment:
```py
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
```
Note that there shouldn't be any spaces around the arrow (unless that space is part of the pattern to replace, of course).
You can add several patterns separated by a comma. For instance, here `CamembertForMaskedLM` is a direct copy of `RobertaForMaskedLM` with two replacements: `Roberta` to `Camembert` and `ROBERTA` to `CAMEMBERT`. You can see [here](https://github.com/huggingface/transformers/blob/15082a9dc6950ecae63a0d3e5060b2fc7f15050a/src/transformers/models/camembert/modeling_camembert.py#L929) how this is done with the comment:
```py
# Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM with Roberta->Camembert, ROBERTA->CAMEMBERT
```
If the order matters (because one of the replacements might conflict with a previous one), the replacements are executed from left to right.
<Tip>
If the replacements change the formatting (if you replace a short name by a very long name, for instance), the copy is checked after applying the auto-formatter.
</Tip>
Another way, when the patterns are just different casings of the same replacement (with an uppercased and a lowercased variant), is to add the option `all-casing`. [Here](https://github.com/huggingface/transformers/blob/15082a9dc6950ecae63a0d3e5060b2fc7f15050a/src/transformers/models/mobilebert/modeling_mobilebert.py#L1237) is an example in `MobileBertForSequenceClassification` with the comment:
```py
# Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing
```
In this case, the code is copied from `BertForSequenceClassification` by replacing:
- `Bert` by `MobileBert` (for instance, when using `MobileBertModel` in the init)
- `bert` by `mobilebert` (for instance, when defining `self.mobilebert`)
- `BERT` by `MOBILEBERT` (in the constant `MOBILEBERT_INPUTS_DOCSTRING`)
| transformers/docs/source/de/pr_checks.md/0 | {
"file_path": "transformers/docs/source/de/pr_checks.md",
"repo_id": "transformers",
"token_count": 4986
} | 248 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Benchmarks
<Tip warning={true}>
Hugging Face's benchmarking tools are deprecated, and it is advised to use external benchmarking libraries to measure the speed
and memory complexity of Transformer models.
</Tip>
[[open-in-colab]]
Let's take a look at how 🤗 Transformers models can be benchmarked, best practices, and already available benchmarks.
A notebook explaining in more detail how to benchmark 🤗 Transformers models can be found [here](https://github.com/huggingface/notebooks/tree/main/examples/benchmark.ipynb).
## How to benchmark 🤗 Transformers models
The classes [`PyTorchBenchmark`] and [`TensorFlowBenchmark`] allow you to flexibly benchmark 🤗 Transformers models. The benchmark classes allow us to measure the _peak memory usage_ and _required time_ for both _inference_ and _training_.
<Tip>
Here, _inference_ is defined by a single forward pass, and _training_ is defined by a single forward pass and
backward pass.
</Tip>
The benchmark classes [`PyTorchBenchmark`] and [`TensorFlowBenchmark`] expect an object of type [`PyTorchBenchmarkArguments`] and
[`TensorFlowBenchmarkArguments`], respectively, for instantiation. [`PyTorchBenchmarkArguments`] and [`TensorFlowBenchmarkArguments`] are data classes and contain all relevant configurations for their corresponding benchmark class. In the following example, it is shown how a BERT model of type _bert-base-cased_ can be benchmarked.
<frameworkcontent>
<pt>
```py
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
>>> args = PyTorchBenchmarkArguments(models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512])
>>> benchmark = PyTorchBenchmark(args)
```
</pt>
<tf>
```py
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
>>> args = TensorFlowBenchmarkArguments(
... models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
... )
>>> benchmark = TensorFlowBenchmark(args)
```
</tf>
</frameworkcontent>
Here, three arguments are given to the benchmark argument data classes, namely `models`, `batch_sizes`, and
`sequence_lengths`. The argument `models` is required and expects a `list` of model identifiers from the
[model hub](https://huggingface.co/models). The `list` arguments `batch_sizes` and `sequence_lengths` define
the size of the `input_ids` on which the model is benchmarked. There are many more parameters that can be configured
via the benchmark argument data classes. For more detail on these, one can either directly consult the files
`src/transformers/benchmark/benchmark_args_utils.py`, `src/transformers/benchmark/benchmark_args.py` (for PyTorch)
and `src/transformers/benchmark/benchmark_args_tf.py` (for Tensorflow). Alternatively, running the following shell
commands from root will print out a descriptive list of all configurable parameters for PyTorch and Tensorflow
respectively.
<frameworkcontent>
<pt>
```bash
python examples/pytorch/benchmarking/run_benchmark.py --help
```
An instantiated benchmark object can then simply be run by calling `benchmark.run()`.
```py
>>> results = benchmark.run()
>>> print(results)
==================== INFERENCE - SPEED - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Time in s
--------------------------------------------------------------------------------
google-bert/bert-base-uncased 8 8 0.006
google-bert/bert-base-uncased 8 32 0.006
google-bert/bert-base-uncased 8 128 0.018
google-bert/bert-base-uncased 8 512 0.088
--------------------------------------------------------------------------------
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
google-bert/bert-base-uncased 8 8 1227
google-bert/bert-base-uncased 8 32 1281
google-bert/bert-base-uncased 8 128 1307
google-bert/bert-base-uncased 8 512 1539
--------------------------------------------------------------------------------
==================== ENVIRONMENT INFORMATION ====================
- transformers_version: 2.11.0
- framework: PyTorch
- use_torchscript: False
- framework_version: 1.4.0
- python_version: 3.6.10
- system: Linux
- cpu: x86_64
- architecture: 64bit
- date: 2020-06-29
- time: 08:58:43.371351
- fp16: False
- use_multiprocessing: True
- only_pretrain_model: False
- cpu_ram_mb: 32088
- use_gpu: True
- num_gpus: 1
- gpu: TITAN RTX
- gpu_ram_mb: 24217
- gpu_power_watts: 280.0
- gpu_performance_state: 2
- use_tpu: False
```
</pt>
<tf>
```bash
python examples/tensorflow/benchmarking/run_benchmark_tf.py --help
```
An instantiated benchmark object can then simply be run by calling `benchmark.run()`.
```py
>>> results = benchmark.run()
>>> print(results)
==================== INFERENCE - SPEED - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Time in s
--------------------------------------------------------------------------------
google-bert/bert-base-uncased 8 8 0.005
google-bert/bert-base-uncased 8 32 0.008
google-bert/bert-base-uncased 8 128 0.022
google-bert/bert-base-uncased 8 512 0.105
--------------------------------------------------------------------------------
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
google-bert/bert-base-uncased 8 8 1330
google-bert/bert-base-uncased 8 32 1330
google-bert/bert-base-uncased 8 128 1330
google-bert/bert-base-uncased 8 512 1770
--------------------------------------------------------------------------------
==================== ENVIRONMENT INFORMATION ====================
- transformers_version: 2.11.0
- framework: Tensorflow
- use_xla: False
- framework_version: 2.2.0
- python_version: 3.6.10
- system: Linux
- cpu: x86_64
- architecture: 64bit
- date: 2020-06-29
- time: 09:26:35.617317
- fp16: False
- use_multiprocessing: True
- only_pretrain_model: False
- cpu_ram_mb: 32088
- use_gpu: True
- num_gpus: 1
- gpu: TITAN RTX
- gpu_ram_mb: 24217
- gpu_power_watts: 280.0
- gpu_performance_state: 2
- use_tpu: False
```
</tf>
</frameworkcontent>
By default, the _time_ and the _required memory_ for _inference_ are benchmarked. In the example output above the first
two sections show the result corresponding to _inference time_ and _inference memory_. In addition, all relevant
information about the computing environment, _e.g._ the GPU type, the system, the library versions, etc... are printed
out in the third section under _ENVIRONMENT INFORMATION_. This information can optionally be saved in a _.csv_ file
when adding the argument `save_to_csv=True` to [`PyTorchBenchmarkArguments`] and
[`TensorFlowBenchmarkArguments`] respectively. In this case, every section is saved in a separate
_.csv_ file. The path to each _.csv_ file can optionally be defined via the argument data classes.
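As a quick, hedged illustration, the sketch below wires these options together. The CSV file-path argument names (`inference_time_csv_file`, `inference_memory_csv_file`, `env_info_csv_file`) are assumptions drawn from `src/transformers/benchmark/benchmark_args_utils.py`; verify them against your installed version:
```py
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
>>> args = PyTorchBenchmarkArguments(
...     models=["google-bert/bert-base-uncased"],
...     batch_sizes=[8],
...     sequence_lengths=[8, 32],
...     save_to_csv=True,
...     inference_time_csv_file="inference_time.csv",  # assumed argument name
...     inference_memory_csv_file="inference_memory.csv",  # assumed argument name
...     env_info_csv_file="env_info.csv",  # assumed argument name
... )
>>> benchmark = PyTorchBenchmark(args)
>>> results = benchmark.run()  # each result section is also written to its own .csv file
```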
Instead of benchmarking pre-trained models via their model identifier, _e.g._ `google-bert/bert-base-uncased`, the user can
alternatively benchmark an arbitrary configuration of any available model class. In this case, a `list` of
configurations must be passed to the benchmark together with the benchmark args, as follows.
<frameworkcontent>
<pt>
```py
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig
>>> args = PyTorchBenchmarkArguments(
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
... )
>>> config_base = BertConfig()
>>> config_384_hid = BertConfig(hidden_size=384)
>>> config_6_lay = BertConfig(num_hidden_layers=6)
>>> benchmark = PyTorchBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
>>> benchmark.run()
==================== INFERENCE - SPEED - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Time in s
--------------------------------------------------------------------------------
bert-base 8 8 0.006
bert-base 8 32 0.006
bert-base 8 128 0.018
bert-base 8 512 0.088
bert-384-hid 8 8 0.006
bert-384-hid 8 32 0.006
bert-384-hid 8 128 0.011
bert-384-hid 8 512 0.054
bert-6-lay 8 8 0.003
bert-6-lay 8 32 0.004
bert-6-lay 8 128 0.009
bert-6-lay 8 512 0.044
--------------------------------------------------------------------------------
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
bert-base 8 8 1277
bert-base 8 32 1281
bert-base 8 128 1307
bert-base 8 512 1539
bert-384-hid 8 8 1005
bert-384-hid 8 32 1027
bert-384-hid 8 128 1035
bert-384-hid 8 512 1255
bert-6-lay 8 8 1097
bert-6-lay 8 32 1101
bert-6-lay 8 128 1127
bert-6-lay 8 512 1359
--------------------------------------------------------------------------------
==================== ENVIRONMENT INFORMATION ====================
- transformers_version: 2.11.0
- framework: PyTorch
- use_torchscript: False
- framework_version: 1.4.0
- python_version: 3.6.10
- system: Linux
- cpu: x86_64
- architecture: 64bit
- date: 2020-06-29
- time: 09:35:25.143267
- fp16: False
- use_multiprocessing: True
- only_pretrain_model: False
- cpu_ram_mb: 32088
- use_gpu: True
- num_gpus: 1
- gpu: TITAN RTX
- gpu_ram_mb: 24217
- gpu_power_watts: 280.0
- gpu_performance_state: 2
- use_tpu: False
```
</pt>
<tf>
```py
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments, BertConfig
>>> args = TensorFlowBenchmarkArguments(
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
... )
>>> config_base = BertConfig()
>>> config_384_hid = BertConfig(hidden_size=384)
>>> config_6_lay = BertConfig(num_hidden_layers=6)
>>> benchmark = TensorFlowBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
>>> benchmark.run()
==================== INFERENCE - SPEED - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Time in s
--------------------------------------------------------------------------------
bert-base 8 8 0.005
bert-base 8 32 0.008
bert-base 8 128 0.022
bert-base 8 512 0.106
bert-384-hid 8 8 0.005
bert-384-hid 8 32 0.007
bert-384-hid 8 128 0.018
bert-384-hid 8 512 0.064
bert-6-lay 8 8 0.002
bert-6-lay 8 32 0.003
bert-6-lay 8 128 0.011
bert-6-lay 8 512 0.074
--------------------------------------------------------------------------------
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
bert-base 8 8 1330
bert-base 8 32 1330
bert-base 8 128 1330
bert-base 8 512 1770
bert-384-hid 8 8 1330
bert-384-hid 8 32 1330
bert-384-hid 8 128 1330
bert-384-hid 8 512 1540
bert-6-lay 8 8 1330
bert-6-lay 8 32 1330
bert-6-lay 8 128 1330
bert-6-lay 8 512 1540
--------------------------------------------------------------------------------
==================== ENVIRONMENT INFORMATION ====================
- transformers_version: 2.11.0
- framework: Tensorflow
- use_xla: False
- framework_version: 2.2.0
- python_version: 3.6.10
- system: Linux
- cpu: x86_64
- architecture: 64bit
- date: 2020-06-29
- time: 09:38:15.487125
- fp16: False
- use_multiprocessing: True
- only_pretrain_model: False
- cpu_ram_mb: 32088
- use_gpu: True
- num_gpus: 1
- gpu: TITAN RTX
- gpu_ram_mb: 24217
- gpu_power_watts: 280.0
- gpu_performance_state: 2
- use_tpu: False
```
</tf>
</frameworkcontent>
Again, _inference time_ and _required memory_ for _inference_ are measured, but this time for customized configurations
of the `BertModel` class. This feature can especially be helpful when deciding for which configuration the model
should be trained.
## Benchmark best practices
This section lists a couple of best practices one should be aware of when benchmarking a model.
- Currently, only single device benchmarking is supported. When benchmarking on GPU, it is recommended that the user
specifies on which device the code should be run by setting the `CUDA_VISIBLE_DEVICES` environment variable in the
shell, _e.g._ `export CUDA_VISIBLE_DEVICES=0` before running the code.
- The option `no_multi_processing` should only be set to `True` for testing and debugging. To ensure accurate
memory measurement, it is recommended to run each memory benchmark in a separate process, i.e. by keeping
`no_multi_processing` at its default of `False`.
- One should always state the environment information when sharing the results of a model benchmark. Results can vary
heavily between different GPU devices, library versions, etc., so benchmark results on their own are not very
useful for the community.
## Sharing your benchmark
Previously all available core models (10 at the time) had been benchmarked for _inference time_, across many different
settings: using PyTorch, with and without TorchScript, using TensorFlow, with and without XLA. All of those tests were
done across CPUs (except for TensorFlow XLA) and GPUs.
The approach is detailed in the [following blogpost](https://medium.com/huggingface/benchmarking-transformers-pytorch-and-tensorflow-e2917fb891c2) and the results are
available [here](https://docs.google.com/spreadsheets/d/1sryqufw2D0XlUH4sq3e9Wnxu5EAQkaohzrJbd5HdQ_w/edit?usp=sharing).
With the new _benchmark_ tools, it is easier than ever to share your benchmark results with the community:
- [PyTorch Benchmarking Results](https://github.com/huggingface/transformers/tree/main/examples/pytorch/benchmarking/README.md).
- [TensorFlow Benchmarking Results](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/benchmarking/README.md).
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Hyperparameter Search using Trainer API
🤗 Transformers provides a [`Trainer`] class optimized for training 🤗 Transformers models, making it easier to start training without manually writing your own training loop. The [`Trainer`] also provides an API for hyperparameter search. This doc shows how to enable it with an example.
## Hyperparameter Search backend
[`Trainer`] currently supports four hyperparameter search backends:
[optuna](https://optuna.org/), [sigopt](https://sigopt.com/), [raytune](https://docs.ray.io/en/latest/tune/index.html) and [wandb](https://wandb.ai/site/sweeps).
Install the backend you want to use before running a hyperparameter search:
```bash
pip install optuna
pip install sigopt
pip install wandb
pip install ray[tune]
```
## How to enable Hyperparameter search in an example
Define the hyperparameter search space; different backends need different formats.
For sigopt, see the sigopt [object_parameter](https://docs.sigopt.com/ai-module-api-references/api_reference/objects/object_parameter) documentation; it looks like the following:
```py
>>> def sigopt_hp_space(trial):
... return [
... {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double"},
... {
... "categorical_values": ["16", "32", "64", "128"],
... "name": "per_device_train_batch_size",
... "type": "categorical",
... },
... ]
```
For optuna, see the optuna [object_parameter](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html#sphx-glr-tutorial-10-key-features-002-configurations-py) documentation; it looks like the following:
```py
>>> def optuna_hp_space(trial):
... return {
... "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
... "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32, 64, 128]),
... }
```
Optuna provides multi-objective HPO. You can pass `direction` in `hyperparameter_search` and define your own `compute_objective` to return multiple objective values. The Pareto Front (`List[BestRun]`) will be returned by `hyperparameter_search`; refer to the test case `TrainerHyperParameterMultiObjectOptunaIntegrationTest` in [test_trainer](https://github.com/huggingface/transformers/blob/main/tests/trainer/test_trainer.py). It looks like the following:
```py
>>> best_trials = trainer.hyperparameter_search(
... direction=["minimize", "maximize"],
... backend="optuna",
... hp_space=optuna_hp_space,
... n_trials=20,
... compute_objective=compute_objective,
... )
```
For raytune, see the raytune [object_parameter](https://docs.ray.io/en/latest/tune/api/search_space.html) documentation; it looks like the following:
```py
>>> def ray_hp_space(trial):
... return {
... "learning_rate": tune.loguniform(1e-6, 1e-4),
... "per_device_train_batch_size": tune.choice([16, 32, 64, 128]),
... }
```
For wandb, see the wandb [object_parameter](https://docs.wandb.ai/guides/sweeps/configuration) documentation; it looks like the following:
```py
>>> def wandb_hp_space(trial):
... return {
... "method": "random",
... "metric": {"name": "objective", "goal": "minimize"},
... "parameters": {
... "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
... "per_device_train_batch_size": {"values": [16, 32, 64, 128]},
... },
... }
```
Define a `model_init` function and pass it to the [`Trainer`]. For example:
```py
>>> def model_init(trial):
... return AutoModelForSequenceClassification.from_pretrained(
... model_args.model_name_or_path,
... from_tf=bool(".ckpt" in model_args.model_name_or_path),
... config=config,
... cache_dir=model_args.cache_dir,
... revision=model_args.model_revision,
... token=True if model_args.use_auth_token else None,
... )
```
Create a [`Trainer`] with your `model_init` function, training arguments, training and test datasets, and evaluation function:
```py
>>> trainer = Trainer(
... model=None,
... args=training_args,
... train_dataset=small_train_dataset,
... eval_dataset=small_eval_dataset,
... compute_metrics=compute_metrics,
... tokenizer=tokenizer,
... model_init=model_init,
... data_collator=data_collator,
... )
```
Call hyperparameter search to get the best trial parameters. The backend can be `"optuna"`/`"sigopt"`/`"wandb"`/`"ray"`; `direction` can be `"minimize"` or `"maximize"`, indicating whether the objective should be minimized or maximized.
You can define your own `compute_objective` function. If it is not defined, the default `compute_objective` is called, and the sum of the eval metrics (like f1) is returned as the objective value.
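For instance, an objective that tracks only the F1 score could be defined as follows — a minimal sketch, assuming your `compute_metrics` function reports an `eval_f1` entry:
```py
>>> def compute_objective(metrics):
...     return metrics["eval_f1"]
```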
```py
>>> best_trial = trainer.hyperparameter_search(
... direction="maximize",
... backend="optuna",
... hp_space=optuna_hp_space,
... n_trials=20,
... compute_objective=compute_objective,
... )
```
## Hyperparameter search for DDP finetune
Currently, hyperparameter search for DDP is enabled for optuna and sigopt. Only the rank-zero process generates the search trial and passes the arguments to the other ranks.
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Callbacks
Callbacks are objects that can customize the behavior of the training loop in the PyTorch
[`Trainer`] (this feature is not yet implemented in TensorFlow). They can inspect the training loop
state (for progress reporting, logging on TensorBoard or other ML platforms...) and take decisions (like early
stopping).
Callbacks are "read only" pieces of code: apart from the [`TrainerControl`] object they return, they
cannot change anything in the training loop. For customizations that require changes in the training loop, you should
subclass [`Trainer`] and override the methods you need (see [trainer](trainer) for examples).
By default, `TrainingArguments.report_to` is set to `"all"`, so a [`Trainer`] will use the following callbacks.
- [`DefaultFlowCallback`] which handles the default behavior for logging, saving and evaluation.
- [`PrinterCallback`] or [`ProgressCallback`] to display progress and print the
logs (the first one is used if you deactivate tqdm through the [`TrainingArguments`], otherwise
it's the second one).
- [`~integrations.TensorBoardCallback`] if tensorboard is accessible (either through PyTorch >= 1.4
or tensorboardX).
- [`~integrations.WandbCallback`] if [wandb](https://www.wandb.com/) is installed.
- [`~integrations.CometCallback`] if [comet_ml](https://www.comet.ml/site/) is installed.
- [`~integrations.MLflowCallback`] if [mlflow](https://www.mlflow.org/) is installed.
- [`~integrations.NeptuneCallback`] if [neptune](https://neptune.ai/) is installed.
- [`~integrations.AzureMLCallback`] if [azureml-sdk](https://pypi.org/project/azureml-sdk/) is
installed.
- [`~integrations.CodeCarbonCallback`] if [codecarbon](https://pypi.org/project/codecarbon/) is
installed.
- [`~integrations.ClearMLCallback`] if [clearml](https://github.com/allegroai/clearml) is installed.
- [`~integrations.DagsHubCallback`] if [dagshub](https://dagshub.com/) is installed.
- [`~integrations.FlyteCallback`] if [flyte](https://flyte.org/) is installed.
- [`~integrations.DVCLiveCallback`] if [dvclive](https://dvc.org/doc/dvclive) is installed.
If a package is installed but you don't wish to use the accompanying integration, you can change `TrainingArguments.report_to` to a list of just those integrations you want to use (e.g. `["azure_ml", "wandb"]`).
The main class that implements callbacks is [`TrainerCallback`]. It gets the
[`TrainingArguments`] used to instantiate the [`Trainer`], can access that
Trainer's internal state via [`TrainerState`], and can take some actions on the training loop via
[`TrainerControl`].
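As a minimal sketch of this interaction (the class name and step budget are illustrative), a callback can read the loop state from [`TrainerState`] and stop training by flipping a flag on [`TrainerControl`]:
```python
from transformers import TrainerCallback


class StopAfterNStepsCallback(TrainerCallback):
    "A callback that stops training once a fixed step budget is reached"

    def __init__(self, max_steps=100):
        self.max_steps = max_steps

    def on_step_end(self, args, state, control, **kwargs):
        # state is read-only; control is the only way to influence the loop
        if state.global_step >= self.max_steps:
            control.should_training_stop = True
        return control
```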
## Available Callbacks
Here is the list of the available [`TrainerCallback`] in the library:
[[autodoc]] integrations.CometCallback
- setup
[[autodoc]] DefaultFlowCallback
[[autodoc]] PrinterCallback
[[autodoc]] ProgressCallback
[[autodoc]] EarlyStoppingCallback
[[autodoc]] integrations.TensorBoardCallback
[[autodoc]] integrations.WandbCallback
- setup
[[autodoc]] integrations.MLflowCallback
- setup
[[autodoc]] integrations.AzureMLCallback
[[autodoc]] integrations.CodeCarbonCallback
[[autodoc]] integrations.NeptuneCallback
[[autodoc]] integrations.ClearMLCallback
[[autodoc]] integrations.DagsHubCallback
[[autodoc]] integrations.FlyteCallback
[[autodoc]] integrations.DVCLiveCallback
- setup
## TrainerCallback
[[autodoc]] TrainerCallback
Here is an example of how to register a custom callback with the PyTorch [`Trainer`]:
```python
class MyCallback(TrainerCallback):
"A callback that prints a message at the beginning of training"
def on_train_begin(self, args, state, control, **kwargs):
print("Starting training")
trainer = Trainer(
model,
args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
callbacks=[MyCallback], # We can either pass the callback class this way or an instance of it (MyCallback())
)
```
Another way to register a callback is to call `trainer.add_callback()` as follows:
```python
trainer = Trainer(...)
trainer.add_callback(MyCallback)
# Alternatively, we can pass an instance of the callback class
trainer.add_callback(MyCallback())
```
## TrainerState
[[autodoc]] TrainerState
## TrainerControl
[[autodoc]] TrainerControl
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Tokenizer
A tokenizer is in charge of preparing the inputs for a model. The library contains tokenizers for all the models. Most
of the tokenizers are available in two flavors: a full python implementation and a "Fast" implementation based on the
Rust library [🤗 Tokenizers](https://github.com/huggingface/tokenizers). The "Fast" implementations allow:
1. a significant speed-up in particular when doing batched tokenization and
2. additional methods to map between the original string (character and words) and the token space (e.g. getting the
index of the token comprising a given character or the span of characters corresponding to a given token).
The base classes [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`]
implement the common methods for encoding string inputs in model inputs (see below) and instantiating/saving python and
"Fast" tokenizers either from a local file or directory or from a pretrained tokenizer provided by the library
(downloaded from HuggingFace's AWS S3 repository). They both rely on
[`~tokenization_utils_base.PreTrainedTokenizerBase`] that contains the common methods, and
[`~tokenization_utils_base.SpecialTokensMixin`].
[`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] thus implement the main
methods for using all the tokenizers (see the sketch after this list):
- Tokenizing (splitting strings in sub-word token strings), converting tokens strings to ids and back, and
encoding/decoding (i.e., tokenizing and converting to integers).
- Adding new tokens to the vocabulary in a way that is independent of the underlying structure (BPE, SentencePiece...).
- Managing special tokens (like mask, beginning-of-sentence, etc.): adding them, assigning them to attributes in the
tokenizer for easy access and making sure they are not split during tokenization.
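A minimal sketch of these methods in action, assuming the `google-bert/bert-base-uncased` checkpoint:
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

tokens = tokenizer.tokenize("Hello world!")  # split into sub-word token strings
ids = tokenizer.convert_tokens_to_ids(tokens)  # token strings -> ids
text = tokenizer.decode(tokenizer.encode("Hello world!"))  # ids -> string, with special tokens
tokenizer.add_tokens(["<new_token>"])  # extend the vocabulary with a new token
```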
[`BatchEncoding`] holds the output of the
[`~tokenization_utils_base.PreTrainedTokenizerBase`]'s encoding methods (`__call__`,
`encode_plus` and `batch_encode_plus`) and is derived from a Python dictionary. When the tokenizer is a pure python
tokenizer, this class behaves just like a standard python dictionary and holds the various model inputs computed by
these methods (`input_ids`, `attention_mask`...). When the tokenizer is a "Fast" tokenizer (i.e., backed by
HuggingFace [tokenizers library](https://github.com/huggingface/tokenizers)), this class provides in addition
several advanced alignment methods which can be used to map between the original string (character and words) and the
token space (e.g., getting the index of the token comprising a given character or the span of characters corresponding
to a given token).
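For example, with a "Fast" tokenizer, the returned [`BatchEncoding`] can map between characters and tokens — a minimal sketch:
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")  # fast tokenizer by default
encoding = tokenizer("Transformers are great")

encoding.tokens()  # token strings, including special tokens
encoding.word_ids()  # word index of each token (None for special tokens)
encoding.char_to_token(0)  # index of the token covering character 0
encoding.token_to_chars(1)  # character span covered by token 1
```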
## PreTrainedTokenizer
[[autodoc]] PreTrainedTokenizer
- __call__
- add_tokens
- add_special_tokens
- apply_chat_template
- batch_decode
- decode
- encode
- push_to_hub
- all
## PreTrainedTokenizerFast
The [`PreTrainedTokenizerFast`] depends on the [tokenizers](https://huggingface.co/docs/tokenizers) library. The tokenizers obtained from the 🤗 Tokenizers library can be
loaded very simply into 🤗 Transformers. Take a look at the [Using tokenizers from 🤗 tokenizers](../fast_tokenizers) page to understand how this is done.
[[autodoc]] PreTrainedTokenizerFast
- __call__
- add_tokens
- add_special_tokens
- apply_chat_template
- batch_decode
- decode
- encode
- push_to_hub
- all
## BatchEncoding
[[autodoc]] BatchEncoding
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# BERTweet
## Overview
The BERTweet model was proposed in [BERTweet: A pre-trained language model for English Tweets](https://www.aclweb.org/anthology/2020.emnlp-demos.2.pdf) by Dat Quoc Nguyen, Thanh Vu, Anh Tuan Nguyen.
The abstract from the paper is the following:
*We present BERTweet, the first public large-scale pre-trained language model for English Tweets. Our BERTweet, having
the same architecture as BERT-base (Devlin et al., 2019), is trained using the RoBERTa pre-training procedure (Liu et
al., 2019). Experiments show that BERTweet outperforms strong baselines RoBERTa-base and XLM-R-base (Conneau et al.,
2020), producing better performance results than the previous state-of-the-art models on three Tweet NLP tasks:
Part-of-speech tagging, Named-entity recognition and text classification.*
This model was contributed by [dqnguyen](https://huggingface.co/dqnguyen). The original code can be found [here](https://github.com/VinAIResearch/BERTweet).
## Usage example
```python
>>> import torch
>>> from transformers import AutoModel, AutoTokenizer
>>> bertweet = AutoModel.from_pretrained("vinai/bertweet-base")
>>> # For transformers v4.x+:
>>> tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False)
>>> # For transformers v3.x:
>>> # tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base")
>>> # INPUT TWEET IS ALREADY NORMALIZED!
>>> line = "SC has first two presumptive cases of coronavirus , DHEC confirms HTTPURL via @USER :cry:"
>>> input_ids = torch.tensor([tokenizer.encode(line)])
>>> with torch.no_grad():
...     features = bertweet(input_ids)  # Model outputs are now tuples
>>> # With TensorFlow 2.0+:
>>> # from transformers import TFAutoModel
>>> # bertweet = TFAutoModel.from_pretrained("vinai/bertweet-base")
```
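The tweet in the example above is already normalized. For raw Tweets, the tokenizer can apply the normalization itself — a sketch assuming the `normalization` flag of [`BertweetTokenizer`], which may additionally require the `emoji` package:
```python
>>> tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", normalization=True, use_fast=False)

>>> raw_line = "SC has first two presumptive cases of coronavirus, DHEC confirms https://dhec.sc.gov via @DHEC 😢"
>>> input_ids = torch.tensor([tokenizer.encode(raw_line)])  # user mentions, URLs and emojis are normalized first
```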
<Tip>
This implementation is the same as BERT, except for the tokenization method. Refer to the [BERT documentation](bert) for
API reference information.
</Tip>
## BertweetTokenizer
[[autodoc]] BertweetTokenizer
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Data2Vec
## Overview
The Data2Vec model was proposed in [data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/pdf/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.
Data2Vec proposes a unified framework for self-supervised learning across different data modalities - text, audio and images.
Importantly, predicted targets for pre-training are contextualized latent representations of the inputs, rather than modality-specific, context-independent targets.
The abstract from the paper is the following:
*While the general idea of self-supervised learning is identical across modalities, the actual algorithms and
objectives differ widely because they were developed with a single modality in mind. To get us closer to general
self-supervised learning, we present data2vec, a framework that uses the same learning method for either speech,
NLP or computer vision. The core idea is to predict latent representations of the full input data based on a
masked view of the input in a selfdistillation setup using a standard Transformer architecture.
Instead of predicting modality-specific targets such as words, visual tokens or units of human speech which
are local in nature, data2vec predicts contextualized latent representations that contain information from
the entire input. Experiments on the major benchmarks of speech recognition, image classification, and
natural language understanding demonstrate a new state of the art or competitive performance to predominant approaches.
Models and code are available at www.github.com/pytorch/fairseq/tree/master/examples/data2vec.*
This model was contributed by [edugp](https://huggingface.co/edugp) and [patrickvonplaten](https://huggingface.co/patrickvonplaten).
[sayakpaul](https://github.com/sayakpaul) and [Rocketknight1](https://github.com/Rocketknight1) contributed Data2Vec for vision in TensorFlow.
The original code (for NLP and Speech) can be found [here](https://github.com/pytorch/fairseq/tree/main/examples/data2vec).
The original code for vision can be found [here](https://github.com/facebookresearch/data2vec_vision/tree/main/beit).
## Usage tips
- Data2VecAudio, Data2VecText, and Data2VecVision have all been trained using the same self-supervised learning method.
- For Data2VecAudio, preprocessing is identical to [`Wav2Vec2Model`], including feature extraction.
- For Data2VecText, preprocessing is identical to [`RobertaModel`], including tokenization (see the sketch after this list).
- For Data2VecVision, preprocessing is identical to [`BeitModel`], including feature extraction.
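A minimal sketch for the text modality, assuming the `facebook/data2vec-text-base` checkpoint:
```python
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")  # RoBERTa-style preprocessing
model = AutoModel.from_pretrained("facebook/data2vec-text-base")

inputs = tokenizer("data2vec uses the same objective for text, audio and vision.", return_tensors="pt")
outputs = model(**inputs)
last_hidden_state = outputs.last_hidden_state
```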
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Data2Vec.
<PipelineTag pipeline="image-classification"/>
- [`Data2VecVisionForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
- To fine-tune [`TFData2VecVisionForImageClassification`] on a custom dataset, see [this notebook](https://colab.research.google.com/github/sayakpaul/TF-2.0-Hacks/blob/master/data2vec_vision_image_classification.ipynb).
**Data2VecText documentation resources**
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Causal language modeling task guide](../tasks/language_modeling)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
**Data2VecAudio documentation resources**
- [Audio classification task guide](../tasks/audio_classification)
- [Automatic speech recognition task guide](../tasks/asr)
**Data2VecVision documentation resources**
- [Image classification](../tasks/image_classification)
- [Semantic segmentation](../tasks/semantic_segmentation)
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
## Data2VecTextConfig
[[autodoc]] Data2VecTextConfig
## Data2VecAudioConfig
[[autodoc]] Data2VecAudioConfig
## Data2VecVisionConfig
[[autodoc]] Data2VecVisionConfig
<frameworkcontent>
<pt>
## Data2VecAudioModel
[[autodoc]] Data2VecAudioModel
- forward
## Data2VecAudioForAudioFrameClassification
[[autodoc]] Data2VecAudioForAudioFrameClassification
- forward
## Data2VecAudioForCTC
[[autodoc]] Data2VecAudioForCTC
- forward
## Data2VecAudioForSequenceClassification
[[autodoc]] Data2VecAudioForSequenceClassification
- forward
## Data2VecAudioForXVector
[[autodoc]] Data2VecAudioForXVector
- forward
## Data2VecTextModel
[[autodoc]] Data2VecTextModel
- forward
## Data2VecTextForCausalLM
[[autodoc]] Data2VecTextForCausalLM
- forward
## Data2VecTextForMaskedLM
[[autodoc]] Data2VecTextForMaskedLM
- forward
## Data2VecTextForSequenceClassification
[[autodoc]] Data2VecTextForSequenceClassification
- forward
## Data2VecTextForMultipleChoice
[[autodoc]] Data2VecTextForMultipleChoice
- forward
## Data2VecTextForTokenClassification
[[autodoc]] Data2VecTextForTokenClassification
- forward
## Data2VecTextForQuestionAnswering
[[autodoc]] Data2VecTextForQuestionAnswering
- forward
## Data2VecVisionModel
[[autodoc]] Data2VecVisionModel
- forward
## Data2VecVisionForImageClassification
[[autodoc]] Data2VecVisionForImageClassification
- forward
## Data2VecVisionForSemanticSegmentation
[[autodoc]] Data2VecVisionForSemanticSegmentation
- forward
</pt>
<tf>
## TFData2VecVisionModel
[[autodoc]] TFData2VecVisionModel
- call
## TFData2VecVisionForImageClassification
[[autodoc]] TFData2VecVisionForImageClassification
- call
## TFData2VecVisionForSemanticSegmentation
[[autodoc]] TFData2VecVisionForSemanticSegmentation
- call
</tf>
</frameworkcontent>
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# DPR
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=dpr">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-dpr-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/dpr-question_encoder-bert-base-multilingual">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview
Dense Passage Retrieval (DPR) is a set of tools and models for state-of-the-art open-domain Q&A research. It was
introduced in [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by
Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, Wen-tau Yih.
The abstract from the paper is the following:
*Open-domain question answering relies on efficient passage retrieval to select candidate contexts, where traditional
sparse vector space models, such as TF-IDF or BM25, are the de facto method. In this work, we show that retrieval can
be practically implemented using dense representations alone, where embeddings are learned from a small number of
questions and passages by a simple dual-encoder framework. When evaluated on a wide range of open-domain QA datasets,
our dense retriever outperforms a strong Lucene-BM25 system largely by 9%-19% absolute in terms of top-20 passage
retrieval accuracy, and helps our end-to-end QA system establish new state-of-the-art on multiple open-domain QA
benchmarks.*
This model was contributed by [lhoestq](https://huggingface.co/lhoestq). The original code can be found [here](https://github.com/facebookresearch/DPR).
## Usage tips
- DPR consists of three models:
    * Question encoder: encodes questions as vectors (see the sketch after this list)
    * Context encoder: encodes contexts as vectors
    * Reader: extracts the answer to the question from the retrieved contexts, along with a relevance score (high if the inferred span actually answers the question).
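A minimal sketch of encoding a question into a dense vector, assuming the `facebook/dpr-question_encoder-single-nq-base` checkpoint:
```python
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer

tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
model = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

inputs = tokenizer("What is the capital of France?", return_tensors="pt")
question_embedding = model(**inputs).pooler_output  # dense vector used for retrieval
```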
## DPRConfig
[[autodoc]] DPRConfig
## DPRContextEncoderTokenizer
[[autodoc]] DPRContextEncoderTokenizer
## DPRContextEncoderTokenizerFast
[[autodoc]] DPRContextEncoderTokenizerFast
## DPRQuestionEncoderTokenizer
[[autodoc]] DPRQuestionEncoderTokenizer
## DPRQuestionEncoderTokenizerFast
[[autodoc]] DPRQuestionEncoderTokenizerFast
## DPRReaderTokenizer
[[autodoc]] DPRReaderTokenizer
## DPRReaderTokenizerFast
[[autodoc]] DPRReaderTokenizerFast
## DPR specific outputs
[[autodoc]] models.dpr.modeling_dpr.DPRContextEncoderOutput
[[autodoc]] models.dpr.modeling_dpr.DPRQuestionEncoderOutput
[[autodoc]] models.dpr.modeling_dpr.DPRReaderOutput
<frameworkcontent>
<pt>
## DPRContextEncoder
[[autodoc]] DPRContextEncoder
- forward
## DPRQuestionEncoder
[[autodoc]] DPRQuestionEncoder
- forward
## DPRReader
[[autodoc]] DPRReader
- forward
</pt>
<tf>
## TFDPRContextEncoder
[[autodoc]] TFDPRContextEncoder
- call
## TFDPRQuestionEncoder
[[autodoc]] TFDPRQuestionEncoder
- call
## TFDPRReader
[[autodoc]] TFDPRReader
- call
</tf>
</frameworkcontent>
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# FNet
## Overview
The FNet model was proposed in [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by
James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. The model replaces the self-attention layer in a BERT
model with a Fourier transform which returns only the real parts of the transform. The model is significantly faster
than the BERT model because it has fewer parameters and is more memory efficient. The model achieves about 92-97%
accuracy of BERT counterparts on GLUE benchmark, and trains much faster than the BERT model. The abstract from the
paper is the following:
*We show that Transformer encoder architectures can be sped up, with limited accuracy costs, by replacing the
self-attention sublayers with simple linear transformations that "mix" input tokens. These linear mixers, along with
standard nonlinearities in feed-forward layers, prove competent at modeling semantic relationships in several text
classification tasks. Most surprisingly, we find that replacing the self-attention sublayer in a Transformer encoder
with a standard, unparameterized Fourier Transform achieves 92-97% of the accuracy of BERT counterparts on the GLUE
benchmark, but trains 80% faster on GPUs and 70% faster on TPUs at standard 512 input lengths. At longer input lengths,
our FNet model is significantly faster: when compared to the "efficient" Transformers on the Long Range Arena
benchmark, FNet matches the accuracy of the most accurate models, while outpacing the fastest models across all
sequence lengths on GPUs (and across relatively shorter lengths on TPUs). Finally, FNet has a light memory footprint
and is particularly efficient at smaller model sizes; for a fixed speed and accuracy budget, small FNet models
outperform Transformer counterparts.*
This model was contributed by [gchhablani](https://huggingface.co/gchhablani). The original code can be found [here](https://github.com/google-research/google-research/tree/master/f_net).
## Usage tips
The model was trained without an attention mask as it is based on a Fourier transform. The model was trained with
a maximum sequence length of 512, which includes pad tokens. Hence, it is highly recommended to use the same maximum
sequence length for fine-tuning and inference.
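A minimal sketch that follows this recommendation, assuming the `google/fnet-base` checkpoint:
```python
from transformers import AutoTokenizer, FNetModel

tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
model = FNetModel.from_pretrained("google/fnet-base")

# pad to the pre-training length of 512; FNet uses no attention mask
inputs = tokenizer(
    "FNet mixes tokens with Fourier transforms.",
    padding="max_length",
    max_length=512,
    return_tensors="pt",
)
outputs = model(**inputs)
```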
## Resources
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
## FNetConfig
[[autodoc]] FNetConfig
## FNetTokenizer
[[autodoc]] FNetTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## FNetTokenizerFast
[[autodoc]] FNetTokenizerFast
## FNetModel
[[autodoc]] FNetModel
- forward
## FNetForPreTraining
[[autodoc]] FNetForPreTraining
- forward
## FNetForMaskedLM
[[autodoc]] FNetForMaskedLM
- forward
## FNetForNextSentencePrediction
[[autodoc]] FNetForNextSentencePrediction
- forward
## FNetForSequenceClassification
[[autodoc]] FNetForSequenceClassification
- forward
## FNetForMultipleChoice
[[autodoc]] FNetForMultipleChoice
- forward
## FNetForTokenClassification
[[autodoc]] FNetForTokenClassification
- forward
## FNetForQuestionAnswering
[[autodoc]] FNetForQuestionAnswering
- forward
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# LeViT
## Overview
The LeViT model was proposed in [LeViT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze. LeViT improves the [Vision Transformer (ViT)](vit) in performance and efficiency by a few architectural differences such as activation maps with decreasing resolutions in Transformers and the introduction of an attention bias to integrate positional information.
The abstract from the paper is the following:
*We design a family of image classification architectures that optimize the trade-off between accuracy
and efficiency in a high-speed regime. Our work exploits recent findings in attention-based architectures,
which are competitive on highly parallel processing hardware. We revisit principles from the extensive
literature on convolutional neural networks to apply them to transformers, in particular activation maps
with decreasing resolutions. We also introduce the attention bias, a new way to integrate positional information
in vision transformers. As a result, we propose LeVIT: a hybrid neural network for fast inference image classification.
We consider different measures of efficiency on different hardware platforms, so as to best reflect a wide range of
application scenarios. Our extensive experiments empirically validate our technical choices and show they are suitable
to most architectures. Overall, LeViT significantly outperforms existing convnets and vision transformers with respect
to the speed/accuracy tradeoff. For example, at 80% ImageNet top-1 accuracy, LeViT is 5 times faster than EfficientNet on CPU.*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/levit_architecture.png"
alt="drawing" width="600"/>
<small> LeViT Architecture. Taken from the <a href="https://arxiv.org/abs/2104.01136">original paper</a>.</small>
This model was contributed by [anugunj](https://huggingface.co/anugunj). The original code can be found [here](https://github.com/facebookresearch/LeViT).
## Usage tips
- Compared to ViT, LeViT models use an additional distillation head to effectively learn from a teacher (which, in the LeViT paper, is a ResNet-like model). The distillation head is learned through backpropagation under supervision of a ResNet-like model. They also draw inspiration from convolutional neural networks, using activation maps with decreasing resolutions to increase efficiency.
- There are 2 ways to fine-tune distilled models, either (1) in a classic way, by only placing a prediction head on top
of the final hidden state and not using the distillation head, or (2) by placing both a prediction head and distillation
head on top of the final hidden state. In that case, the prediction head is trained using regular cross-entropy between
the prediction of the head and the ground-truth label, while the distillation prediction head is trained using hard distillation
(cross-entropy between the prediction of the distillation head and the label predicted by the teacher). At inference time,
one takes the average prediction between both heads as final prediction. (2) is also called "fine-tuning with distillation",
because one relies on a teacher that has already been fine-tuned on the downstream dataset. In terms of models, (1) corresponds
to [`LevitForImageClassification`] and (2) corresponds to [`LevitForImageClassificationWithTeacher`] (see the sketch after this list).
- All released checkpoints were pre-trained and fine-tuned on [ImageNet-1k](https://huggingface.co/datasets/imagenet-1k)
(also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes) only. No external data was used. This is in
contrast with the original ViT model, which used external data like the JFT-300M dataset/Imagenet-21k for
pre-training.
- The authors of LeViT released 5 trained LeViT models, which you can directly plug into [`LevitModel`] or [`LevitForImageClassification`].
Techniques like data augmentation, optimization, and regularization were used in order to simulate training on a much larger dataset
(while only using ImageNet-1k for pre-training). The 5 variants available are (all trained on images of size 224x224):
*facebook/levit-128S*, *facebook/levit-128*, *facebook/levit-192*, *facebook/levit-256* and
*facebook/levit-384*. Note that one should use [`LevitImageProcessor`] in order to
prepare images for the model.
- [`LevitForImageClassificationWithTeacher`] currently supports only inference and not training or fine-tuning.
- You can check out demo notebooks regarding inference as well as fine-tuning on custom data [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer)
(you can just replace [`ViTFeatureExtractor`] by [`LevitImageProcessor`] and [`ViTForImageClassification`] by [`LevitForImageClassification`] or [`LevitForImageClassificationWithTeacher`]).
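A minimal inference sketch for option (2) above, assuming the `facebook/levit-128S` checkpoint:
```python
import requests
from PIL import Image
from transformers import LevitForImageClassificationWithTeacher, LevitImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
model = LevitForImageClassificationWithTeacher.from_pretrained("facebook/levit-128S")

inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits  # average of the classification and distillation heads
print(model.config.id2label[logits.argmax(-1).item()])
```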
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LeViT.
<PipelineTag pipeline="image-classification"/>
- [`LevitForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
- See also: [Image classification task guide](../tasks/image_classification)
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
## LevitConfig
[[autodoc]] LevitConfig
## LevitFeatureExtractor
[[autodoc]] LevitFeatureExtractor
- __call__
## LevitImageProcessor
[[autodoc]] LevitImageProcessor
- preprocess
## LevitModel
[[autodoc]] LevitModel
- forward
## LevitForImageClassification
[[autodoc]] LevitForImageClassification
- forward
## LevitForImageClassificationWithTeacher
[[autodoc]] LevitForImageClassificationWithTeacher
- forward
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# OpenAI GPT
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=openai-gpt">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-openai--gpt-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/openai-gpt">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview
OpenAI GPT model was proposed in [Improving Language Understanding by Generative Pre-Training](https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf)
by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. It's a causal (unidirectional) transformer
pre-trained using language modeling on a large corpus with long range dependencies, the Toronto Book Corpus.
The abstract from the paper is the following:
*Natural language understanding comprises a wide range of diverse tasks such as textual entailment, question answering,
semantic similarity assessment, and document classification. Although large unlabeled text corpora are abundant,
labeled data for learning these specific tasks is scarce, making it challenging for discriminatively trained models to
perform adequately. We demonstrate that large gains on these tasks can be realized by generative pretraining of a
language model on a diverse corpus of unlabeled text, followed by discriminative fine-tuning on each specific task. In
contrast to previous approaches, we make use of task-aware input transformations during fine-tuning to achieve
effective transfer while requiring minimal changes to the model architecture. We demonstrate the effectiveness of our
approach on a wide range of benchmarks for natural language understanding. Our general task-agnostic model outperforms
discriminatively trained models that use architectures specifically crafted for each task, significantly improving upon
the state of the art in 9 out of the 12 tasks studied.*
[Write With Transformer](https://transformer.huggingface.co/doc/gpt) is a webapp created and hosted by Hugging Face
showcasing the generative capabilities of several models. GPT is one of them.
This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/openai/finetune-transformer-lm).
## Usage tips
- GPT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than
the left.
- GPT was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next
token in a sequence. Leveraging this feature allows GPT to generate syntactically coherent text, as can be
observed in the *run_generation.py* example script (see the sketch after this list).
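A minimal generation sketch, assuming the `openai-community/openai-gpt` checkpoint id (older library versions use plain `openai-gpt`):
```python
from transformers import pipeline

generator = pipeline("text-generation", model="openai-community/openai-gpt")
print(generator("The history of natural language processing", max_new_tokens=30)[0]["generated_text"])
```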
Note:
If you want to reproduce the original tokenization process of the *OpenAI GPT* paper, you will need to install `ftfy`
and `SpaCy`:
```bash
pip install spacy ftfy==4.4.3
python -m spacy download en
```
If you don't install `ftfy` and `SpaCy`, the [`OpenAIGPTTokenizer`] will default to tokenize
using BERT's `BasicTokenizer` followed by Byte-Pair Encoding (which should be fine for most usage, don't worry).
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with OpenAI GPT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
<PipelineTag pipeline="text-classification"/>
- A blog post on [outperforming OpenAI GPT-3 with SetFit for text-classification](https://www.philschmid.de/getting-started-setfit).
- See also: [Text classification task guide](../tasks/sequence_classification)
<PipelineTag pipeline="text-generation"/>
- A blog on how to [Finetune a non-English GPT-2 Model with Hugging Face](https://www.philschmid.de/fine-tune-a-non-english-gpt-2-model-with-huggingface).
- A blog on [How to generate text: using different decoding methods for language generation with Transformers](https://huggingface.co/blog/how-to-generate) with GPT-2.
- A blog on [Training CodeParrot 🦜 from Scratch](https://huggingface.co/blog/codeparrot), a large GPT-2 model.
- A blog on [Faster Text Generation with TensorFlow and XLA](https://huggingface.co/blog/tf-xla-generate) with GPT-2.
- A blog on [How to train a Language Model with Megatron-LM](https://huggingface.co/blog/megatron-training) with a GPT-2 model.
- A notebook on how to [finetune GPT2 to generate lyrics in the style of your favorite artist](https://colab.research.google.com/github/AlekseyKorshuk/huggingartists/blob/master/huggingartists-demo.ipynb). 🌎
- A notebook on how to [finetune GPT2 to generate tweets in the style of your favorite Twitter user](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb). 🌎
- [Causal language modeling](https://huggingface.co/course/en/chapter7/6?fw=pt#training-a-causal-language-model-from-scratch) chapter of the 🤗 Hugging Face Course.
- [`OpenAIGPTLMHeadModel`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#gpt-2gpt-and-causal-language-modeling), [text generation example script](https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-generation/run_generation.py) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb).
- [`TFOpenAIGPTLMHeadModel`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_clmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
- See also: [Causal language modeling task guide](../tasks/language_modeling)
<PipelineTag pipeline="token-classification"/>
- A course material on [Byte-Pair Encoding tokenization](https://huggingface.co/course/en/chapter6/5).
## OpenAIGPTConfig
[[autodoc]] OpenAIGPTConfig
## OpenAIGPTTokenizer
[[autodoc]] OpenAIGPTTokenizer
- save_vocabulary
## OpenAIGPTTokenizerFast
[[autodoc]] OpenAIGPTTokenizerFast
## OpenAI specific outputs
[[autodoc]] models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput
[[autodoc]] models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput
<frameworkcontent>
<pt>
## OpenAIGPTModel
[[autodoc]] OpenAIGPTModel
- forward
## OpenAIGPTLMHeadModel
[[autodoc]] OpenAIGPTLMHeadModel
- forward
## OpenAIGPTDoubleHeadsModel
[[autodoc]] OpenAIGPTDoubleHeadsModel
- forward
## OpenAIGPTForSequenceClassification
[[autodoc]] OpenAIGPTForSequenceClassification
- forward
</pt>
<tf>
## TFOpenAIGPTModel
[[autodoc]] TFOpenAIGPTModel
- call
## TFOpenAIGPTLMHeadModel
[[autodoc]] TFOpenAIGPTLMHeadModel
- call
## TFOpenAIGPTDoubleHeadsModel
[[autodoc]] TFOpenAIGPTDoubleHeadsModel
- call
## TFOpenAIGPTForSequenceClassification
[[autodoc]] TFOpenAIGPTForSequenceClassification
- call
</tf>
</frameworkcontent>
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# ProphetNet
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=prophetnet">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-prophetnet-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/prophetnet-large-uncased">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview
The ProphetNet model was proposed in [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei
Zhang, Ming Zhou on 13 Jan, 2020.
ProphetNet is an encoder-decoder model and can predict n future tokens for "ngram" language modeling instead of just
the next token.
The abstract from the paper is the following:
*In this paper, we present a new sequence-to-sequence pretraining model called ProphetNet, which introduces a novel
self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of
the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by
n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time
step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent
overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale
dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for
abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new
state-of-the-art results on all these datasets compared to the models using the same scale pretraining corpus.*
The authors' code can be found [here](https://github.com/microsoft/ProphetNet).
## Usage tips
- ProphetNet is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than
the left.
- The model architecture is based on the original Transformer, but replaces the "standard" self-attention mechanism in the decoder with a main self-attention mechanism and a self- and n-stream (predict) self-attention mechanism.
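To see the encoder-decoder interface in action, here is a minimal, hedged sketch of conditional generation (the `microsoft/prophetnet-large-uncased` checkpoint is the pretrained base and the input sentence is arbitrary; fine-tuned checkpoints will give more meaningful outputs):
```python
from transformers import ProphetNetTokenizer, ProphetNetForConditionalGeneration

tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased")

# encode the source text and decode with beam search
inputs = tokenizer("ProphetNet predicts the next n tokens at each step.", return_tensors="pt")
output_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=50)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))
```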
## Resources
- [Causal language modeling task guide](../tasks/language_modeling)
- [Translation task guide](../tasks/translation)
- [Summarization task guide](../tasks/summarization)
## ProphetNetConfig
[[autodoc]] ProphetNetConfig
## ProphetNetTokenizer
[[autodoc]] ProphetNetTokenizer
## ProphetNet specific outputs
[[autodoc]] models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput
[[autodoc]] models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput
[[autodoc]] models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput
[[autodoc]] models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput
## ProphetNetModel
[[autodoc]] ProphetNetModel
- forward
## ProphetNetEncoder
[[autodoc]] ProphetNetEncoder
- forward
## ProphetNetDecoder
[[autodoc]] ProphetNetDecoder
- forward
## ProphetNetForConditionalGeneration
[[autodoc]] ProphetNetForConditionalGeneration
- forward
## ProphetNetForCausalLM
[[autodoc]] ProphetNetForCausalLM
- forward
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# RWKV
## Overview
The RWKV model was proposed in [this repo](https://github.com/BlinkDL/RWKV-LM).
It suggests a tweak in the traditional Transformer attention to make it linear. This way, the model can be used as a recurrent network: passing inputs for time step 0 and time step 1 together is the same as passing inputs at time step 0, then inputs at time step 1 along with the state of time step 0 (see example below).
This can be more efficient than a regular Transformer and can deal with sentences of any length (even if the model uses a fixed context length for training).
This model was contributed by [sgugger](https://huggingface.co/sgugger).
The original code can be found [here](https://github.com/BlinkDL/RWKV-LM).
## Usage example
```py
import torch
from transformers import AutoTokenizer, RwkvConfig, RwkvModel
model = RwkvModel.from_pretrained("sgugger/rwkv-430M-pile")
tokenizer = AutoTokenizer.from_pretrained("sgugger/rwkv-430M-pile")
inputs = tokenizer("This is an example.", return_tensors="pt")
# Feed everything to the model
outputs = model(inputs["input_ids"])
output_whole = outputs.last_hidden_state
outputs = model(inputs["input_ids"][:, :2])
output_one = outputs.last_hidden_state
# Using the state computed on the first inputs, we will get the same output
outputs = model(inputs["input_ids"][:, 2:], state=outputs.state)
output_two = outputs.last_hidden_state
torch.allclose(torch.cat([output_one, output_two], dim=1), output_whole, atol=1e-5)
```
If you want to make sure the model stops generating when `'\n\n'` is detected, we recommend using the following stopping criteria:
```python
import torch
from transformers import StoppingCriteria

class RwkvStoppingCriteria(StoppingCriteria):
    def __init__(self, eos_sequence=[187, 187], eos_token_id=537):
        # [187, 187] is the token id sequence of "\n\n" for this tokenizer
        self.eos_sequence = eos_sequence
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # stop as soon as the last two generated tokens match the eos sequence
        last_2_ids = input_ids[:, -2:].tolist()
        return self.eos_sequence in last_2_ids

output = model.generate(inputs["input_ids"], max_new_tokens=64, stopping_criteria=[RwkvStoppingCriteria()])
```
## RwkvConfig
[[autodoc]] RwkvConfig
## RwkvModel
[[autodoc]] RwkvModel
- forward
## RwkvForCausalLM
[[autodoc]] RwkvForCausalLM
- forward
## Rwkv attention and the recurrent formulas
In a traditional auto-regressive Transformer, attention is written as
$$O = \hbox{softmax}(QK^{T} / \sqrt{d}) V$$
where \\(Q\\), \\(K\\) and \\(V\\) are matrices of shape `seq_len x hidden_size` named query, key and value (they are actually bigger matrices with a batch dimension and an attention head dimension, but we're only interested in the last two, which is where the matrix product is taken, so for the sake of simplicity we only consider those two). The product \\(QK^{T}\\) then has shape `seq_len x seq_len` and we can take the matrix product with \\(V\\) to get the output \\(O\\) of the same shape as the others.
Replacing the softmax by its value gives:
$$O_{i} = \frac{\sum_{j=1}^{i} e^{Q_{i} K_{j}^{T} / \sqrt{d}} V_{j}}{\sum_{j=1}^{i} e^{Q_{i} K_{j}^{T} / \sqrt{d}}}$$
Note that the entries in \\(QK^{T}\\) corresponding to \\(j > i\\) are masked (the sum stops at j) because the attention is not allowed to look at future tokens (only past ones).
In comparison, the RWKV attention is given by
$$O_{i} = \sigma(R_{i}) \frac{\sum_{j=1}^{i} e^{W_{i-j} + K_{j}} V_{j}}{\sum_{j=1}^{i} e^{W_{i-j} + K_{j}}}$$
where \\(R\\) is a new matrix called receptance by the author, \\(K\\) and \\(V\\) are still the key and value (\\(\sigma\\) here is the sigmoid function). \\(W\\) is a new vector that represents the position of the token and is given by
$$W_{0} = u \hbox{ and } W_{k} = (k-1)w \hbox{ for } k \geq 1$$
with \\(u\\) and \\(w\\) learnable parameters called in the code `time_first` and `time_decay` respectively. The numerator and denominator can both be expressed recursively. Naming them \\(N_{i}\\) and \\(D_{i}\\) we have:
$$N_{i} = e^{u + K_{i}} V_{i} + \hat{N}_{i} \hbox{ where } \hat{N}_{i} = e^{K_{i-1}} V_{i-1} + e^{w + K_{i-2}} V_{i-2} + \cdots + e^{(i-2)w + K_{1}} V_{1}$$
so \\(\hat{N}_{i}\\) (called `numerator_state` in the code) satisfies
$$\hat{N}_{0} = 0 \hbox{ and } \hat{N}_{j+1} = e^{K_{j}} V_{j} + e^{w} \hat{N}_{j}$$
and
$$D_{i} = e^{u + K_{i}} + \hat{D}_{i} \hbox{ where } \hat{D}_{i} = e^{K_{i-1}} + e^{w + K_{i-2}} + \cdots + e^{(i-2)w + K_{1}}$$
so \\(\hat{D}_{i}\\) (called `denominator_state` in the code) satisfies
$$\hat{D}_{0} = 0 \hbox{ and } \hat{D}_{j+1} = e^{K_{j}} + e^{w} \hat{D}_{j}$$
The actual recurrent formulas used are a tiny bit more complex, as for numerical stability we don't want to compute exponentials of big numbers. Usually the softmax is not computed as is; instead, the exponential of the maximum term is divided out of both the numerator and the denominator:
$$\frac{e^{x_{i}}}{\sum_{j=1}^{n} e^{x_{j}}} = \frac{e^{x_{i} - M}}{\sum_{j=1}^{n} e^{x_{j} - M}}$$
with \\(M\\) the maximum of all \\(x_{j}\\). So here on top of saving the numerator state (\\(\hat{N}\\)) and the denominator state (\\(\hat{D}\\)) we also keep track of the maximum of all terms encountered in the exponentials. So we actually use
$$\tilde{N}_{i} = e^{-M_{i}} \hat{N}_{i} \hbox{ and } \tilde{D}_{i} = e^{-M_{i}} \hat{D}_{i}$$
defined by the following recurrent formulas:
$$\tilde{N}_{0} = 0 \hbox{ and } \tilde{N}_{j+1} = e^{K_{j} - q} V_{j} + e^{w + M_{j} - q} \tilde{N}_{j} \hbox{ where } q = \max(K_{j}, w + M_{j})$$
and
$$\tilde{D}_{0} = 0 \hbox{ and } \tilde{D}_{j+1} = e^{K_{j} - q} + e^{w + M_{j} - q} \tilde{D}_{j} \hbox{ where } q = \max(K_{j}, w + M_{j})$$
and \\(M_{j+1} = q\\). With those, we can then compute
$$N_{i} = e^{u + K_{i} - q} V_{i} + e^{M_{i} - q} \tilde{N}_{i} \hbox{ where } q = \max(u + K_{i}, M_{i})$$
and
$$D_{i} = e^{u + K_{i} - q} + e^{M_{i} - q} \tilde{D}_{i} \hbox{ where } q = \max(u + K_{i}, M_{i})$$
which finally gives us
$$O_{i} = \sigma(R_{i}) \frac{N_{i}}{D_{i}}$$
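To make the recurrence concrete, here is a minimal, hedged reference implementation of these stable formulas in plain PyTorch (the function name and the single-head, unbatched layout are assumptions for readability; the actual model uses vectorized, optimized kernels):
```python
import torch

def rwkv_linear_attention(r, k, v, w, u):
    # Naive reference implementation of the stable recurrence above.
    # r, k, v: (seq_len, hidden); w (`time_decay`) and u (`time_first`): (hidden,)
    num_state = torch.zeros_like(k[0])                # \tilde{N}
    den_state = torch.zeros_like(k[0])                # \tilde{D}
    max_state = torch.full_like(k[0], float("-inf"))  # M
    out = torch.zeros_like(v)
    for i in range(k.shape[0]):
        # output at step i, using u (time_first) for the current token
        q = torch.maximum(u + k[i], max_state)
        num = torch.exp(u + k[i] - q) * v[i] + torch.exp(max_state - q) * num_state
        den = torch.exp(u + k[i] - q) + torch.exp(max_state - q) * den_state
        out[i] = torch.sigmoid(r[i]) * num / den
        # update the shared state for the next step
        q = torch.maximum(k[i], w + max_state)
        num_state = torch.exp(k[i] - q) * v[i] + torch.exp(w + max_state - q) * num_state
        den_state = torch.exp(k[i] - q) + torch.exp(w + max_state - q) * den_state
        max_state = q
    return out
```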
"file_path": "transformers/docs/source/en/model_doc/rwkv.md",
"repo_id": "transformers",
"token_count": 2548
} | 260 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer. -->
# TrOCR
## Overview
The TrOCR model was proposed in [TrOCR: Transformer-based Optical Character Recognition with Pre-trained
Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang,
Zhoujun Li, Furu Wei. TrOCR consists of an image Transformer encoder and an autoregressive text Transformer decoder to
perform [optical character recognition (OCR)](https://en.wikipedia.org/wiki/Optical_character_recognition).
The abstract from the paper is the following:
*Text recognition is a long-standing research problem for document digitalization. Existing approaches for text recognition
are usually built based on CNN for image understanding and RNN for char-level text generation. In addition, another language
model is usually needed to improve the overall accuracy as a post-processing step. In this paper, we propose an end-to-end
text recognition approach with pre-trained image Transformer and text Transformer models, namely TrOCR, which leverages the
Transformer architecture for both image understanding and wordpiece-level text generation. The TrOCR model is simple but
effective, and can be pre-trained with large-scale synthetic data and fine-tuned with human-labeled datasets. Experiments
show that the TrOCR model outperforms the current state-of-the-art models on both printed and handwritten text recognition
tasks.*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/trocr_architecture.jpg"
alt="drawing" width="600"/>
<small> TrOCR architecture. Taken from the <a href="https://arxiv.org/abs/2109.10282">original paper</a>. </small>
Please refer to the [`VisionEncoderDecoder`] class on how to use this model.
This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found
[here](https://github.com/microsoft/unilm/tree/6f60612e7cc86a2a1ae85c47231507a587ab4e01/trocr).
## Usage tips
- The quickest way to get started with TrOCR is by checking the [tutorial
notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/TrOCR), which show how to use the model
at inference time as well as fine-tuning on custom data.
- TrOCR is pre-trained in 2 stages before being fine-tuned on downstream datasets. It achieves state-of-the-art results
  on both printed (e.g. the [SROIE dataset](https://paperswithcode.com/dataset/sroie)) and handwritten (e.g. the [IAM
  Handwriting dataset](https://fki.tic.heia-fr.ch/databases/iam-handwriting-database)) text recognition tasks. For more
  information, see the [official models](https://huggingface.co/models?other=trocr).
- TrOCR is always used within the [VisionEncoderDecoder](vision-encoder-decoder) framework.
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with TrOCR. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
<PipelineTag pipeline="text-classification"/>
- A blog post on [Accelerating Document AI](https://huggingface.co/blog/document-ai) with TrOCR.
- A blog post on how to do [Document AI](https://github.com/philschmid/document-ai-transformers) with TrOCR.
- A notebook on how to [finetune TrOCR on IAM Handwriting Database using Seq2SeqTrainer](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TrOCR/Fine_tune_TrOCR_on_IAM_Handwriting_Database_using_Seq2SeqTrainer.ipynb).
- A notebook on [inference with TrOCR](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TrOCR/Inference_with_TrOCR_%2B_Gradio_demo.ipynb) and Gradio demo.
- A notebook on [fine-tuning TrOCR on the IAM Handwriting Database](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TrOCR/Fine_tune_TrOCR_on_IAM_Handwriting_Database_using_native_PyTorch.ipynb) using native PyTorch.
- A notebook on [evaluating TrOCR on the IAM test set](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TrOCR/Evaluating_TrOCR_base_handwritten_on_the_IAM_test_set.ipynb).
<PipelineTag pipeline="text-generation"/>
- [Causal language modeling](https://huggingface.co/docs/transformers/tasks/language_modeling) task guide.
⚡️ Inference
- An interactive demo of [TrOCR handwritten character recognition](https://huggingface.co/spaces/nielsr/TrOCR-handwritten).
## Inference
TrOCR's [`VisionEncoderDecoder`] model accepts images as input and makes use of
[`~generation.GenerationMixin.generate`] to autoregressively generate text given the input image.
The [`ViTImageProcessor`/`DeiTImageProcessor`] class is responsible for preprocessing the input image and
[`RobertaTokenizer`/`XLMRobertaTokenizer`] decodes the generated target tokens to the target string. The
[`TrOCRProcessor`] wraps [`ViTImageProcessor`/`DeiTImageProcessor`] and [`RobertaTokenizer`/`XLMRobertaTokenizer`]
into a single instance to both extract the input features and decode the predicted token ids.
- Step-by-step Optical Character Recognition (OCR)
```py
>>> from transformers import TrOCRProcessor, VisionEncoderDecoderModel
>>> import requests
>>> from PIL import Image
>>> processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
>>> model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
>>> # load image from the IAM dataset
>>> url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
>>> pixel_values = processor(image, return_tensors="pt").pixel_values
>>> generated_ids = model.generate(pixel_values)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```
See the [model hub](https://huggingface.co/models?filter=trocr) to look for TrOCR checkpoints.
## TrOCRConfig
[[autodoc]] TrOCRConfig
## TrOCRProcessor
[[autodoc]] TrOCRProcessor
- __call__
- from_pretrained
- save_pretrained
- batch_decode
- decode
## TrOCRForCausalLM
[[autodoc]] TrOCRForCausalLM
- forward
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# VisualBERT
## Overview
The VisualBERT model was proposed in [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
VisualBERT is a neural network trained on a variety of (image, text) pairs.
The abstract from the paper is the following:
*We propose VisualBERT, a simple and flexible framework for modeling a broad range of vision-and-language tasks.
VisualBERT consists of a stack of Transformer layers that implicitly align elements of an input text and regions in an
associated input image with self-attention. We further propose two visually-grounded language model objectives for
pre-training VisualBERT on image caption data. Experiments on four vision-and-language tasks including VQA, VCR, NLVR2,
and Flickr30K show that VisualBERT outperforms or rivals with state-of-the-art models while being significantly
simpler. Further analysis demonstrates that VisualBERT can ground elements of language to image regions without any
explicit supervision and is even sensitive to syntactic relationships, tracking, for example, associations between
verbs and image regions corresponding to their arguments.*
This model was contributed by [gchhablani](https://huggingface.co/gchhablani). The original code can be found [here](https://github.com/uclanlp/visualbert).
## Usage tips
1. Most of the checkpoints provided work with the [`VisualBertForPreTraining`] configuration. Other
checkpoints provided are the fine-tuned checkpoints for down-stream tasks - VQA ('visualbert-vqa'), VCR
('visualbert-vcr'), NLVR2 ('visualbert-nlvr2'). Hence, if you are not working on these downstream tasks, it is
recommended that you use the pretrained checkpoints.
2. For the VCR task, the authors use a fine-tuned detector for generating visual embeddings for all the checkpoints.
   We do not provide the detector and its weights as part of the package, but they are available in the research
   projects, and the states can be loaded directly into the detector provided.
VisualBERT is a multi-modal vision and language model. It can be used for visual question answering, multiple choice,
visual reasoning and region-to-phrase correspondence tasks. VisualBERT uses a BERT-like transformer to prepare
embeddings for image-text pairs. Both the text and visual features are then projected to a latent space with identical
dimension.
To feed images to the model, each image is passed through a pre-trained object detector and the regions and the
bounding boxes are extracted. The authors use the features generated after passing these regions through a pre-trained
CNN like ResNet as visual embeddings. They also add absolute position embeddings, and feed the resulting sequence of
vectors to a standard BERT model. The text input is concatenated in front of the visual embeddings in the embedding
layer, and is expected to be bounded by [CLS] and [SEP] tokens, as in BERT. The segment IDs must also be set
appropriately for the textual and visual parts.
The [`BertTokenizer`] is used to encode the text. A custom detector/image processor must be used
to get the visual embeddings. The following example notebooks show how to use VisualBERT with Detectron-like models:
- [VisualBERT VQA demo notebook](https://github.com/huggingface/transformers/tree/main/examples/research_projects/visual_bert) : This notebook
contains an example on VisualBERT VQA.
- [Generate Embeddings for VisualBERT (Colab Notebook)](https://colab.research.google.com/drive/1bLGxKdldwqnMVA5x4neY7-l_8fKGWQYI?usp=sharing) : This notebook contains
an example on how to generate visual embeddings.
The following example shows how to get the last hidden state using [`VisualBertModel`]:
```python
>>> import torch
>>> from transformers import BertTokenizer, VisualBertModel
>>> model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> inputs = tokenizer("What is the man eating?", return_tensors="pt")
>>> # this is a custom function that returns the visual embeddings given the image path
>>> visual_embeds = get_visual_embeddings(image_path)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
>>> inputs.update(
... {
... "visual_embeds": visual_embeds,
... "visual_token_type_ids": visual_token_type_ids,
... "visual_attention_mask": visual_attention_mask,
... }
... )
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
```
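Note that `get_visual_embeddings` above is not part of Transformers; you must produce the detector features yourself. For a quick shape-level sanity check only, a hypothetical stand-in could return random region features (the 36 regions and 2048-dimensional features are assumptions matching common Detectron-style detectors; check the `visual_embedding_dim` of your checkpoint's config):
```python
import torch

def get_visual_embeddings(image_path, num_regions=36, visual_dim=2048):
    # Hypothetical stand-in: real visual embeddings should come from a
    # pre-trained object detector (see the notebooks linked above).
    return torch.randn(1, num_regions, visual_dim)
```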
## VisualBertConfig
[[autodoc]] VisualBertConfig
## VisualBertModel
[[autodoc]] VisualBertModel
- forward
## VisualBertForPreTraining
[[autodoc]] VisualBertForPreTraining
- forward
## VisualBertForQuestionAnswering
[[autodoc]] VisualBertForQuestionAnswering
- forward
## VisualBertForMultipleChoice
[[autodoc]] VisualBertForMultipleChoice
- forward
## VisualBertForVisualReasoning
[[autodoc]] VisualBertForVisualReasoning
- forward
## VisualBertForRegionToPhraseAlignment
[[autodoc]] VisualBertForRegionToPhraseAlignment
- forward
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# XGLM
## Overview
The XGLM model was proposed in [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668)
by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal,
Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo,
Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
The abstract from the paper is the following:
*Large-scale autoregressive language models such as GPT-3 are few-shot learners that can perform a wide range of language
tasks without fine-tuning. While these models are known to be able to jointly represent many different languages,
their training data is dominated by English, potentially limiting their cross-lingual generalization.
In this work, we train multilingual autoregressive language models on a balanced corpus covering a diverse set of languages,
and study their few- and zero-shot learning capabilities in a wide range of tasks. Our largest model with 7.5 billion parameters
sets new state of the art in few-shot learning in more than 20 representative languages, outperforming GPT-3 of comparable size
in multilingual commonsense reasoning (with +7.4% absolute accuracy improvement in 0-shot settings and +9.4% in 4-shot settings)
and natural language inference (+5.4% in each of 0-shot and 4-shot settings). On the FLORES-101 machine translation benchmark,
our model outperforms GPT-3 on 171 out of 182 translation directions with 32 training examples, while surpassing the
official supervised baseline in 45 directions. We present a detailed analysis of where the model succeeds and fails,
showing in particular that it enables cross-lingual in-context learning on some tasks, while there is still room for improvement
on surface form robustness and adaptation to tasks that do not have a natural cloze form. Finally, we evaluate our models
in social value tasks such as hate speech detection in five languages and find it has limitations similar to comparable sized GPT-3 models.*
This model was contributed by [Suraj](https://huggingface.co/valhalla). The original code can be found [here](https://github.com/pytorch/fairseq/tree/main/examples/xglm).
## Resources
- [Causal language modeling task guide](../tasks/language_modeling)
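As a quick sanity check before the API reference below, here is a minimal, hedged generation sketch (the `facebook/xglm-564M` checkpoint is the smallest released size, and the prompt is arbitrary):
```python
from transformers import AutoTokenizer, XGLMForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M")
model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")

# XGLM is multilingual, so prompts in many languages work
inputs = tokenizer("Le chat est", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```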
## XGLMConfig
[[autodoc]] XGLMConfig
## XGLMTokenizer
[[autodoc]] XGLMTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## XGLMTokenizerFast
[[autodoc]] XGLMTokenizerFast
<frameworkcontent>
<pt>
## XGLMModel
[[autodoc]] XGLMModel
- forward
## XGLMForCausalLM
[[autodoc]] XGLMForCausalLM
- forward
</pt>
<tf>
## TFXGLMModel
[[autodoc]] TFXGLMModel
- call
## TFXGLMForCausalLM
[[autodoc]] TFXGLMForCausalLM
- call
</tf>
<jax>
## FlaxXGLMModel
[[autodoc]] FlaxXGLMModel
- __call__
## FlaxXGLMForCausalLM
[[autodoc]] FlaxXGLMForCausalLM
- __call__
</jax>
</frameworkcontent>
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Pipelines for inference
The [`pipeline`] makes it simple to use any model from the [Hub](https://huggingface.co/models) for inference on any language, computer vision, speech, and multimodal tasks. Even if you don't have experience with a specific modality or aren't familiar with the underlying code behind the models, you can still use them for inference with the [`pipeline`]! This tutorial will teach you to:
* Use a [`pipeline`] for inference.
* Use a specific tokenizer or model.
* Use a [`pipeline`] for audio, vision, and multimodal tasks.
<Tip>
Take a look at the [`pipeline`] documentation for a complete list of supported tasks and available parameters.
</Tip>
## Pipeline usage
While each task has an associated [`pipeline`], it is simpler to use the general [`pipeline`] abstraction which contains
all the task-specific pipelines. The [`pipeline`] automatically loads a default model and a preprocessing class capable
of inference for your task. Let's take the example of using the [`pipeline`] for automatic speech recognition (ASR), or
speech-to-text.
1. Start by creating a [`pipeline`] and specify the inference task:
```py
>>> from transformers import pipeline
>>> transcriber = pipeline(task="automatic-speech-recognition")
```
2. Pass your input to the [`pipeline`]. In the case of speech recognition, this is an audio input file:
```py
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': 'I HAVE A DREAM BUT ONE DAY THIS NATION WILL RISE UP LIVE UP THE TRUE MEANING OF ITS TREES'}
```
Not the result you had in mind? Check out some of the [most downloaded automatic speech recognition models](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=trending)
on the Hub to see if you can get a better transcription.
Let's try the [Whisper large-v2](https://huggingface.co/openai/whisper-large-v2) model from OpenAI. Whisper was released
2 years later than Wav2Vec2, and was trained on close to 10x more data. As such, it beats Wav2Vec2 on most downstream
benchmarks. It also has the added benefit of predicting punctuation and casing, neither of which are possible with
Wav2Vec2.
Let's give it a try here to see how it performs:
```py
>>> transcriber = pipeline(model="openai/whisper-large-v2")
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'}
```
Now this result looks more accurate! For a deep-dive comparison on Wav2Vec2 vs Whisper, refer to the [Audio Transformers Course](https://huggingface.co/learn/audio-course/chapter5/asr_models).
We really encourage you to check out the Hub for models in different languages, models specialized in your field, and more.
You can check out and compare model results directly from your browser on the Hub to see if it fits or
handles corner cases better than other ones.
And if you don't find a model for your use case, you can always start [training](training) your own!
If you have several inputs, you can pass your input as a list:
```py
transcriber(
[
"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac",
"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac",
]
)
```
Pipelines are great for experimentation as switching from one model to another is trivial; however, there are some ways to optimize them for larger workloads than experimentation. See the following guides that dive into iterating over whole datasets or using pipelines in a webserver:
* [Using pipelines on a dataset](#using-pipelines-on-a-dataset)
* [Using pipelines for a webserver](./pipeline_webserver)
## Parameters
[`pipeline`] supports many parameters; some are task specific, and some are general to all pipelines.
In general, you can specify parameters anywhere you want:
```py
transcriber = pipeline(model="openai/whisper-large-v2", my_parameter=1)
out = transcriber(...) # This will use `my_parameter=1`.
out = transcriber(..., my_parameter=2) # This will override and use `my_parameter=2`.
out = transcriber(...) # This will go back to using `my_parameter=1`.
```
Let's check out 3 important ones:
### Device
If you use `device=n`, the pipeline automatically puts the model on the specified device.
This will work regardless of whether you are using PyTorch or TensorFlow.
```py
transcriber = pipeline(model="openai/whisper-large-v2", device=0)
```
If the model is too large for a single GPU and you are using PyTorch, you can set `device_map="auto"` to automatically
determine how to load and store the model weights. Using the `device_map` argument requires the 🤗 [Accelerate](https://huggingface.co/docs/accelerate)
package:
```bash
pip install --upgrade accelerate
```
The following code automatically loads and stores model weights across devices:
```py
transcriber = pipeline(model="openai/whisper-large-v2", device_map="auto")
```
Note that if `device_map="auto"` is passed, there is no need to add the argument `device=device` when instantiating your `pipeline`; combining the two may lead to unexpected behavior!
### Batch size
By default, pipelines will not batch inference for reasons explained in detail [here](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching). The reason is that batching is not necessarily faster, and can actually be quite a bit slower in some cases.
But if it works in your use case, you can use:
```py
transcriber = pipeline(model="openai/whisper-large-v2", device=0, batch_size=2)
audio_filenames = [f"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/{i}.flac" for i in range(1, 5)]
texts = transcriber(audio_filenames)
```
This runs the pipeline on the 4 provided audio files, but it will pass them in batches of 2
to the model (which is on a GPU, where batching is more likely to help) without requiring any further code from you.
The output should always match what you would have received without batching. It is only meant as a way to help you get more speed out of a pipeline.
Pipelines can also alleviate some of the complexities of batching because, for some pipelines, a single item (like a long audio file) needs to be chunked into multiple parts to be processed by a model. The pipeline performs this [*chunk batching*](./main_classes/pipelines#pipeline-chunk-batching) for you.
### Task specific parameters
All tasks provide task specific parameters which allow for additional flexibility and options to help you get your job done.
For instance, the [`transformers.AutomaticSpeechRecognitionPipeline.__call__`] method has a `return_timestamps` parameter which sounds promising for subtitling videos:
```py
>>> transcriber = pipeline(model="openai/whisper-large-v2", return_timestamps=True)
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.', 'chunks': [{'timestamp': (0.0, 11.88), 'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its'}, {'timestamp': (11.88, 12.38), 'text': ' creed.'}]}
```
As you can see, the model inferred the text and also outputted **when** the various sentences were pronounced.
There are many parameters available for each task, so check out each task's API reference to see what you can tinker with!
For instance, the [`~transformers.AutomaticSpeechRecognitionPipeline`] has a `chunk_length_s` parameter which is helpful
for working on really long audio files (for example, subtitling entire movies or hour-long videos) that a model typically
cannot handle on its own:
```python
>>> transcriber = pipeline(model="openai/whisper-large-v2", chunk_length_s=30, return_timestamps=True)
>>> transcriber("https://huggingface.co/datasets/sanchit-gandhi/librispeech_long/resolve/main/audio.wav")
{'text': " Chapter 16. I might have told you of the beginning of this liaison in a few lines, but I wanted you to see every step by which we came. I, too, agree to whatever Marguerite wished, Marguerite to be unable to live apart from me. It was the day after the evening...
```
If you can't find a parameter that would really help you out, feel free to [request it](https://github.com/huggingface/transformers/issues/new?assignees=&labels=feature&template=feature-request.yml)!
## Using pipelines on a dataset
The pipeline can also run inference on a large dataset. The easiest way we recommend doing this is by using an iterator:
```py
def data():
for i in range(1000):
yield f"My example {i}"
pipe = pipeline(model="openai-community/gpt2", device=0)
generated_characters = 0
for out in pipe(data()):
generated_characters += len(out[0]["generated_text"])
```
The iterator `data()` yields each result, and the pipeline automatically
recognizes the input is iterable and will start fetching the data while
it continues to process it on the GPU (this uses [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) under the hood).
This is important because you don't have to allocate memory for the whole dataset
and you can feed the GPU as fast as possible.
Since batching could speed things up, it may be useful to try tuning the `batch_size` parameter here.
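For instance, a hedged variant of the earlier loop that also batches (the value `batch_size=8` is an assumption; tune it for your model and hardware):
```py
# reuses `pipe`, `data()` and `generated_characters` from the snippet above
for out in pipe(data(), batch_size=8):
    generated_characters += len(out[0]["generated_text"])
```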
The simplest way to iterate over a dataset is to just load one from 🤗 [Datasets](https://github.com/huggingface/datasets/):
```py
# KeyDataset is a util that will just output the item we're interested in.
from transformers.pipelines.pt_utils import KeyDataset
from datasets import load_dataset
pipe = pipeline(model="hf-internal-testing/tiny-random-wav2vec2", device=0)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:10]")
for out in pipe(KeyDataset(dataset, "audio")):
print(out)
```
## Using pipelines for a webserver
<Tip>
Creating an inference engine is a complex topic which deserves its own
page.
</Tip>
[Link](./pipeline_webserver)
## Vision pipeline
Using a [`pipeline`] for vision tasks is practically identical.
Specify your task and pass your image to the classifier. The image can be a link, a local path or a base64-encoded image. For example, what species of cat is shown below?
![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg)
```py
>>> from transformers import pipeline
>>> vision_classifier = pipeline(model="google/vit-base-patch16-224")
>>> preds = vision_classifier(
... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> preds
[{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}]
```
## Text pipeline
Using a [`pipeline`] for NLP tasks is practically identical.
```py
>>> from transformers import pipeline
>>> # This model is a `zero-shot-classification` model.
>>> # It will classify text, except you are free to choose any label you might imagine
>>> classifier = pipeline(model="facebook/bart-large-mnli")
>>> classifier(
... "I have a problem with my iphone that needs to be resolved asap!!",
... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"],
... )
{'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]}
```
## Multimodal pipeline
The [`pipeline`] supports more than one modality. For example, a visual question answering (VQA) task combines text and image. Feel free to use any image link you like and a question you want to ask about the image. The image can be a URL or a local path to the image.
For example, if you use this [invoice image](https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png):
```py
>>> from transformers import pipeline
>>> vqa = pipeline(model="impira/layoutlm-document-qa")
>>> vqa(
... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png",
... question="What is the invoice number?",
... )
[{'score': 0.42515, 'answer': 'us-001', 'start': 16, 'end': 16}]
```
<Tip>
To run the example above you need to have [`pytesseract`](https://pypi.org/project/pytesseract/) installed in addition to 🤗 Transformers:
```bash
sudo apt install -y tesseract-ocr
pip install pytesseract
```
</Tip>
## Using `pipeline` on large models with 🤗 `accelerate`
You can easily run `pipeline` on large models using 🤗 `accelerate`! First make sure you have installed `accelerate` with `pip install accelerate`.
First load your model using `device_map="auto"`! We will use `facebook/opt-1.3b` for our example.
```py
# pip install accelerate
import torch
from transformers import pipeline
pipe = pipeline(model="facebook/opt-1.3b", torch_dtype=torch.bfloat16, device_map="auto")
output = pipe("This is a cool example!", do_sample=True, top_p=0.95)
```
You can also pass 8-bit loaded models if you install `bitsandbytes` and add the argument `load_in_8bit=True`
```py
# pip install accelerate bitsandbytes
import torch
from transformers import pipeline
pipe = pipeline(model="facebook/opt-1.3b", device_map="auto", model_kwargs={"load_in_8bit": True})
output = pipe("This is a cool example!", do_sample=True, top_p=0.95)
```
Note that you can replace the checkpoint with any Hugging Face model that supports large model loading, such as BLOOM.
## Creating web demos from pipelines with `gradio`
Pipelines are automatically supported in [Gradio](https://github.com/gradio-app/gradio/), a library that makes creating beautiful and user-friendly machine learning apps on the web a breeze. First, make sure you have Gradio installed:
```bash
pip install gradio
```
Then, you can create a web demo around an image classification pipeline (or any other pipeline) in a single line of code by calling Gradio's [`Interface.from_pipeline`](https://www.gradio.app/docs/interface#interface-from-pipeline) function to launch the pipeline. This creates an intuitive drag-and-drop interface in your browser:
```py
from transformers import pipeline
import gradio as gr
pipe = pipeline("image-classification", model="google/vit-base-patch16-224")
gr.Interface.from_pipeline(pipe).launch()
```
![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/panda-classification.png)
By default, the web demo runs on a local server. If you'd like to share it with others, you can generate a temporary public
link by setting `share=True` in `launch()`. You can also host your demo on [Hugging Face Spaces](https://huggingface.co/spaces) for a permanent link.
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Image Feature Extraction
[[open-in-colab]]
Image feature extraction is the task of extracting semantically meaningful features given an image. This has many use cases, including image similarity and image retrieval. Moreover, most computer vision models can be used for image feature extraction, where one can remove the task-specific head (image classification, object detection, etc.) and get the features. Depending on how deep the model is, these features can capture lower-level information such as edges and corners, or higher-level information about the real world (e.g. what a cat looks like). These outputs can therefore be used to train new classifiers on a specific dataset.
In this guide, you will:
- Learn to build a simple image similarity system on top of the `image-feature-extraction` pipeline.
- Accomplish the same task with bare model inference.
## Image Similarity using `image-feature-extraction` Pipeline
We have two images of cats sitting on top of fish nets, one of them is generated.
```python
from PIL import Image
import requests
img_urls = ["https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.png", "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.jpeg"]
image_real = Image.open(requests.get(img_urls[0], stream=True).raw).convert("RGB")
image_gen = Image.open(requests.get(img_urls[1], stream=True).raw).convert("RGB")
```
Let's see the pipeline in action. First, initialize the pipeline. If you don't pass any model to it, the pipeline will be automatically initialized with [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224). If you'd like to calculate similarity, set `pool=True`.
```python
import torch
from transformers import pipeline
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
pipe = pipeline(task="image-feature-extraction", model="google/vit-base-patch16-384", device=DEVICE, pool=True)
```
To infer with `pipe` pass both images to it.
```python
outputs = pipe([image_real, image_gen])
```
The output contains pooled embeddings of those two images.
```python
# get the length of a single output
print(len(outputs[0][0]))
# show outputs
print(outputs)
# 768
# [[[-0.03909236937761307, 0.43381670117378235, -0.06913255900144577,
```
To get the similarity score, we need to pass them to a similarity function.
```python
from torch.nn.functional import cosine_similarity
similarity_score = cosine_similarity(torch.Tensor(outputs[0]),
torch.Tensor(outputs[1]), dim=1)
print(similarity_score)
# tensor([0.6043])
```
If you want to get the last hidden states before pooling, avoid passing any value for the `pool` parameter, as it is set to `False` by default. These hidden states are useful for training new classifiers or models based on the features from the model.
```python
pipe = pipeline(task="image-feature-extraction", model="google/vit-base-patch16-224", device=DEVICE)
outputs = pipe(image_real)
```
Since the outputs are unpooled, we get the last hidden states where the first dimension is the batch size, and the last two are the embedding shape.
```python
import numpy as np
print(np.array(outputs).shape)
# (1, 197, 768)
```
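If you only have these unpooled states, one simple (hedged) option is to pool them yourself before computing similarity; for ViT-base at 224x224 resolution, the 197 tokens are 196 image patches plus one `[CLS]` token:
```python
import torch

# mean pooling over the token dimension is a simple heuristic, not the model's own pooler
hidden_states = torch.Tensor(outputs)  # (1, 197, 768): batch, tokens, hidden size
pooled = hidden_states.mean(dim=1)     # (1, 768)
```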
## Getting Features and Similarities using `AutoModel`
We can also use the `AutoModel` class of Transformers to get the features. `AutoModel` loads any Transformers model with no task-specific head, and we can use this to get the features.
```python
from transformers import AutoImageProcessor, AutoModel
processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = AutoModel.from_pretrained("google/vit-base-patch16-224").to(DEVICE)
```
Let's write a simple function for inference. We will pass the inputs to the `processor` first and pass its outputs to the `model`.
```python
def infer(image):
inputs = processor(image, return_tensors="pt").to(DEVICE)
outputs = model(**inputs)
return outputs.pooler_output
```
We can pass the images directly to this function and get the embeddings.
```python
embed_real = infer(image_real)
embed_gen = infer(image_gen)
```
We can get the similarity again over the embeddings.
```python
from torch.nn.functional import cosine_similarity
similarity_score = cosine_similarity(embed_real, embed_gen, dim=1)
print(similarity_score)
# tensor([0.6061], device='cuda:0', grad_fn=<SumBackward1>)
```
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Translation
[[open-in-colab]]
<Youtube id="1JvfrvZgi6c"/>
Translation converts a sequence of text from one language to another. It is one of several tasks you can formulate as a sequence-to-sequence problem, a powerful framework for returning some output from an input, like translation or summarization. Translation systems are commonly used for translation between texts in different languages, but they can also be used for speech or some combination in between, like text-to-speech or speech-to-text.
This guide will show you how to:
1. Finetune [T5](https://huggingface.co/google-t5/t5-small) on the English-French subset of the [OPUS Books](https://huggingface.co/datasets/opus_books) dataset to translate English text to French.
2. Use your finetuned model for inference.
<Tip>
The task illustrated in this tutorial is supported by the following model architectures:
<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->
[BART](../model_doc/bart), [BigBird-Pegasus](../model_doc/bigbird_pegasus), [Blenderbot](../model_doc/blenderbot), [BlenderbotSmall](../model_doc/blenderbot-small), [Encoder decoder](../model_doc/encoder-decoder), [FairSeq Machine-Translation](../model_doc/fsmt), [GPTSAN-japanese](../model_doc/gptsan-japanese), [LED](../model_doc/led), [LongT5](../model_doc/longt5), [M2M100](../model_doc/m2m_100), [Marian](../model_doc/marian), [mBART](../model_doc/mbart), [MT5](../model_doc/mt5), [MVP](../model_doc/mvp), [NLLB](../model_doc/nllb), [NLLB-MOE](../model_doc/nllb-moe), [Pegasus](../model_doc/pegasus), [PEGASUS-X](../model_doc/pegasus_x), [PLBart](../model_doc/plbart), [ProphetNet](../model_doc/prophetnet), [SeamlessM4T](../model_doc/seamless_m4t), [SeamlessM4Tv2](../model_doc/seamless_m4t_v2), [SwitchTransformers](../model_doc/switch_transformers), [T5](../model_doc/t5), [UMT5](../model_doc/umt5), [XLM-ProphetNet](../model_doc/xlm-prophetnet)
<!--End of the generated tip-->
</Tip>
Before you begin, make sure you have all the necessary libraries installed:
```bash
pip install transformers datasets evaluate sacrebleu
```
We encourage you to log in to your Hugging Face account so you can upload and share your model with the community. When prompted, enter your token to log in:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
## Load OPUS Books dataset
Start by loading the English-French subset of the [OPUS Books](https://huggingface.co/datasets/opus_books) dataset from the 🤗 Datasets library:
```py
>>> from datasets import load_dataset
>>> books = load_dataset("opus_books", "en-fr")
```
Split the dataset into a train and test set with the [`~datasets.Dataset.train_test_split`] method:
```py
>>> books = books["train"].train_test_split(test_size=0.2)
```
Then take a look at an example:
```py
>>> books["train"][0]
{'id': '90560',
'translation': {'en': 'But this lofty plateau measured only a few fathoms, and soon we reentered Our Element.',
'fr': 'Mais ce plateau élevé ne mesurait que quelques toises, et bientÎt nous fûmes rentrés dans notre élément.'}}
```
`translation`: an English and French translation of the text.
## Preprocess
<Youtube id="XAR8jnZZuUs"/>
The next step is to load a T5 tokenizer to process the English-French language pairs:
```py
>>> from transformers import AutoTokenizer
>>> checkpoint = "google-t5/t5-small"
>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
```
The preprocessing function you want to create needs to:
1. Prefix the input with a prompt so T5 knows this is a translation task. Some models capable of multiple NLP tasks require prompting for specific tasks.
2. Tokenize the input (English) and target (French) separately because you can't tokenize French text with a tokenizer pretrained on an English vocabulary.
3. Truncate sequences to be no longer than the maximum length set by the `max_length` parameter.
```py
>>> source_lang = "en"
>>> target_lang = "fr"
>>> prefix = "translate English to French: "
>>> def preprocess_function(examples):
... inputs = [prefix + example[source_lang] for example in examples["translation"]]
... targets = [example[target_lang] for example in examples["translation"]]
... model_inputs = tokenizer(inputs, text_target=targets, max_length=128, truncation=True)
... return model_inputs
```
To apply the preprocessing function over the entire dataset, use the 🤗 Datasets [`~datasets.Dataset.map`] method. You can speed up the `map` function by setting `batched=True` to process multiple elements of the dataset at once:
```py
>>> tokenized_books = books.map(preprocess_function, batched=True)
```
Now create a batch of examples using [`DataCollatorForSeq2Seq`]. It's more efficient to *dynamically pad* the sentences to the longest length in a batch during collation, instead of padding the whole dataset to the maximum length.
<frameworkcontent>
<pt>
```py
>>> from transformers import DataCollatorForSeq2Seq
>>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint)
```
</pt>
<tf>
```py
>>> from transformers import DataCollatorForSeq2Seq
>>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint, return_tensors="tf")
```
</tf>
</frameworkcontent>
## Evaluate
Including a metric during training is often helpful for evaluating your model's performance. You can quickly load an evaluation method with the 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) library. For this task, load the [SacreBLEU](https://huggingface.co/spaces/evaluate-metric/sacrebleu) metric (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric):
```py
>>> import evaluate
>>> metric = evaluate.load("sacrebleu")
```
Then create a function that passes your predictions and labels to [`~evaluate.EvaluationModule.compute`] to calculate the SacreBLEU score:
```py
>>> import numpy as np
>>> def postprocess_text(preds, labels):
... preds = [pred.strip() for pred in preds]
... labels = [[label.strip()] for label in labels]
... return preds, labels
>>> def compute_metrics(eval_preds):
... preds, labels = eval_preds
... if isinstance(preds, tuple):
... preds = preds[0]
... decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
... labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
... decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
... decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
... result = metric.compute(predictions=decoded_preds, references=decoded_labels)
... result = {"bleu": result["score"]}
... prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
... result["gen_len"] = np.mean(prediction_lens)
... result = {k: round(v, 4) for k, v in result.items()}
... return result
```
Your `compute_metrics` function is ready to go now, and you'll return to it when you set up your training.
## Train
<frameworkcontent>
<pt>
<Tip>
If you aren't familiar with finetuning a model with the [`Trainer`], take a look at the basic tutorial [here](../training#train-with-pytorch-trainer)!
</Tip>
You're ready to start training your model now! Load T5 with [`AutoModelForSeq2SeqLM`]:
```py
>>> from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer
>>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
```
At this point, only three steps remain:
1. Define your training hyperparameters in [`Seq2SeqTrainingArguments`]. The only required parameter is `output_dir` which specifies where to save your model. You'll push this model to the Hub by setting `push_to_hub=True` (you need to be signed in to Hugging Face to upload your model). At the end of each epoch, the [`Trainer`] will evaluate the SacreBLEU metric and save the training checkpoint.
2. Pass the training arguments to [`Seq2SeqTrainer`] along with the model, dataset, tokenizer, data collator, and `compute_metrics` function.
3. Call [`~Trainer.train`] to finetune your model.
```py
>>> training_args = Seq2SeqTrainingArguments(
... output_dir="my_awesome_opus_books_model",
... evaluation_strategy="epoch",
... learning_rate=2e-5,
... per_device_train_batch_size=16,
... per_device_eval_batch_size=16,
... weight_decay=0.01,
... save_total_limit=3,
... num_train_epochs=2,
... predict_with_generate=True,
... fp16=True,
... push_to_hub=True,
... )
>>> trainer = Seq2SeqTrainer(
... model=model,
... args=training_args,
... train_dataset=tokenized_books["train"],
... eval_dataset=tokenized_books["test"],
... tokenizer=tokenizer,
... data_collator=data_collator,
... compute_metrics=compute_metrics,
... )
>>> trainer.train()
```
Once training is completed, share your model on the Hub with the [`~transformers.Trainer.push_to_hub`] method so everyone can use your model:
```py
>>> trainer.push_to_hub()
```
</pt>
<tf>
<Tip>
If you aren't familiar with finetuning a model with Keras, take a look at the basic tutorial [here](../training#train-a-tensorflow-model-with-keras)!
</Tip>
To finetune a model in TensorFlow, start by setting up an optimizer function, learning rate schedule, and some training hyperparameters:
```py
>>> from transformers import AdamWeightDecay
>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)
```
Then you can load T5 with [`TFAutoModelForSeq2SeqLM`]:
```py
>>> from transformers import TFAutoModelForSeq2SeqLM
>>> model = TFAutoModelForSeq2SeqLM.from_pretrained(checkpoint)
```
Convert your datasets to the `tf.data.Dataset` format with [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
```py
>>> tf_train_set = model.prepare_tf_dataset(
... tokenized_books["train"],
... shuffle=True,
... batch_size=16,
... collate_fn=data_collator,
... )
>>> tf_test_set = model.prepare_tf_dataset(
... tokenized_books["test"],
... shuffle=False,
... batch_size=16,
... collate_fn=data_collator,
... )
```
Configure the model for training with [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to:
```py
>>> import tensorflow as tf
>>> model.compile(optimizer=optimizer) # No loss argument!
```
The last two things to set up before you start training are computing the SacreBLEU metric from the predictions, and providing a way to push your model to the Hub. Both are done by using [Keras callbacks](../main_classes/keras_callbacks).
Pass your `compute_metrics` function to [`~transformers.KerasMetricCallback`]:
```py
>>> from transformers.keras_callbacks import KerasMetricCallback
>>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_test_set)
```
Specify where to push your model and tokenizer in the [`~transformers.PushToHubCallback`]:
```py
>>> from transformers.keras_callbacks import PushToHubCallback
>>> push_to_hub_callback = PushToHubCallback(
... output_dir="my_awesome_opus_books_model",
... tokenizer=tokenizer,
... )
```
Then bundle your callbacks together:
```py
>>> callbacks = [metric_callback, push_to_hub_callback]
```
Finally, you're ready to start training your model! Call [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) with your training and validation datasets, the number of epochs, and your callbacks to finetune the model:
```py
>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=callbacks)
```
Once training is completed, your model is automatically uploaded to the Hub so everyone can use it!
</tf>
</frameworkcontent>
<Tip>
For a more in-depth example of how to finetune a model for translation, take a look at the corresponding
[PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb)
or [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb).
</Tip>
## Inference
Great, now that you've finetuned a model, you can use it for inference!
Come up with some text you'd like to translate to another language. For T5, you need to prefix your input depending on the task you're working on. For translation from English to French, you should prefix your input as shown below:
```py
>>> text = "translate English to French: Legumes share resources with nitrogen-fixing bacteria."
```
The simplest way to try out your finetuned model for inference is to use it in a [`pipeline`]. Instantiate a `pipeline` for translation with your model, and pass your text to it:
```py
>>> from transformers import pipeline
>>> translator = pipeline("translation", model="my_awesome_opus_books_model")
>>> translator(text)
[{'translation_text': 'Legumes partagent des ressources avec des bactéries azotantes.'}]
```
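The generic `"translation"` task leaves the language pair up to the model. If you prefer to be explicit, you can use a directional task alias instead; `translation_en_to_fr` matches the English-to-French setup in this guide:
```py
>>> translator = pipeline("translation_en_to_fr", model="my_awesome_opus_books_model")
>>> translator(text)
```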
You can also manually replicate the results of the `pipeline` if you'd like:
<frameworkcontent>
<pt>
Tokenize the text and return the `input_ids` as PyTorch tensors:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_opus_books_model")
>>> inputs = tokenizer(text, return_tensors="pt").input_ids
```
Use the [`~transformers.generation_utils.GenerationMixin.generate`] method to create the translation. For more details about the different text generation strategies and parameters for controlling generation, check out the [Text Generation](../main_classes/text_generation) API.
```py
>>> from transformers import AutoModelForSeq2SeqLM
>>> model = AutoModelForSeq2SeqLM.from_pretrained("my_awesome_opus_books_model")
>>> outputs = model.generate(inputs, max_new_tokens=40, do_sample=True, top_k=30, top_p=0.95)
```
Decode the generated token ids back into text:
```py
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
'Les lignées partagent des ressources avec des bactéries enfixant l'azote.'
```
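Because `do_sample=True` draws tokens at random, your translation may vary between runs. If you'd rather have reproducible output, one option is to swap sampling for beam search:
```py
>>> outputs = model.generate(inputs, max_new_tokens=40, num_beams=4, early_stopping=True)
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
```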
</pt>
<tf>
Tokenize the text and return the `input_ids` as TensorFlow tensors:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_opus_books_model")
>>> inputs = tokenizer(text, return_tensors="tf").input_ids
```
Use the [`~transformers.generation_tf_utils.TFGenerationMixin.generate`] method to create the translation. For more details about the different text generation strategies and parameters for controlling generation, check out the [Text Generation](../main_classes/text_generation) API.
```py
>>> from transformers import TFAutoModelForSeq2SeqLM
>>> model = TFAutoModelForSeq2SeqLM.from_pretrained("my_awesome_opus_books_model")
>>> outputs = model.generate(inputs, max_new_tokens=40, do_sample=True, top_k=30, top_p=0.95)
```
Decode the generated token ids back into text:
```py
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
'Les lugumes partagent les ressources avec des bactéries fixatrices d'azote.'
```
</tf>
</frameworkcontent>
- sections:
  - local: index
    title: 🤗 Transformers
  - local: quicktour
    title: Tour rápido
  - local: installation
    title: Instalación
  title: Empezar
- sections:
  - local: pipeline_tutorial
    title: Pipelines para inferencia
  - local: autoclass_tutorial
    title: Carga instancias preentrenadas con un AutoClass
  - local: preprocessing
    title: Preprocesamiento
  - local: training
    title: Fine-tuning a un modelo pre-entrenado
  - local: accelerate
    title: Entrenamiento distribuido con 🤗 Accelerate
  - local: model_sharing
    title: Compartir un modelo
  title: Tutoriales
- sections:
  - isExpanded: false
    sections:
    - local: tasks/question_answering
      title: Respuesta a preguntas
    - local: tasks/language_modeling
      title: Modelado de lenguaje
    - local: tasks/summarization
      title: Generación de resúmenes
    - local: tasks/multiple_choice
      title: Selección múltiple
    - local: tasks/image_captioning
      title: Subtítulos de imágenes
    title: Procesamiento del Lenguaje Natural
  - isExpanded: false
    sections:
    - local: tasks/asr
      title: Reconocimiento automático del habla
    title: Audio
  - isExpanded: false
    sections:
    - local: tasks/image_classification
      title: Clasificación de imágenes
    title: Visión Artificial
  title: Guías prácticas
- sections:
  - local: fast_tokenizers
    title: Usa tokenizadores de 🤗 Tokenizers
  - local: multilingual
    title: Modelos multilingües para inferencia
  - local: create_a_model
    title: Crea una arquitectura personalizada
  - local: custom_models
    title: Compartir modelos personalizados
  - local: run_scripts
    title: Entrenamiento con scripts
  - local: chat_templating
    title: Plantillas para Modelos de Chat
  - local: trainer
    title: Entrenador
  - local: sagemaker
    title: Ejecutar el entrenamiento en Amazon SageMaker
  - local: converting_tensorflow_models
    title: Convertir checkpoints de TensorFlow
  - local: serialization
    title: Exportar a ONNX
  - local: torchscript
    title: Exportar a TorchScript
  - local: community
    title: Los recursos de la comunidad
  title: Guías para desarrolladores
- sections:
  - local: performance
    title: Descripción general
  - local: debugging
    title: Debugging
  title: Rendimiento y escalabilidad
- sections:
  - local: add_new_pipeline
    title: ¿Cómo puedo añadir un pipeline a 🤗 Transformers?
  - local: pr_checks
    title: Verificaciones en un Pull Request
  title: Contribuir
- sections:
  - local: philosophy
    title: Filosofía
  - local: glossary
    title: Glosario
  - local: task_summary
    title: Lo que 🤗 Transformers puede hacer
  - local: tasks_explained
    title: Cómo los 🤗 Transformers resuelven tareas
  - local: attention
    title: Mecanismos de atención
  - local: pad_truncation
    title: Relleno y truncamiento
  - local: bertology
    title: BERTología
  - local: perplexity
    title: Perplejidad de los modelos de longitud fija
  title: Guías conceptuales
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Compartir un modelo
Los últimos dos tutoriales mostraron cómo puedes realizar fine-tuning a un modelo con PyTorch, Keras y 🤗 Accelerate para configuraciones distribuidas. ¡El siguiente paso es compartir tu modelo con la comunidad! En Hugging Face creemos en compartir abiertamente a todos el conocimiento y los recursos para democratizar la inteligencia artificial. En este sentido, te animamos a considerar compartir tu modelo con la comunidad, de esta forma ayudas a otros ahorrando tiempo y recursos.
En este tutorial aprenderás dos métodos para compartir un modelo trained o fine-tuned en el [Model Hub](https://huggingface.co/models):
- Mediante código, enviando (push) tus archivos al Hub.
- Con la interfaz Web, con Drag-and-drop de tus archivos al Hub.
<iframe width="560" height="315" src="https://www.youtube.com/embed/XvSGPZFEjDY" title="YouTube video player"
frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope;
picture-in-picture" allowfullscreen></iframe>
<Tip>
Para compartir un modelo con la comunidad necesitas una cuenta en [huggingface.co](https://huggingface.co/join). También puedes unirte a una organización existente o crear una nueva.
</Tip>
## Características de los repositorios
Cada repositorio en el Model Hub se comporta como cualquier otro repositorio en GitHub. Nuestros repositorios ofrecen versioning, commit history, y la habilidad para visualizar diferencias.
El versioning desarrollado dentro del Model Hub está basado en git y [git-lfs](https://git-lfs.github.com/). En otras palabras, puedes tratar un modelo como un repositorio, brindando un mejor control de acceso y escalabilidad. El control de versiones permite *revisions*, un método para apuntar a una versión específica de un modelo utilizando un commit hash, tag o branch.
Como resultado, puedes cargar una versión específica del modelo con el parámetro `revision`:
```py
>>> model = AutoModel.from_pretrained(
... "julien-c/EsperBERTo-small", revision="v2.0.1" # tag name, or branch name, or commit hash
... )
```
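Si no sabes qué revisiones existen antes de fijar una, puedes listarlas con `huggingface_hub`. El siguiente esbozo es opcional y no forma parte de la guía original; asume que tienes la biblioteca `huggingface_hub` instalada:
```py
>>> from huggingface_hub import list_repo_refs

>>> refs = list_repo_refs("julien-c/EsperBERTo-small")
>>> [branch.name for branch in refs.branches]  # ramas disponibles
>>> [tag.name for tag in refs.tags]  # tags disponibles, por ejemplo "v2.0.1"
```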
Los archivos son editados fácilmente dentro de un repositorio. Incluso puedes observar el commit history y las diferencias:
![vis_diff](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vis_diff.png)
## Configuración inicial
Antes de compartir un modelo al Hub necesitarás tus credenciales de Hugging Face. Si tienes acceso a una terminal, ejecuta el siguiente comando en el entorno virtual donde 🤗 Transformers esté instalado. Esto guardará tu token de acceso dentro de tu carpeta cache de Hugging Face (`~/.cache/` por defecto):
```bash
huggingface-cli login
```
Si usas un notebook como Jupyter o Colaboratory, asegúrate de tener instalada la biblioteca [`huggingface_hub`](https://huggingface.co/docs/hub/adding-a-library). Esta biblioteca te permitirá interactuar por código con el Hub.
```bash
pip install huggingface_hub
```
Luego usa `notebook_login` para iniciar sesión en el Hub, y sigue el link [aquí](https://huggingface.co/settings/token) para generar un token con el que iniciaremos sesión:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
## Convertir un modelo para todos los Frameworks
Para asegurarnos que tu modelo pueda ser usado por alguien que esté trabajando con un framework diferente, te recomendamos convertir y subir tu modelo con checkpoints de PyTorch y TensorFlow. Aunque los usuarios aún son capaces de cargar su modelo desde un framework diferente, si se omite este paso será más lento debido a que 🤗 Transformers necesitará convertir el checkpoint sobre la marcha.
Convertir un checkpoint para otro framework es fácil. Asegúrate de tener PyTorch y TensorFlow instalados (véase [aquí](installation) las instrucciones de instalación), y luego encuentra el modelo específico para tu tarea en el otro framework.
Por ejemplo, supongamos que has entrenado DistilBert para clasificación de secuencias en PyTorch y quieres convertirlo a su equivalente en TensorFlow. Cargas el equivalente en TensorFlow de tu modelo para tu tarea y especificas `from_pt=True`, así 🤗 Transformers convertirá el checkpoint de PyTorch a un checkpoint de TensorFlow:
```py
>>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_pt=True)
```
Luego guardas tu nuevo modelo TensorFlow con su nuevo checkpoint:
```py
>>> tf_model.save_pretrained("path/to/awesome-name-you-picked")
```
De manera similar, especificas `from_tf=True` para convertir un checkpoint de TensorFlow a PyTorch:
```py
>>> pt_model = DistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_tf=True)
>>> pt_model.save_pretrained("path/to/awesome-name-you-picked")
```
Si algún modelo está disponible en Flax, también puedes convertir un checkpoint de PyTorch a Flax:
```py
>>> flax_model = FlaxDistilBertForSequenceClassification.from_pretrained(
... "path/to/awesome-name-you-picked", from_pt=True
... )
```
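Al igual que con TensorFlow y PyTorch, puedes guardar el checkpoint convertido de Flax con `save_pretrained` para no tener que repetir la conversión (esbozo ilustrativo):
```py
>>> flax_model.save_pretrained("path/to/awesome-name-you-picked")
```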
## Compartir un modelo con `Trainer`
<Youtube id="Z1-XMy-GNLQ"/>
Compartir un modelo al Hub es tan simple como añadir un parámetro extra o un callback. Si recuerdas del [tutorial de fine-tuning](training), la clase [`TrainingArguments`] es donde especificas los hiperparámetros y opciones de entrenamiento adicionales. Una de estas opciones incluye la habilidad de compartir un modelo directamente al Hub. Para ello configuras `push_to_hub=True` dentro de [`TrainingArguments`]:
```py
>>> training_args = TrainingArguments(output_dir="my-awesome-model", push_to_hub=True)
```
A continuación, como de costumbre, pasa tus argumentos de entrenamiento a [`Trainer`]:
```py
>>> trainer = Trainer(
... model=model,
... args=training_args,
... train_dataset=small_train_dataset,
... eval_dataset=small_eval_dataset,
... compute_metrics=compute_metrics,
... )
```
Luego de que realizas fine-tuning a tu modelo, llama [`~transformers.Trainer.push_to_hub`] en [`Trainer`] para enviar el modelo al Hub. ¡🤗 Transformers incluso añadirá automáticamente los hiperparámetros de entrenamiento, los resultados de entrenamiento y las versiones del framework a tu model card!
```py
>>> trainer.push_to_hub()
```
## Compartir un modelo con `PushToHubCallback`
Los usuarios de TensorFlow pueden activar la misma funcionalidad con [`PushToHubCallback`]. En la función [`PushToHubCallback`], agrega:
- Un directorio de salida para tu modelo.
- Un tokenizador.
- El `hub_model_id`, el cual es tu usuario Hub y el nombre del modelo.
```py
>>> from transformers import PushToHubCallback
>>> push_to_hub_callback = PushToHubCallback(
... output_dir="./your_model_save_path", tokenizer=tokenizer, hub_model_id="your-username/my-awesome-model"
... )
```
Agrega el callback a [`fit`](https://keras.io/api/models/model_training_apis/), y 🤗 Transformers enviará el modelo entrenado al Hub:
```py
>>> model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3, callbacks=push_to_hub_callback)
```
## Usando la función `push_to_hub`
Puedes llamar la función `push_to_hub` directamente en tu modelo para subirlo al Hub.
Especifica el nombre del modelo en `push_to_hub`:
```py
>>> pt_model.push_to_hub("my-awesome-model")
```
Esto creará un repositorio bajo tu usuario con el nombre del modelo `my-awesome-model`. Ahora los usuarios pueden cargar tu modelo con la función `from_pretrained`:
```py
>>> from transformers import AutoModel
>>> model = AutoModel.from_pretrained("your_username/my-awesome-model")
```
Si perteneces a una organización y quieres compartir tu modelo bajo el nombre de la organización, añade el parámetro `organization`:
```py
>>> pt_model.push_to_hub("my-awesome-model", organization="my-awesome-org")
```
La función `push_to_hub` también puede ser usada para añadir archivos al repositorio del modelo. Por ejemplo, añade un tokenizador al repositorio:
```py
>>> tokenizer.push_to_hub("my-awesome-model")
```
O quizás te gustaría añadir la versión de TensorFlow de tu modelo fine-tuned en PyTorch:
```py
>>> tf_model.push_to_hub("my-awesome-model")
```
Ahora, cuando navegues a tu perfil en Hugging Face, deberías observar el repositorio de tu modelo creado recientemente. Si das click en el tab **Files** observarás todos los archivos que has subido al repositorio.
Para más detalles sobre cómo crear y subir archivos al repositorio, consulta la [documentación del Hub](https://huggingface.co/docs/hub/how-to-upstream).
## Compartir con la interfaz web
Los usuarios que prefieran un enfoque no-code tienen la opción de cargar su modelo a través de la interfaz gráfica del Hub. Visita la página [huggingface.co/new](https://huggingface.co/new) para crear un nuevo repositorio:
![new_model_repo](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_model_repo.png)
Desde aquí, añade información acerca del modelo:
- Selecciona el **owner** (la persona propietaria) del repositorio. Puedes ser tú o cualquier organización a la que pertenezcas.
- Escoge un nombre para tu modelo. También será el nombre del repositorio.
- Elige si tu modelo es público o privado.
- Especifica la licencia que usará tu modelo.
Ahora puedes hacer click en el tab **Files** y luego en el botón **Add file** para subir un nuevo archivo a tu repositorio. Luego arrastra y suelta un archivo a subir y añade un mensaje al commit.
![upload_file](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/upload_file.png)
## Añadiendo una tarjeta de modelo
Para asegurarnos de que los usuarios entiendan las capacidades de tu modelo, sus limitaciones, posibles sesgos y consideraciones éticas, por favor añade una tarjeta (como una tarjeta de presentación) al repositorio del modelo. La tarjeta de modelo se define en el archivo `README.md`. Puedes agregar una de la siguiente manera:
* Elaborando y subiendo manualmente el archivo `README.md`.
* Dando click en el botón **Edit model card** dentro del repositorio.
Toma un momento para ver la [tarjeta de modelo](https://huggingface.co/distilbert/distilbert-base-uncased) de DistilBert para que tengas un buen ejemplo del tipo de información que debería incluir. Consulta [la documentación](https://huggingface.co/docs/hub/models-cards) para más detalles acerca de otras opciones que puedes controlar dentro del archivo `README.md`, como la huella de carbono del modelo o ejemplos de widgets.
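Si prefieres crear la tarjeta de modelo por código en lugar de usar la interfaz web, la biblioteca `huggingface_hub` incluye la clase `ModelCard`. El siguiente esbozo es solo una suposición ilustrativa (el contenido y el nombre del repositorio son hipotéticos):
```py
>>> from huggingface_hub import ModelCard

>>> contenido = """---
... license: apache-2.0
... ---
... # my-awesome-model
...
... Tarjeta de modelo de ejemplo para `my-awesome-model`.
... """
>>> card = ModelCard(contenido)
>>> card.push_to_hub("your_username/my-awesome-model")
```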
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Clasificación de imágenes
<Youtube id="tjAIM7BOYhw"/>
La clasificación de imágenes asigna una etiqueta o clase a una imagen. A diferencia de la clasificación de texto o audio, las entradas son los valores de los píxeles que representan una imagen. La clasificación de imágenes tiene muchos usos, como la detección de daños tras una catástrofe, el control de la salud de los cultivos o la búsqueda de signos de enfermedad en imágenes médicas.
Esta guía te mostrará cómo hacer fine-tuning al [ViT](https://huggingface.co/docs/transformers/v4.16.2/en/model_doc/vit) en el dataset [Food-101](https://huggingface.co/datasets/food101) para clasificar un alimento en una imagen.
<Tip>
Consulta la [página de la tarea](https://huggingface.co/tasks/image-classification) de clasificación de imágenes para obtener más información sobre sus modelos, datasets y métricas asociadas.
</Tip>
## Carga el dataset Food-101
Carga solo las primeras 5000 imágenes del dataset Food-101 de la biblioteca 🤗 Datasets, ya que es bastante grande:
```py
>>> from datasets import load_dataset
>>> food = load_dataset("food101", split="train[:5000]")
```
Divide el dataset en un train y un test set:
```py
>>> food = food.train_test_split(test_size=0.2)
```
A continuación, observa un ejemplo:
```py
>>> food["train"][0]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at 0x7F52AFC8AC50>,
'label': 79}
```
El campo `image` contiene una imagen PIL, y cada `label` es un número entero que representa una clase. Crea un diccionario que asigne un nombre de label a un entero y viceversa. El mapeo ayudará al modelo a recuperar el nombre de label a partir del número de la misma:
```py
>>> labels = food["train"].features["label"].names
>>> label2id, id2label = dict(), dict()
>>> for i, label in enumerate(labels):
... label2id[label] = str(i)
... id2label[str(i)] = label
```
Ahora puedes convertir el número de label en un nombre de label para obtener más información:
```py
>>> id2label[str(79)]
'prime_rib'
```
Cada clase de alimento - o label - corresponde a un número; `79` indica una costilla de primera en el ejemplo anterior.
## Preprocesa
Carga el image processor de ViT para procesar la imagen en un tensor:
```py
>>> from transformers import AutoImageProcessor
>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
```
Aplica varias transformaciones de imagen al dataset para hacer el modelo más robusto contra el overfitting. En este caso se utilizará el módulo [`transforms`](https://pytorch.org/vision/stable/transforms.html) de torchvision. Recorta una parte aleatoria de la imagen, cambia su tamaño y normalízala con la media y la desviación estándar de la imagen:
```py
>>> from torchvision.transforms import RandomResizedCrop, Compose, Normalize, ToTensor
>>> normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
>>> _transforms = Compose([RandomResizedCrop(image_processor.size["height"]), ToTensor(), normalize])
```
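Para la evaluación se suele evitar el recorte aleatorio y usar en su lugar un redimensionado determinista. Este esbozo opcional (no aparece en la guía original) usa `Resize` y `CenterCrop` de torchvision:
```py
>>> from torchvision.transforms import CenterCrop, Resize

>>> size = image_processor.size["height"]
>>> _val_transforms = Compose([Resize(size), CenterCrop(size), ToTensor(), normalize])
```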
Crea una función de preprocesamiento que aplique las transformaciones y devuelva los `pixel_values` - los inputs al modelo - de la imagen:
```py
>>> def transforms(examples):
... examples["pixel_values"] = [_transforms(img.convert("RGB")) for img in examples["image"]]
... del examples["image"]
... return examples
```
Utiliza el método [`with_transform`](https://huggingface.co/docs/datasets/package_reference/main_classes?#datasets.Dataset.with_transform) de 🤗 Datasets para aplicar las transformaciones sobre todo el dataset. Las transformaciones se aplican sobre la marcha cuando se carga un elemento del dataset:
```py
>>> food = food.with_transform(transforms)
```
Utiliza [`DefaultDataCollator`] para crear un batch de ejemplos. A diferencia de otros data collators en 🤗 Transformers, el `DefaultDataCollator` no aplica un preprocesamiento adicional como el padding.
```py
>>> from transformers import DefaultDataCollator
>>> data_collator = DefaultDataCollator()
```
## Entrena
Carga ViT con [`AutoModelForImageClassification`]. Especifica el número de labels, y pasa al modelo el mapping entre el número de label y la clase de label:
```py
>>> from transformers import AutoModelForImageClassification, TrainingArguments, Trainer
>>> model = AutoModelForImageClassification.from_pretrained(
... "google/vit-base-patch16-224-in21k",
... num_labels=len(labels),
... id2label=id2label,
... label2id=label2id,
... )
```
<Tip>
Si no estás familiarizado con el fine-tuning de un modelo con el [`Trainer`], echa un vistazo al tutorial básico [aquí](../training#finetune-with-trainer)!
</Tip>
Al llegar a este punto, solo quedan tres pasos:
1. Define tus hiperparámetros de entrenamiento en [`TrainingArguments`]. Es importante que no elimines las columnas que no se utilicen, ya que esto hará que desaparezca la columna `image`. Sin la columna `image` no puedes crear `pixel_values`. Establece `remove_unused_columns=False` para evitar este comportamiento.
2. Pasa los training arguments al [`Trainer`] junto con el modelo, los datasets, tokenizer y data collator.
3. Llama [`~Trainer.train`] para hacer fine-tune de tu modelo.
```py
>>> training_args = TrainingArguments(
... output_dir="./results",
... per_device_train_batch_size=16,
... evaluation_strategy="steps",
... num_train_epochs=4,
... fp16=True,
... save_steps=100,
... eval_steps=100,
... logging_steps=10,
... learning_rate=2e-4,
... save_total_limit=2,
... remove_unused_columns=False,
... )
>>> trainer = Trainer(
... model=model,
... args=training_args,
... data_collator=data_collator,
... train_dataset=food["train"],
... eval_dataset=food["test"],
... tokenizer=image_processor,
... )
>>> trainer.train()
```
<Tip>
Para ver un ejemplo más a profundidad de cómo hacer fine-tuning a un modelo para clasificación de imágenes, echa un vistazo al correspondiente [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
</Tip>
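Una vez terminado el entrenamiento, puedes probar tu modelo con un [`pipeline`] de clasificación de imágenes. Este esbozo asume que guardaste el modelo y el image processor en `./results` (por ejemplo, con `trainer.save_model("./results")`); la ruta de la imagen es hipotética:
```py
>>> from transformers import pipeline

>>> classifier = pipeline("image-classification", model="./results")
>>> classifier("ruta/a/una/imagen_de_comida.jpg")
```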
- sections:
  - local: pipeline_tutorial
    title: Run inference with pipelines
<!---
Copyright 2020 The HuggingFace Team. Tutti i diritti riservati.
Concesso in licenza in base alla Licenza Apache, Versione 2.0 (la "Licenza");
non è possibile utilizzare questo file se non in conformità con la Licenza.
È possibile ottenere una copia della Licenza all'indirizzo
http://www.apache.org/licenses/LICENSE-2.0
A meno che non sia richiesto dalla legge applicabile o concordato per iscritto, il software
distribuito con la Licenza è distribuito su BASE "COSÌ COM'È",
SENZA GARANZIE O CONDIZIONI DI ALCUN TIPO, espresse o implicite.
Per la lingua specifica vedi la Licenza che regola le autorizzazioni e
le limitazioni ai sensi della STESSA.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Migrazione da pacchetti precedenti
## Migrazione da transformers `v3.x` a `v4.x`
Un paio di modifiche sono state introdotte nel passaggio dalla versione 3 alla versione 4. Di seguito è riportato un riepilogo delle
modifiche previste:
#### 1. AutoTokenizer e pipeline ora utilizzano tokenizer veloci (rust) per impostazione predefinita.
I tokenizer python e rust hanno all'incirca le stesse API, ma i tokenizer rust hanno un set di funzionalità più completo.
Ciò introduce due modifiche sostanziali:
- La gestione dei token in overflow tra i tokenizer Python e Rust è diversa.
- I tokenizers di rust non accettano numeri interi nei metodi di codifica.
##### Come ottenere lo stesso comportamento di v3.x in v4.x
- Le pipeline ora contengono funzionalità aggiuntive pronte all'uso. Vedi la [pipeline di classificazione dei token con il flag `grouped_entities`](main_classes/pipelines#transformers.TokenClassificationPipeline).
- Gli auto-tokenizer ora restituiscono tokenizer rust. Per ottenere invece i tokenizer python, l'utente deve usare il flag `use_fast` impostandolo `False`:
Nella versione `v3.x`:
```py
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
```
per ottenere lo stesso nella versione `v4.x`:
```py
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased", use_fast=False)
```
#### 2. SentencePiece è stato rimosso dalle dipendenze richieste
Il requisito sulla dipendenza SentencePiece è stato rimosso da `setup.py`. È stato fatto per avere un canale su anaconda cloud senza basarsi su `conda-forge`. Ciò significa che i tokenizer che dipendono dalla libreria SentencePiece non saranno disponibili con un'installazione standard di `transformers`.
Ciò include le versioni **lente** di:
- `XLNetTokenizer`
- `AlbertTokenizer`
- `CamembertTokenizer`
- `MBartTokenizer`
- `PegasusTokenizer`
- `T5Tokenizer`
- `ReformerTokenizer`
- `XLMRobertaTokenizer`
##### Come ottenere lo stesso comportamento della v3.x nella v4.x
Per ottenere lo stesso comportamento della versione `v3.x`, devi installare anche `sentencepiece`:
Nella versione `v3.x`:
```bash
pip install transformers
```
per ottenere lo stesso nella versione `v4.x`:
```bash
pip install transformers[sentencepiece]
```
o
```bash
pip install transformers sentencepiece
```
#### 3. L'architettura delle repo è stata aggiornata in modo che ogni modello abbia la propria cartella
Con l'aggiunta di nuovi modelli, il numero di file nella cartella `src/transformers` continua a crescere e diventa più difficile navigare e capire. Abbiamo fatto la scelta di inserire ogni modello e i file che lo accompagnano nelle proprie sottocartelle.
Si tratta di una modifica sostanziale in quanto l'importazione di layer intermedi utilizzando direttamente il modulo di un modello deve essere eseguita tramite un percorso diverso.
##### Come ottenere lo stesso comportamento della v3.x nella v4.x
Per ottenere lo stesso comportamento della versione `v3.x`, devi aggiornare il percorso utilizzato per accedere ai layer.
Nella versione `v3.x`:
```python
from transformers.modeling_bert import BertLayer
```
per ottenere lo stesso nella versione `v4.x`:
```python
from transformers.models.bert.modeling_bert import BertLayer
```
#### 4. Impostare l'argomento `return_dict` su `True` per impostazione predefinita
L'[argomento `return_dict`](main_classes/output) abilita la restituzione di oggetti python dict-like contenenti gli output del modello, invece delle tuple standard. Questo oggetto è self-documented poiché le chiavi possono essere utilizzate per recuperare valori, comportandosi anche come una tupla e gli utenti possono recuperare oggetti per indexing o slicing.
Questa è una modifica sostanziale poiché la tupla non può essere decompressa: `value0, value1 = outputs` non funzionerà.
##### Come ottenere lo stesso comportamento della v3.x nella v4.x
Per ottenere lo stesso comportamento della versione `v3.x`, specifica l'argomento `return_dict` come `False`, sia nella configurazione del modello che nel passaggio successivo.
Nella versione `v3.x`:
```python
model = BertModel.from_pretrained("google-bert/bert-base-cased")
outputs = model(**inputs)
```
per ottenere lo stesso nella versione `v4.x`:
```python
model = BertModel.from_pretrained("google-bert/bert-base-cased")
outputs = model(**inputs, return_dict=False)
```
o
```python
model = BertModel.from_pretrained("google-bert/bert-base-cased", return_dict=False)
outputs = model(**inputs)
```
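Per vedere la differenza in pratica: con `return_dict=True` (il default nella v4.x) puoi accedere agli output per nome, per chiave o per indice. Questo è solo uno schizzo illustrativo:
```python
from transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
model = BertModel.from_pretrained("google-bert/bert-base-cased")

inputs = tokenizer("Ciao mondo!", return_tensors="pt")
outputs = model(**inputs)  # return_dict=True per impostazione predefinita

# Tre modi equivalenti di recuperare lo stesso tensore:
hidden = outputs.last_hidden_state
hidden = outputs["last_hidden_state"]
hidden = outputs[0]
```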
#### 5. Rimozione di alcuni attributi deprecati
Gli attributi sono stati rimossi se deprecati da almeno un mese. L'elenco completo degli attributi obsoleti è disponibile in [#8604](https://github.com/huggingface/transformers/pull/8604).
Ecco un elenco di questi attributi/metodi/argomenti e quali dovrebbero essere le loro sostituzioni:
In diversi modelli, le etichette diventano coerenti con gli altri modelli:
- `masked_lm_labels` diventa `labels` in `AlbertForMaskedLM` e `AlbertForPreTraining`.
- `masked_lm_labels` diventa `labels` in `BertForMaskedLM` e `BertForPreTraining`.
- `masked_lm_labels` diventa `labels` in `DistilBertForMaskedLM`.
- `masked_lm_labels` diventa `labels` in `ElectraForMaskedLM`.
- `masked_lm_labels` diventa `labels` in `LongformerForMaskedLM`.
- `masked_lm_labels` diventa `labels` in `MobileBertForMaskedLM`.
- `masked_lm_labels` diventa `labels` in `RobertaForMaskedLM`.
- `lm_labels` diventa `labels` in `BartForConditionalGeneration`.
- `lm_labels` diventa `labels` in `GPT2DoubleHeadsModel`.
- `lm_labels` diventa `labels` in `OpenAIGPTDoubleHeadsModel`.
- `lm_labels` diventa `labels` in `T5ForConditionalGeneration`.
In diversi modelli, il meccanismo di memorizzazione nella cache diventa coerente con gli altri:
- `decoder_cached_states` diventa `past_key_values` in tutti i modelli BART-like, FSMT e T5.
- `decoder_past_key_values` diventa `past_key_values` in tutti i modelli BART-like, FSMT e T5.
- `past` diventa `past_key_values` in tutti i modelli CTRL.
- `past` diventa `past_key_values` in tutti i modelli GPT-2.
Per quanto riguarda le classi tokenizer:
- L'attributo tokenizer `max_len` diventa `model_max_length`.
- L'attributo tokenizer `return_lengths` diventa `return_length`.
- L'argomento di codifica del tokenizer `is_pretokenized` diventa `is_split_into_words`.
Per quanto riguarda la classe `Trainer`:
- L'argomento `tb_writer` di `Trainer` è stato rimosso in favore della funzione richiamabile `TensorBoardCallback(tb_writer=...)`.
- L'argomento `prediction_loss_only` di `Trainer` è stato rimosso in favore dell'argomento di classe `args.prediction_loss_only`.
- L'attributo `data_collator` di `Trainer` sarà richiamabile.
- Il metodo `_log` di `Trainer` è deprecato a favore di `log`.
- Il metodo `_training_step` di `Trainer` è deprecato a favore di `training_step`.
- Il metodo `_prediction_loop` di `Trainer` è deprecato a favore di `prediction_loop`.
- Il metodo `is_local_master` di `Trainer` è deprecato a favore di `is_local_process_zero`.
- Il metodo `is_world_master` di `Trainer` è deprecato a favore di `is_world_process_zero`.
Per quanto riguarda la classe `TrainingArguments`:
- L'argomento `evaluate_during_training` di `TrainingArguments` è deprecato a favore di `evaluation_strategy`.
Per quanto riguarda il modello Transfo-XL:
- L'attributo di configurazione `tie_weight` di Transfo-XL diventa `tie_words_embeddings`.
- Il metodo di modellazione `reset_length` di Transfo-XL diventa `reset_memory_length`.
Per quanto riguarda le pipeline:
- L'argomento `topk` di `FillMaskPipeline` diventa `top_k`.
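Ad esempio, con la v4.x l'argomento rinominato della pipeline fill-mask si usa così (schizzo illustrativo):
```python
from transformers import pipeline

unmasker = pipeline("fill-mask", model="google-bert/bert-base-uncased")
# `topk` della v3.x diventa `top_k` nella v4.x
unmasker("Paris is the [MASK] of France.", top_k=3)
```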
## Passaggio da pytorch-transformers a 🤗 Transformers
Ecco un breve riepilogo di ciò a cui prestare attenzione durante il passaggio da `pytorch-transformers` a 🤗 Transformers.
### L'ordine posizionale di alcune parole chiave di input dei modelli (`attention_mask`, `token_type_ids`...) è cambiato
Per usare Torchscript (vedi #1010, #1204 e #1195) l'ordine specifico delle **parole chiave di input** di alcuni modelli (`attention_mask`, `token_type_ids`...) è stato modificato.
Se inizializzavi i modelli usando parole chiave per gli argomenti, ad esempio `model(inputs_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)`, questo non dovrebbe causare alcun cambiamento.
Se inizializzavi i modelli con input posizionali per gli argomenti, ad esempio `model(inputs_ids, attention_mask, token_type_ids)`, potrebbe essere necessario ricontrollare l'ordine esatto degli argomenti di input.
## Migrazione da pytorch-pretrained-bert
Ecco un breve riepilogo di ciò a cui prestare attenzione durante la migrazione da `pytorch-pretrained-bert` a 🤗 Transformers
### I modelli restituiscono sempre `tuple`
La principale modifica di rilievo durante la migrazione da `pytorch-pretrained-bert` a 🤗 Transformers è che il metodo dei modelli di previsione dà sempre una `tupla` con vari elementi a seconda del modello e dei parametri di configurazione.
Il contenuto esatto delle tuple per ciascun modello è mostrato in dettaglio nelle docstring dei modelli e nella [documentazione](https://huggingface.co/transformers/).
In quasi tutti i casi, andrà bene prendendo il primo elemento dell'output come quello che avresti precedentemente utilizzato in `pytorch-pretrained-bert`.
Ecco un esempio di conversione da `pytorch-pretrained-bert`
a 🤗 Transformers per un modello di classificazione `BertForSequenceClassification`:
```python
# Carichiamo il nostro modello
model = BertForSequenceClassification.from_pretrained("google-bert/bert-base-uncased")
# Se usavi questa riga in pytorch-pretrained-bert :
loss = model(input_ids, labels=labels)
# Ora usa questa riga in 🤗 Transformers per estrarre la perdita dalla tupla di output:
outputs = model(input_ids, labels=labels)
loss = outputs[0]
# In 🤗 Transformers puoi anche avere accesso ai logit:
loss, logits = outputs[:2]
# Ed anche agli attention weight se configuri il modello per restituirli (e anche altri output, vedi le docstring e la documentazione)
model = BertForSequenceClassification.from_pretrained("google-bert/bert-base-uncased", output_attentions=True)
outputs = model(input_ids, labels=labels)
loss, logits, attentions = outputs
```
### Serializzazione
Modifica sostanziale nel metodo `from_pretrained()`:
1. I modelli sono ora impostati in modalità di valutazione in maniera predefinita quando usi il metodo `from_pretrained()`. Per addestrarli non dimenticare di riportarli in modalità di addestramento (`model.train()`) per attivare i moduli di dropout.
2. Gli argomenti aggiuntivi `*inputs` e `**kwargs` forniti al metodo `from_pretrained()` venivano passati direttamente al metodo `__init__()` della classe sottostante del modello. Ora sono usati per aggiornare prima l'attributo di configurazione del modello, che può non funzionare con le classi del modello derivate costruite basandosi sui precedenti esempi di `BertForSequenceClassification`. Più precisamente, gli argomenti posizionali `*inputs` forniti a `from_pretrained()` vengono inoltrati direttamente al metodo `__init__()` del modello mentre gli argomenti keyword `**kwargs` (i) che corrispondono agli attributi della classe di configurazione, vengono utilizzati per aggiornare tali attributi (ii) che non corrispondono ad alcun attributo della classe di configurazione, vengono inoltrati al metodo `__init__()`.
Inoltre, sebbene non si tratti di una modifica sostanziale, i metodi di serializzazione sono stati standardizzati e probabilmente dovresti passare al nuovo metodo `save_pretrained(save_directory)` se prima usavi qualsiasi altro metodo di serializzazione.
Ecco un esempio:
```python
### Carichiamo un modello e un tokenizer
model = BertForSequenceClassification.from_pretrained("google-bert/bert-base-uncased")
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
### Facciamo fare alcune cose al nostro modello e tokenizer
# Es: aggiungiamo nuovi token al vocabolario e agli embedding del nostro modello
tokenizer.add_tokens(["[SPECIAL_TOKEN_1]", "[SPECIAL_TOKEN_2]"])
model.resize_token_embeddings(len(tokenizer))
# Alleniamo il nostro modello
train(model)
### Ora salviamo il nostro modello e il tokenizer in una cartella
model.save_pretrained("./my_saved_model_directory/")
tokenizer.save_pretrained("./my_saved_model_directory/")
### Ricarichiamo il modello e il tokenizer
model = BertForSequenceClassification.from_pretrained("./my_saved_model_directory/")
tokenizer = BertTokenizer.from_pretrained("./my_saved_model_directory/")
```
### Ottimizzatori: BertAdam e OpenAIAdam ora sono AdamW, lo scheduling è quello standard PyTorch
I due ottimizzatori precedenti inclusi, `BertAdam` e `OpenAIAdam`, sono stati sostituiti da un singolo `AdamW` che presenta alcune differenze:
- implementa solo la correzione del weight decay,
- lo scheduling ora è esterno (vedi sotto),
- anche il gradient clipping ora è esterno (vedi sotto).
Il nuovo ottimizzatore `AdamW` corrisponde alle API di `Adam` di PyTorch e ti consente di utilizzare metodi PyTorch o apex per lo scheduling e il clipping.
Lo scheduling ora segue i [PyTorch learning rate schedulers](https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate) standard e non fa più parte dell'ottimizzatore.
Ecco un esempio di linear warmup e decay con `BertAdam` e con `AdamW`:
```python
# Parametri:
lr = 1e-3
max_grad_norm = 1.0
num_training_steps = 1000
num_warmup_steps = 100
warmup_proportion = float(num_warmup_steps) / float(num_training_steps)  # 0.1
### In precedenza l'ottimizzatore BertAdam veniva istanziato in questo modo:
optimizer = BertAdam(
model.parameters(),
lr=lr,
schedule="warmup_linear",
warmup=warmup_proportion,
num_training_steps=num_training_steps,
)
### e usato in questo modo:
for batch in train_data:
loss = model(batch)
loss.backward()
optimizer.step()
### In 🤗 Transformers, ottimizzatore e schedule sono divisi e usati in questo modo:
optimizer = AdamW(
model.parameters(), lr=lr, correct_bias=False
) # Per riprodurre il comportamento specifico di BertAdam impostare correct_bias=False
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps
) # PyTorch scheduler
### e va usato così:
for batch in train_data:
loss = model(batch)
loss.backward()
torch.nn.utils.clip_grad_norm_(
model.parameters(), max_grad_norm
) # Il gradient clipping non è più in AdamW (quindi puoi usare amp senza problemi)
optimizer.step()
scheduler.step()
```
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Addestramento con script
Insieme ai [notebooks](./notebooks/README) 🤗 Transformers, ci sono anche esempi di script che dimostrano come addestrare un modello per un task con [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow), o [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax).
Troverai anche script che abbiamo usato nei nostri [progetti di ricerca](https://github.com/huggingface/transformers/tree/main/examples/research_projects) e [precedenti esempi](https://github.com/huggingface/transformers/tree/main/examples/legacy) a cui contribuisce per lo più la comunità. Questi script non sono attivamente mantenuti e richiedono una specifica versione di 🤗 Transformers che sarà molto probabilmente incompatibile con l'ultima versione della libreria.
Non è dato per scontato che gli script di esempio funzionino senza apportare modifiche per ogni problema, bensì potrebbe essere necessario adattare lo script al tuo caso specifico. Per aiutarti in ciò, la maggioranza degli script espone le modalità di pre-processamento dei dati, consentendoti di modificare lo script come preferisci.
Per qualsiasi feature che vorresti implementare in uno script d'esempio, per favore discutine nel [forum](https://discuss.huggingface.co/) o in un'[issue](https://github.com/huggingface/transformers/issues) prima di inviare una Pull Request. Mentre accogliamo con piacere la correzione di bug, è più improbabile che faremo la stessa cosa con una PR che aggiunge funzionalità sacrificando la leggibilità.
Questa guida ti mostrerà come eseguire uno script di esempio relativo al task di summarization in [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) e [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization). Tutti gli esempi funzioneranno con entrambi i framework a meno che non sia specificato altrimenti.
## Installazione
Per eseguire con successo l'ultima versione degli script di esempio, devi **installare 🤗 Transformers dalla fonte** in un nuovo ambiente virtuale:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
pip install .
```
Per le precedenti versioni degli script di esempio, clicca sul pulsante di seguito:
<details>
<summary>Esempi per versioni precedenti di 🤗 Transformers</summary>
<ul>
<li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li>
</ul>
</details>
Successivamente, cambia la tua attuale copia di 🤗 Transformers specificandone la versione, ad esempio v3.5.1:
```bash
git checkout tags/v3.5.1
```
Dopo aver configurato correttamente la versione della libreria, naviga nella cartella degli esempi di tua scelta e installa i requisiti:
```bash
pip install -r requirements.txt
```
## Esegui uno script
<frameworkcontent>
<pt>
Lo script di esempio scarica e pre-processa un dataset dalla libreria 🤗 [Datasets](https://huggingface.co/docs/datasets/). Successivamente, lo script esegue il fine-tuning su un dataset usando il [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) su un'architettura che supporta la summarization. Il seguente esempio mostra come eseguire il fine-tuning di [T5-small](https://huggingface.co/google-t5/t5-small) sul dataset [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail). Il modello T5 richiede un parametro addizionale `source_prefix` a causa del modo in cui è stato addestrato. Questo prefisso permette a T5 di sapere che si tratta di un task di summarization.
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
</pt>
<tf>
Lo script di esempio scarica e pre-processa un dataset dalla libreria 🤗 [Datasets](https://huggingface.co/docs/datasets/). Successivamente, lo script esegue il fine-tuning su un dataset usando Keras su un'architettura che supporta la summarization. Il seguente esempio mostra come eseguire il fine-tuning di [T5-small](https://huggingface.co/google-t5/t5-small) sul dataset [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail). Il modello T5 richiede un parametro addizionale `source_prefix` a causa del modo in cui è stato addestrato. Questo prefisso permette a T5 di sapere che si tratta di un task di summarization.
```bash
python examples/tensorflow/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 16 \
--num_train_epochs 3 \
--do_train \
--do_eval
```
</tf>
</frameworkcontent>
## Addestramento distribuito e precisione mista
Il [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) supporta l'addestramento distribuito e la precisione mista, il che significa che puoi usarli anche in uno script. Per abilitare entrambe le funzionalità:
- Aggiungi l'argomento `fp16` per abilitare la precisione mista.
- Imposta il numero di GPU da usare con l'argomento `nproc_per_node`.
```bash
torchrun \
--nproc_per_node 8 pytorch/summarization/run_summarization.py \
--fp16 \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
Gli script TensorFlow utilizzano una [`MirroredStrategy`](https://www.tensorflow.org/guide/distributed_training#mirroredstrategy) per il training distribuito e non devi aggiungere alcun argomento addizionale allo script di training. Lo script TensorFlow userà multiple GPU in modo predefinito se queste sono disponibili.
## Esegui uno script su TPU
<frameworkcontent>
<pt>
Le Tensor Processing Units (TPU) sono state progettate per migliorare le prestazioni. PyTorch supporta le TPU con il compilatore per deep learning [XLA](https://www.tensorflow.org/xla) (guarda [questo link](https://github.com/pytorch/xla/blob/master/README.md) per maggiori dettagli). Per usare una TPU, avvia lo script `xla_spawn.py` e usa l'argomento `num_cores` per impostare il numero di core TPU che intendi usare.
```bash
python xla_spawn.py --num_cores 8 \
summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
</pt>
<tf>
Le Tensor Processing Units (TPU) sono state progettate per migliorare le prestazioni. Gli script TensorFlow utilizzano una [`TPUStrategy`](https://www.tensorflow.org/guide/distributed_training#tpustrategy) per eseguire l'addestramento su TPU. Per usare una TPU, passa il nome della risorsa TPU all'argomento `tpu`.
```bash
python run_summarization.py \
--tpu name_of_tpu_resource \
--model_name_or_path google-t5/t5-small \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 16 \
--num_train_epochs 3 \
--do_train \
--do_eval
```
</tf>
</frameworkcontent>
## Esegui uno script con 🤗 Accelerate
🤗 [Accelerate](https://huggingface.co/docs/accelerate) è una libreria compatibile solo con PyTorch che offre un metodo unificato per addestrare modelli su diverse tipologie di configurazioni (CPU, multiple GPU, TPU) mantenendo una completa visibilità rispetto al ciclo di training di PyTorch. Assicurati di aver effettuato l'installazione di 🤗 Accelerate, nel caso non lo avessi fatto:
> Nota: dato che Accelerate è in rapido sviluppo, è necessario installare la versione proveniente da git per eseguire gli script:
```bash
pip install git+https://github.com/huggingface/accelerate
```
Invece che usare lo script `run_summarization.py`, devi usare lo script `run_summarization_no_trainer.py`. Gli script supportati in 🤗 Accelerate avranno un file chiamato `task_no_trainer.py` nella rispettiva cartella. Per iniziare, esegui il seguente comando per creare e salvare un file di configurazione:
```bash
accelerate config
```
Testa la tua configurazione per assicurarti della sua correttezza:
```bash
accelerate test
```
Ora sei pronto per avviare l'addestramento:
```bash
accelerate launch run_summarization_no_trainer.py \
--model_name_or_path google-t5/t5-small \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir ~/tmp/tst-summarization
```
## Uso di un dataset personalizzato
Lo script di summarization supporta dataset personalizzati purché siano file CSV o JSON Line. Quando usi il tuo dataset, devi specificare diversi argomenti aggiuntivi:
- `train_file` e `validation_file` specificano dove si trovano i file di addestramento e validazione.
- `text_column` è il testo di input da riassumere.
- `summary_column` è il testo di destinazione per l'output.
Uno script di summarization usando un dataset personalizzato sarebbe simile a questo:
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--train_file path_to_csv_or_jsonlines_file \
--validation_file path_to_csv_or_jsonlines_file \
--text_column text_column_name \
--summary_column summary_column_name \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--overwrite_output_dir \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--predict_with_generate
```
## Testare uno script
È spesso una buona idea avviare il tuo script su un numero inferiore di esempi tratti dal dataset, per assicurarti che tutto funzioni come previsto prima di eseguire lo script sull'intero dataset, che potrebbe necessitare di ore. Usa i seguenti argomenti per limitare il dataset ad un massimo numero di esempi:
- `max_train_samples`
- `max_eval_samples`
- `max_predict_samples`
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--max_train_samples 50 \
--max_eval_samples 50 \
--max_predict_samples 50 \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
Not all example scripts support the `max_predict_samples` argument. If you aren't sure whether your script supports it, add the `-h` argument to check:
```bash
examples/pytorch/summarization/run_summarization.py -h
```
## Resume training from a checkpoint
Another helpful option is resuming training from a previous checkpoint. This ensures you can pick up where you left off without starting over if your training gets interrupted. There are two methods to resume training from a checkpoint.
The first method uses the `output_dir previous_output_dir` argument to resume training from the latest checkpoint stored in `output_dir`. In this case, you should remove `overwrite_output_dir`:
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--output_dir previous_output_dir \
--predict_with_generate
```
The second method uses the `resume_from_checkpoint path_to_specific_checkpoint` argument to resume training from a specific checkpoint folder:
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--resume_from_checkpoint path_to_specific_checkpoint \
--predict_with_generate
```
## Share your model
All scripts can upload your final model to the [Model Hub](https://huggingface.co/models). Make sure you are logged into Hugging Face before you begin:
```bash
huggingface-cli login
```
Then add the `push_to_hub` argument to the script. This argument will create a repository with your Hugging Face username and the folder specified in `output_dir`.
To give your repository a specific name, use the `push_to_hub_model_id` argument. The repository will be automatically listed under your namespace.
The following example shows how to upload a model with a specific repository name:
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--push_to_hub \
--push_to_hub_model_id finetuned-t5-cnn_dailymail \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Custom Tools and Prompts
<Tip>
If you don't know what tools and agents are in the context of transformers, we recommend reading the [Transformers Agents](transformers_agents) page first.
</Tip>
<Tip warning={true}>
Transformers Agents is an experimental API that is subject to change at any time. Results returned by the agents can vary as the underlying models are prone to change.
</Tip>
Creating and using custom tools and prompts is paramount to empowering the agent and having it perform new tasks.
In this guide we'll take a look at:
- How to customize the prompt
- How to use custom tools
- How to create custom tools
## Customizing the prompt
As explained in [Transformers Agents](transformers_agents), the agent can run in [`~Agent.run`] and [`~Agent.chat`] mode. Both the `run` and `chat` modes are based on the same logic: the language model powering the agent is conditioned on a long prompt and completes it by generating the next tokens until the stop token is reached.
The only difference between the two modes is that during the `chat` mode the prompt is extended with previous user inputs and model generations. This gives the agent access to past interactions, seemingly giving it some kind of memory.
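As a minimal illustration of what this memory enables (a sketch, assuming an `agent` instantiated as shown later in this guide), a follow-up request can refer back to an earlier turn:
```py
# Hypothetical two-turn exchange: in `chat` mode the prompt is extended with the
# previous exchange, so the second request can refer back to the generated picture.
agent.chat("Generate a picture of rivers and lakes")
agent.chat("Transform the picture so that there is a rock in there")
```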
### Structure of the prompt
To better understand how the prompt is structured and how it can best be customized, note that the prompt is divided roughly into four parts:
1. The introduction: how the agent should behave, and an explanation of the concept of tools.
2. The description of all the tools. This is defined by a `<<all_tools>>` token that is dynamically replaced at runtime with the tools defined/chosen by the user.
3. A set of examples of tasks and their solutions.
4. The current example, and the request for a solution.
To better understand each part, let's look at a shortened version of what the `run` prompt can look like:
````text
I will ask you to perform a task, your job is to come up with a series of simple commands in Python that will perform the task.
[...]
You can print intermediate results if it makes sense to do so.
Tools:
- document_qa: This is a tool that answers a question about a document (pdf). It takes an input named `document` which should be the document containing the information, as well as a `question` that is the question about the document. It returns a text that contains the answer to the question.
- image_captioner: This is a tool that generates a description of an image. It takes an input named `image` which should be the image to caption, and returns a text that contains the description in English.
[...]
Task: "Answer the question in the variable `question` about the image stored in the variable `image`. The question is in French."
I will use the following tools: `translator` to translate the question into English and then `image_qa` to answer the question on the input image.
Answer:
```py
translated_question = translator(question=question, src_lang="French", tgt_lang="English")
print(f"The translated question is {translated_question}.")
answer = image_qa(image=image, question=translated_question)
print(f"The answer is {answer}")
```
Task: "Identify the oldest person in the `document` and create an image showcasing the result as a banner."
I will use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer.
Answer:
```py
answer = document_qa(document, question="What is the oldest person?")
print(f"The answer is {answer}.")
image = image_generator("A banner showing " + answer)
```
[...]
Task: "Draw me a picture of rivers and lakes"
I will use the following
````
The introduction (the text before *"Tools:"*) explains precisely how the model should behave and what it should do. This part most likely does not need to be customized, as the agent should always behave the same way.
The second part (the bullet points below *"Tools"*) is dynamically added upon calling `run` or `chat`. There are exactly as many bullet points as there are tools in `agent.toolbox`, and each bullet point consists of the name and description of the tool:
```text
- <tool.name>: <tool.description>
```
Let's verify this quickly by loading the `document_qa` tool and printing out its name and description:
```py
from transformers import load_tool
document_qa = load_tool("document-question-answering")
print(f"- {document_qa.name}: {document_qa.description}")
```
which gives:
```text
- document_qa: This is a tool that answers a question about a document (pdf). It takes an input named `document` which should be the document containing the information, as well as a `question` that is the question about the document. It returns a text that contains the answer to the question.
```
The tool description consists of two parts: the first explains what the tool does, and the second states what input arguments and return values are expected.
A good tool name and description are very important for the agent to use the tool correctly. The only information the agent has about a tool is its name and description, so both should be precisely written and match the style of the existing tools in the toolbox. In particular, make sure the description mentions all the arguments expected by name in code-style, along with the expected types and a description of what they are.
<Tip>
Check the naming and description of the curated Transformers tools to better understand what name and description a tool is expected to have. You can see all tools with the [`Agent.toolbox`] property.
</Tip>
The curated examples: the prompt also contains a set of examples that show the agent exactly how tools should be used. These are very important, because they are written to maximize the likelihood of the agent generating correct, executable code in practice. Large language models are extremely good at recognizing patterns in a prompt and repeating them with new data, so the examples have to be written in a way that maximizes that likelihood for real use cases.
Here is one example:
````text
Task: "Identify the oldest person in the `document` and create an image showcasing the result as a banner."
I will use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer.
Answer:
```py
answer = document_qa(document, question="What is the oldest person?")
print(f"The answer is {answer}.")
image = image_generator("A banner showing " + answer)
```
````
The pattern the model is prompted to repeat has three parts: the task statement, the agent's explanation of what it intends to do, and finally the generated code. Every example that is part of the prompt has this exact pattern, thus making sure the agent reproduces it when generating new tokens.
The prompt examples are curated by the Transformers team and rigorously evaluated on a set of [problem statements](https://github.com/huggingface/transformers/blob/main/src/transformers/tools/evaluate_agent.py) to ensure that the agent's prompt is as good as possible at solving real use cases.
The final part of the prompt corresponds to:
```text
Task: "Draw me a picture of rivers and lakes"
I will use the following
```
This is the final, unfinished example that the agent is tasked to complete. The unfinished example is dynamically created based on the actual user input. For the above example, the user ran:
```py
agent.run("Draw me a picture of rivers and lakes")
```
The user input, i.e. the task "Draw me a picture of rivers and lakes", is cast into the prompt template: "Task: <task> \n\n I will use the following". This sentence makes up the final lines of the prompt the agent is conditioned on, and therefore strongly influences the agent to finish the example in exactly the same way as the previous examples.
Without going into too much detail, the chat template has the same prompt structure, with the examples having a slightly different style, e.g.:
````text
[...]
=====
Human: Answer the question in the variable `question` about the image stored in the variable `image`.
Assistant: I will use the tool `image_qa` to answer the question on the input image.
```py
answer = image_qa(text=question, image=image)
print(f"The answer is {answer}")
```
Human: I tried this code, it worked but didn't give me a good result. The question is in French
Assistant: In this case, the question needs to be translated first. I will use the tool `translator` to do this.
```py
translated_question = translator(question=question, src_lang="French", tgt_lang="English")
print(f"The translated question is {translated_question}.")
answer = image_qa(text=translated_question, image=image)
print(f"The answer is {answer}")
```
=====
[...]
````
*Human:* Contrary to the `run` prompt examples, each `chat` prompt example has one or more exchanges between the *Human* and the *Assistant*. Every exchange is structured similarly to the examples of the `run` prompt: the user's input is appended behind *Human:*, and the agent is prompted to first state what needs to be done before generating code. An exchange can build on previous exchanges, allowing the user to refer to earlier ones, as is done above by the input "I tried **this** code", which refers to the agent's previously generated code.
*Assistant:* Upon running `.chat`, the user's input or *task* is cast into an unfinished example of the form:
```text
Human: <user-input>\n\nAssistant:
```
which the agent completes. Contrary to the `run` command, the `chat` command then appends completed examples to the prompt, thus giving the agent more context for the next `chat` turn.
Now that we know how the prompt is structured, let's see how it can be customized!
### Writing good user inputs
While large language models are getting better and better at understanding users' intentions, it helps enormously to be as precise as possible in order to help the agent pick the correct task. What does it mean to be as precise as possible?
The agent sees a list of tool names and their descriptions in its prompt. The more tools are added, the more difficult it becomes for the agent to pick the correct tool, and it is even more difficult to pick the correct sequence of tools to run. Let's look at a common failure case; here we will only return the code so we can analyze it.
```py
from transformers import HfAgent
agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
agent.run("Show me a tree", return_code=True)
```
gives:
```text
==Explanation from the agent==
I will use the following tool: `image_segmenter` to create a segmentation mask for the image.
==Code generated by the agent==
mask = image_segmenter(image, prompt="tree")
```
which is probably not what we wanted. Instead, it is more likely that we want an image of a tree to be generated.
To help the agent use specific tools, it can therefore be very helpful to use important keywords that are present in the tool's name and description. Let's have a look:
```py
agent.toolbox["image_generator"].description
```
```text
'This is a tool that creates an image according to a prompt, which is a text description. It takes an input named `prompt` which contains the image description and outputs an image.
```
The name and description make use of the keywords "image", "prompt", "create" and "generate". Using these words will most likely work better here. Let's refine our prompt a bit:
```py
agent.run("Create an image of a tree", return_code=True)
```
gives:
```text
==Explanation from the agent==
I will use the following tool `image_generator` to generate an image of a tree.
==Code generated by the agent==
image = image_generator(prompt="tree")
```
In short, when you notice that the agent incorrectly maps your task to the right tools, try looking up the most pertinent keywords in the tool's name and description and refine your task request with them.
### Customizing the tool descriptions
As we've seen before, the agent has access to each tool's name and description. The base tools should have very precise names and descriptions, however, you might find that it helps to change the description or name of a tool for your specific use case. This can become especially important when you've added multiple tools that are very similar to each other, or if you want to use your agent only for a certain domain, e.g. image generation and transformations.
A common problem is that the agent confuses image generation with image transformation/modification when used frequently for image generation tasks, e.g.:
```py
agent.run("Make an image of a house and a car", return_code=True)
```
returns
```text
==Explanation from the agent==
I will use the following tools `image_generator` to generate an image of a house and `image_transformer` to transform the image of a car into the image of a house.
==Code generated by the agent==
house_image = image_generator(prompt="A house")
car_image = image_generator(prompt="A car")
house_car_image = image_transformer(image=car_image, prompt="A house")
```
which is probably not exactly what we want here. It seems the agent has a hard time understanding the difference between `image_generator` and `image_transformer`, and often uses the two together.
We can help the agent here by changing the tool name and description of `image_transformer`. Let's instead call it `modifier` to disassociate it a bit from "image" and "prompt":
```py
agent.toolbox["modifier"] = agent.toolbox.pop("image_transformer")
agent.toolbox["modifier"].description = agent.toolbox["modifier"].description.replace(
"transforms an image according to a prompt", "modifies an image"
)
```
The word "modify" is now a strong cue to use the new image processor, which should help with the prompt above. Let's run it again:
```py
agent.run("Make an image of a house and a car", return_code=True)
```
Now we're getting:
```text
==Explanation from the agent==
I will use the following tools: `image_generator` to generate an image of a house, then `image_generator` to generate an image of a car.
==Code generated by the agent==
house_image = image_generator(prompt="A house")
car_image = image_generator(prompt="A car")
```
which is much closer to what we had in mind! However, we want to have both the house and the car in the same image. Steering the task more toward single image generation should help:
```py
agent.run("Create image: 'A house and car'", return_code=True)
```
```text
==Explanation from the agent==
I will use the following tool: `image_generator` to generate an image.
==Code generated by the agent==
image = image_generator(prompt="A house and car")
```
<Tip warning={true}>
Agents are still brittle for many use cases, especially when it comes to slightly more complex ones like generating an image of multiple objects.
Both the agent itself and the underlying prompt will be further improved in the coming months, making agents more robust to a variety of user inputs.
</Tip>
### Customizing the whole prompt
To give the user maximum flexibility, the whole prompt template as explained [above](#structure-of-the-prompt) can be overwritten by the user. In this case, make sure that your custom prompt includes an introduction section, a tool section, an example section, and an unfinished example section. If you want to overwrite the `run` prompt template, you can do so as follows:
```py
template = """ [...] """
agent = HfAgent(your_endpoint, run_prompt_template=template)
```
<Tip warning={true}>
Please make sure to define the `<<all_tools>>` string and `<<prompt>>` somewhere in the `template` so that the agent can be aware of the tools it has available and correctly insert the user's prompt.
</Tip>
Similarly, you can overwrite the `chat` prompt template. Note that the `chat` mode always uses the following format for the exchanges:
```text
Human: <<task>>
Assistant:
```
Therefore, it is important that the examples of your custom `chat` prompt template also make use of this format. You can overwrite the `chat` template at instantiation as follows:
```python
template = """ [...] """
agent = HfAgent(url_endpoint=your_endpoint, chat_prompt_template=template)
```
<Tip warning={true}>
Please make sure to define the `<<all_tools>>` string somewhere in the `template` so that the agent is aware of the tools it can use.
</Tip>
In both cases, instead of the prompt template you can pass a repo ID if you would like to use a template hosted by someone in the community. The default prompts live in [this repo](https://huggingface.co/datasets/huggingface-tools/default-prompts) as an example.
To upload your custom prompt on a repo on the Hub and share it with the community, just make sure:
- to use a dataset repository
- to put the prompt template for the `run` command in a file named `run_prompt_template.txt`
- to put the prompt template for the `chat` command in a file named `chat_prompt_template.txt`
A minimal upload sketch is shown below.
## Using custom tools
In this section, we'll leverage two existing custom tools that are specific to image generation:
- We replace [huggingface-tools/image-transformation](https://huggingface.co/spaces/huggingface-tools/image-transformation) with [diffusers/controlnet-canny-tool](https://huggingface.co/spaces/diffusers/controlnet-canny-tool) to allow for more image modifications.
- We add a new tool for image upscaling to the default toolbox: [diffusers/latent-upscaler-tool](https://huggingface.co/spaces/diffusers/latent-upscaler-tool).
We start by loading the custom tools with the convenient [`load_tool`] function:
```py
from transformers import load_tool
controlnet_transformer = load_tool("diffusers/controlnet-canny-tool")
upscaler = load_tool("diffusers/latent-upscaler-tool")
```
Upon adding custom tools to an agent, the tools' descriptions and names are automatically included in the agent's prompt. Thus, it is imperative that custom tools have a well-written description and name so that the agent understands how to use them.
Let's take a look at the description and name of `controlnet_transformer`:
```py
print(f"Description: '{controlnet_transformer.description}'")
print(f"Name: '{controlnet_transformer.name}'")
```
gives
```text
Description: 'This is a tool that transforms an image with ControlNet according to a prompt.
It takes two inputs: `image`, which should be the image to transform, and `prompt`, which should be the prompt to use to change it. It returns the modified image.'
Name: 'image_transformer'
```
The name and description are accurate and fit the style of the [curated set of tools](./transformers_agents#a-curated-set-of-tools).
Next, let's instantiate an agent with `controlnet_transformer` and `upscaler`:
```py
tools = [controlnet_transformer, upscaler]
agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder", additional_tools=tools)
```
This command should give you the following info:
```text
image_transformer has been replaced by <transformers_modules.diffusers.controlnet-canny-tool.bd76182c7777eba9612fc03c0
8718a60c0aa6312.image_transformation.ControlNetTransformationTool object at 0x7f1d3bfa3a00> as provided in `additional_tools`
```
The set of curated tools already has an `image_transformer` tool, which is hereby replaced by our custom tool.
<Tip>
Overwriting existing tools can be beneficial if you want to use a custom tool for exactly the same task as an existing tool, because the agent is well-versed in using that specific task. Beware that in this case the custom tool should follow the exact same API as the overwritten tool, or you should adapt the prompt template to make sure all examples using that tool are updated.
</Tip>
The upscaler tool was given the name `image_upscaler`, which is not yet present in the default toolbox and is therefore simply added to the list of tools.
You can always take a look at the toolbox that is currently available to the agent via the `agent.toolbox` attribute:
```py
print("\n".join([f"- {a}" for a in agent.toolbox.keys()]))
```
```text
- document_qa
- image_captioner
- image_qa
- image_segmenter
- transcriber
- summarizer
- text_classifier
- text_qa
- text_reader
- translator
- image_transformer
- text_downloader
- image_generator
- video_generator
- image_upscaler
```
Note how `image_upscaler` is now part of the agent's toolbox.
Let's try out the new tools! We will re-use the image we generated in the [Transformers Agents Quickstart](./transformers_agents#single-execution-run):
```py
from diffusers.utils import load_image
image = load_image(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png"
)
```
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" width=200>
Let's transform the image into a beautiful winter landscape:
```py
image = agent.run("Transform the image: 'A frozen lake and snowy forest'", image=image)
```
```text
==Explanation from the agent==
I will use the following tool: `image_transformer` to transform the image.
==Code generated by the agent==
image = image_transformer(image, prompt="A frozen lake and snowy forest")
```
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes_winter.png" width=200>
The new image processing tool is based on ControlNet, which can make very strong modifications to the image.
By default, the image processing tool returns an image of size 512x512 pixels. Let's see if we can upscale it:
```py
image = agent.run("Upscale the image", image)
```
```text
==Explanation from the agent==
I will use the following tool: `image_upscaler` to upscale the image.
==Code generated by the agent==
upscaled_image = image_upscaler(image)
```
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes_winter_upscale.png" width=400>
The agent automatically mapped our prompt "Upscale the image" to the just-added upscaler tool, purely based on the tool's description and name, and was able to run it correctly.
Next, let's have a look at how you can create a new custom tool.
### Adding new tools
In this section, we show how to create a new tool that can be added to the agent.
#### Creating a new tool
We'll first start by creating the tool. We'll add the not-so-useful yet fun task of fetching the model on the Hugging Face Hub with the most downloads for a given task. We can do that with the following code:
```python
from huggingface_hub import list_models
task = "text-classification"
model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
print(model.id)
```
For the task `text-classification`, this returns `'facebook/bart-large-mnli'`; for `translation`, it returns `'google-t5/t5-base'`.
How do we convert this into a tool that the agent can leverage? All tools depend on the superclass `Tool`, which holds the main attributes necessary. We'll create a class that inherits from it:
```python
from transformers import Tool
class HFModelDownloadsTool(Tool):
pass
```
This class has a few requirements:
- An attribute `name`, which corresponds to the name of the tool itself. To be in tune with other tools, we'll name it `model_download_counter`.
- An attribute `description`, which will be used to populate the prompt of the agent.
- `inputs` and `outputs` attributes. Defining these will help the python interpreter make educated choices about types, and will allow for a gradio demo to be spawned when we push our tool to the Hub. They're both a list of expected values, which can be `text`, `image`, or `audio`.
- A `__call__` method, which contains the inference code. This is the code we've played with above!
Here's what our class looks like now:
```python
from transformers import Tool
from huggingface_hub import list_models
class HFModelDownloadsTool(Tool):
name = "model_download_counter"
description = (
"This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. "
"It takes the name of the category (such as text-classification, depth-estimation, etc), and "
"returns the name of the checkpoint."
)
inputs = ["text"]
outputs = ["text"]
def __call__(self, task: str):
model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
return model.id
```
We now have our tool handy. Save it in a file and import it from your main script. Let's name this file `model_downloads.py`, so the resulting import code looks like this:
```python
from model_downloads import HFModelDownloadsTool
tool = HFModelDownloadsTool()
```
In order to let others benefit from it, and for simpler initialization, we recommend pushing the tool to the Hub under your namespace. To do so, just call `push_to_hub` on the `tool` variable:
```python
tool.push_to_hub("hf-model-downloads")
```
You now have your code on the Hub! Let's take a look at the final step: having the agent use the tool.
#### Having the agent use the tool
We now have our tool living on the Hub, which can be instantiated as follows (change the user name to match your tool):
```python
from transformers import load_tool
tool = load_tool("lysandre/hf-model-downloads")
```
In order to use it in the agent, simply pass it in the `additional_tools` parameter of the agent initialization method:
```python
from transformers import HfAgent
agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder", additional_tools=[tool])
agent.run(
"Can you read out loud the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?"
)
```
which outputs the following:
```text
==Code generated by the agent==
model = model_download_counter(task="text-to-video")
print(f"The model with the most downloads is {model}.")
audio_model = text_reader(model)
==Result==
The model with the most downloads is damo-vilab/text-to-video-ms-1.7b.
```
which generates the following audio.
| **Audio** |
|------------------------------------------------------------------------------------------------------------------------------------------------------|
| <audio controls><source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/damo.wav" type="audio/wav"/> |
<Tip>
Depending on the LLM, some are quite brittle and require very exact prompts in order to work well. Having a well-defined name and description of the tool is paramount to having it leveraged by the agent.
</Tip>
### Replacing existing tools
Replacing existing tools can be done simply by assigning a new item to the agent's toolbox. Here's how one would do so:
```python
from transformers import HfAgent, load_tool
agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
agent.toolbox["image-transformation"] = load_tool("diffusers/controlnet-canny-tool")
```
<Tip>
Beware when replacing tools with others! This will also adjust the agent's prompt. This can be good if you have a better prompt suited for the task, but it can also result in your tool being picked way more than others, or in other tools being picked instead of the one you have defined.
</Tip>
## Leveraging gradio-tools
[gradio-tools](https://github.com/freddyaboulton/gradio-tools) is a powerful library that allows Hugging Face Spaces to be used as tools. It supports many existing Spaces, as well as custom Spaces designed with it.
We would like to leverage the `StableDiffusionPromptGeneratorTool` offered by `gradio_tools`. This tool is provided in the `gradio-tools` toolkit and we will use it to improve our prompts and generate better images.
We first import the tool from `gradio_tools` and instantiate it:
```python
from gradio_tools import StableDiffusionPromptGeneratorTool
gradio_tool = StableDiffusionPromptGeneratorTool()
```
We pass this instance to the `Tool.from_gradio` method:
```python
from transformers import Tool
tool = Tool.from_gradio(gradio_tool)
```
Now we can manage it exactly as we would any other custom tool. We use it to improve our prompt ` a rabbit wearing a space suit`:
```python
from transformers import HfAgent
agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder", additional_tools=[tool])
agent.run("Generate an image of the `prompt` after improving it.", prompt="A rabbit wearing a space suit")
```
The model adequately leverages the tool:
```text
==Explanation from the agent==
I will use the following tools: `StableDiffusionPromptGenerator` to improve the prompt, then `image_generator` to generate an image according to the improved prompt.
==Code generated by the agent==
improved_prompt = StableDiffusionPromptGenerator(prompt)
print(f"The improved prompt is {improved_prompt}.")
image = image_generator(improved_prompt)
```
before finally generating the image:
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png">
<Tip warning={true}>
gradio-tools requires *textual* inputs and outputs, even when working with different modalities. This implementation works with image and audio objects. The two are currently incompatible, but will rapidly become compatible as we work to improve the support.
</Tip>
## Future compatibility with Langchain
We love Langchain and think it has a very compelling suite of tools. In order to handle these tools, Langchain requires *textual* inputs and outputs, even when working with different modalities. These are often the serialized (i.e., saved to disk) versions of the objects.
This difference means that multi-modality isn't handled between transformers-agents and langchain.
We aim for this limitation to be resolved in future versions, and welcome any help from avid langchain users to achieve this compatibility.
We would love to have better support. If you would like to help, please [open an issue](https://github.com/huggingface/transformers/issues/new) and share what you have in mind.
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Generation with LLMs
[[open-in-colab]]
LLMs, or Large Language Models, are the key component behind text generation. In a nutshell, they consist of large pretrained transformer models trained to predict the next word (or, more precisely, token) given some input text. Since they predict one token at a time, you need to do something more elaborate to generate new sentences other than just calling the model: you need to do autoregressive generation.
Autoregressive generation is the inference-time procedure of iteratively calling a model with its own generated outputs, given a few initial inputs. In 🤗 Transformers, this is handled by the [`~generation.GenerationMixin.generate`] method, which is available to all models with generative capabilities.
This tutorial will show you how to:
* Generate text with an LLM
* Avoid common pitfalls
* Take the next steps to get the most out of your LLM
Before you begin, make sure you have all the necessary libraries installed:
```bash
pip install transformers bitsandbytes>=0.39.0 -q
```
## Generate text
A language model trained for [causal language modeling](tasks/language_modeling) takes a sequence of text tokens as input and returns the probability distribution for the next token.
<!-- [GIF 1 -- FWD PASS] -->
<figure class="image table text-center m-0 w-full">
<video
style="max-width: 90%; margin: auto;"
autoplay loop muted playsinline
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_1_1080p.mov"
></video>
<figcaption>"Forward pass of an LLM"</figcaption>
</figure>
A critical aspect of autoregressive generation with LLMs is how to select the next token from this probability distribution. Anything goes in this step, as long as you end up with a token for the next iteration: it can be as simple as selecting the most likely token from the probability distribution, or as complex as applying a dozen transformations before sampling from the resulting distribution.
<!-- [GIF 2 -- TEXT GENERATION] -->
<figure class="image table text-center m-0 w-full">
<video
style="max-width: 90%; margin: auto;"
autoplay loop muted playsinline
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_2_1080p.mov"
></video>
<figcaption>"Autoregressive generation iteratively selects the next token from a probability distribution to generate text"</figcaption>
</figure>
The process depicted above is repeated iteratively until some stopping condition is reached. Ideally, the stopping condition is dictated by the model, which should learn when to output an end-of-sequence (`EOS`) token. If this is not the case, generation stops when some predefined maximum length is reached.
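To make this concrete, here is a minimal sketch of the loop that [`~generation.GenerationMixin.generate`] abstracts away, using `gpt2` as a small stand-in checkpoint (greedy token selection, with both stopping conditions):
```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
input_ids = tokenizer("A list of colors: red, blue", return_tensors="pt").input_ids

with torch.no_grad():
    for _ in range(20):  # predefined maximum length, the fallback stopping condition
        next_token_logits = model(input_ids).logits[:, -1, :]
        next_token = next_token_logits.argmax(dim=-1, keepdim=True)  # greedy selection
        input_ids = torch.cat([input_ids, next_token], dim=-1)
        if next_token.item() == tokenizer.eos_token_id:  # model-dictated stopping condition
            break
print(tokenizer.decode(input_ids[0]))
```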
Properly setting up the token selection step and the stopping condition is essential to make your model behave as you'd expect on your task. That is why we have a [`~generation.GenerationConfig`] file associated with each model, which contains good default generative parameterization and is loaded alongside your model.
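As a quick illustration (a sketch, using the checkpoint featured later in this tutorial), you can inspect those defaults:
```py
from transformers import GenerationConfig

generation_config = GenerationConfig.from_pretrained("openlm-research/open_llama_7b")
print(generation_config)  # shows e.g. the default eos_token_id and max_length
```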
Let's talk code!
<Tip>
If you're interested in basic LLM usage, our high-level [`Pipeline`](pipeline_tutorial) interface is a great starting point. However, LLMs often require advanced features like quantization and fine control of the token selection step, which is best done through [`~generation.GenerationMixin.generate`]. Autoregressive generation with LLMs is also resource-intensive and should be executed on a GPU for adequate throughput.
</Tip>
<!-- TODO: update the example when llama 2 (or a newer popular baseline) becomes available -->
First, you need to load the model:
```py
>>> from transformers import AutoModelForCausalLM
>>> model = AutoModelForCausalLM.from_pretrained(
... "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True
... )
```
You'll notice two flags in the `from_pretrained` call:
- `device_map` ensures the model is moved to your GPU(s)
- `load_in_4bit` applies [4-bit dynamic quantization](main_classes/quantization) to massively reduce the resource requirements
There are other ways to initialize a model, but this is a good baseline to begin with an LLM.
Next, you need to preprocess your text input with a [tokenizer](tokenizer_summary):
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
>>> model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to("cuda")
```
The `model_inputs` variable holds the tokenized text input as well as the attention mask. While [`~generation.GenerationMixin.generate`] does its best effort to infer the attention mask when it is not passed, we recommend passing it whenever possible for optimal results.
Finally, call the [`~generation.GenerationMixin.generate`] method to get the generated tokens, which should be converted to text before printing:
```py
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A list of colors: red, blue, green, yellow, black, white, and brown'
```
And that's it! In a few lines of code, you can harness the power of an LLM.
## Common pitfalls
There are many [generation strategies](generation_strategies), and sometimes the default values may not be appropriate for your use case. If your outputs aren't aligned with what you're expecting, we've created a list of the most common pitfalls and how to avoid them.
```py
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
>>> tokenizer.pad_token = tokenizer.eos_token # Llama has no pad token by default
>>> model = AutoModelForCausalLM.from_pretrained(
... "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True
... )
```
### Generated output is too short/long
If not specified in the [`~generation.GenerationConfig`] file, `generate` returns up to 20 tokens by default. We highly recommend manually setting `max_new_tokens` in your `generate` call to control the maximum number of new tokens it can return. Keep in mind LLMs (more precisely, [decoder-only models](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt)) also return the input prompt as part of the output.
```py
>>> model_inputs = tokenizer(["A sequence of numbers: 1, 2"], return_tensors="pt").to("cuda")
>>> # By default, the output will contain up to 20 tokens
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A sequence of numbers: 1, 2, 3, 4, 5'
>>> # Setting `max_new_tokens` allows you to control the maximum length
>>> generated_ids = model.generate(**model_inputs, max_new_tokens=50)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A sequence of numbers: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,'
```
### Incorrect generation mode
By default, and unless specified in the [`~generation.GenerationConfig`] file, `generate` selects the most likely token at each iteration (greedy decoding). Depending on your task, this may be undesirable: creative tasks like chatbots or writing an essay benefit from sampling, while input-grounded tasks like audio transcription or translation benefit from greedy decoding. Enable sampling with `do_sample=True`; you can learn more about this topic in this [blog post](https://huggingface.co/blog/how-to-generate).
```py
>>> # Set seed for reproducibility -- you don't need this unless you want full reproducibility
>>> from transformers import set_seed
>>> set_seed(0)
>>> model_inputs = tokenizer(["I am a cat."], return_tensors="pt").to("cuda")
>>> # LLM + greedy decoding = repetitive, boring output
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'I am a cat. I am a cat. I am a cat. I am a cat'
>>> # With sampling, the output becomes more creative!
>>> generated_ids = model.generate(**model_inputs, do_sample=True)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'I am a cat.\nI just need to be. I am always.\nEvery time'
```
### Wrong padding side
LLMs are [decoder-only](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt) architectures, meaning they continue to iterate on your input prompt. If your inputs do not have the same length, they need to be padded. Since LLMs are not trained to continue from pad tokens, your input needs to be left-padded. Make sure you also don't forget to pass the attention mask to generate!
```py
>>> # The tokenizer initialized above has right-padding active by default: the 1st sequence,
>>> # which is shorter, has padding on the right side. Generation fails.
>>> model_inputs = tokenizer(
... ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt"
... ).to("cuda")
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids[0], skip_special_tokens=True)[0]
''
>>> # With left-padding, it works as expected!
>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b", padding_side="left")
>>> tokenizer.pad_token = tokenizer.eos_token # Llama has no pad token by default
>>> model_inputs = tokenizer(
... ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt"
... ).to("cuda")
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'1, 2, 3, 4, 5, 6,'
```
## Further resources
While the autoregressive generation process is relatively straightforward, making the most out of your LLM can be a challenging endeavor because there are many moving parts. For your next steps to dive deeper into LLM usage and understanding, check out the resources below.
<!-- TODO: complete with new guides -->
### Advanced generate usage
1. A [guide](generation_strategies) on how to control different generation methods, how to set up the generation configuration file, and how to stream the output;
2. The API reference on [`~generation.GenerationConfig`], [`~generation.GenerationMixin.generate`], and [generate-related classes](internal/generation_utils).
### LLM leaderboards
1. [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), which focuses on the quality of open-source models;
2. [Open LLM-Perf Leaderboard](https://huggingface.co/spaces/optimum/llm-perf-leaderboard), which focuses on LLM throughput.
### Latency and throughput
1. A [guide](main_classes/quantization) on dynamic quantization, which shows how to drastically reduce your memory requirements.
### Related libraries
1. [`text-generation-inference`](https://github.com/huggingface/text-generation-inference), a production-ready server for LLMs;
2. [`optimum`](https://github.com/huggingface/optimum), an extension of 🤗 Transformers that optimizes for specific hardware devices.
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Quantize 🤗 Transformers models
## `AutoGPTQ` Integration
🤗 Transformers has integrated the `optimum` API to perform GPTQ quantization on language models. You can load and quantize your model in 8, 4, 3 or even 2 bits without a big drop in performance and with faster inference speed! This is supported by most GPU hardware.
To learn more about the quantization method, check out:
- the [GPTQ](https://arxiv.org/pdf/2210.17323.pdf) paper
- the `optimum` [guide](https://huggingface.co/docs/optimum/llm_quantization/usage_guides/quantization) on GPTQ quantization
- the [`AutoGPTQ`](https://github.com/PanQiWei/AutoGPTQ) library used as the backend
### Requirements
You need the following requirements installed to run the code below:
- the latest `AutoGPTQ` library: `pip install auto-gptq`
- the latest `optimum` from source: `pip install git+https://github.com/huggingface/optimum.git`
- the latest `transformers` from source: `pip install git+https://github.com/huggingface/transformers.git`
- the latest `accelerate` library: `pip install --upgrade accelerate`
Note that the GPTQ integration supports only text models for now, and you may encounter unexpected behaviour for vision, speech or multi-modal models.
### Load and quantize a model
GPTQ is a quantization method that requires weight calibration before the quantized model can be used. If you want to quantize a transformers model from scratch, it can take some time to produce the quantized model (~5 min on a Google colab for the `facebook/opt-350m` model).
There are therefore two scenarios for using GPTQ-quantized models. The first is to load a model that has already been quantized by another user and is available on the Hub; the second is to quantize your model from scratch, then save it or push it to the Hub so that other users can use it too.
#### GPTQ Configuration
To load and quantize a model, you need to create a [`GPTQConfig`]. You need to pass the number of `bits`, a `dataset` to calibrate the quantization, and the model's `tokenizer` to prepare the dataset.
```python
from transformers import AutoTokenizer, GPTQConfig

model_id = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
gptq_config = GPTQConfig(bits=4, dataset = "c4", tokenizer=tokenizer)
```
Note that you can pass your own dataset as a list of strings. However, it is highly recommended to use the dataset from the GPTQ paper:
```python
dataset = ["auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."]
quantization = GPTQConfig(bits=4, dataset = dataset, tokenizer=tokenizer)
```
#### Quantization
You can quantize a model by using `from_pretrained` and setting the `quantization_config`:
```python
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=gptq_config)
```
Note that you will need a GPU to quantize a model. We will put the model on the CPU and move the modules back and forth to the GPU in order to quantize them.
If you want to maximize your GPU usage while using CPU offload, you can set `device_map = "auto"`:
```python
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=gptq_config)
```
Note that disk offload is not supported. Furthermore, if you run out of memory because of the dataset, you may have to pass `max_memory` in `from_pretrained`. Check out this [guide](https://huggingface.co/docs/accelerate/usage_guides/big_modeling#designing-a-device-map) to learn more about `device_map` and `max_memory`. A sketch of passing `max_memory` is shown below.
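As a sketch (the memory caps below are illustrative, and `model_id`/`gptq_config` are the objects defined above), passing `max_memory` could look like:
```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    max_memory={0: "30GiB", "cpu": "64GiB"},  # illustrative per-device caps
    quantization_config=gptq_config,
)
```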
<Tip warning={true}>
GPTQ quantization only works for text models for now. Furthermore, the quantization process can take a lot of time depending on your hardware (175B model = 4 gpu-hours using an NVIDIA A100). Please check on the Hub whether a GPTQ-quantized version of the model already exists. If not, you can submit a request on github.
</Tip>
### Push quantized model to 🤗 Hub
You can push a quantized model to the 🤗 Hub like any other model with `push_to_hub`. The quantization config will be saved and pushed along with the model:
```python
quantized_model.push_to_hub("opt-125m-gptq")
tokenizer.push_to_hub("opt-125m-gptq")
```
If you want to save your quantized model on your local machine, you can also do so with `save_pretrained`:
```python
quantized_model.save_pretrained("opt-125m-gptq")
tokenizer.save_pretrained("opt-125m-gptq")
```
If you quantized your model with a `device_map`, make sure to move the entire model to one of your GPUs or to the `cpu` before saving it:
```python
quantized_model.to("cpu")
quantized_model.save_pretrained("opt-125m-gptq")
```
### Load a quantized model from the 🤗 Hub
You can load a quantized model from the Hub with `from_pretrained`. Make sure that the pushed weights are quantized by checking that the attribute `quantization_config` is present in the model configuration object:
```python
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq")
```
If you want to load a model faster and without allocating more memory than needed, the `device_map` argument also works with quantized models. Make sure that the `accelerate` library is installed:
```python
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto")
```
### Exllama kernels for faster inference
For 4-bit models, you can use the exllama kernels for faster inference speed. They are enabled by default. You can change this behavior by passing `disable_exllama` in [`GPTQConfig`]. This will overwrite the quantization config stored in the model's config; note that you can only overwrite the attributes related to the kernels. Furthermore, you need to have the entire model on a GPU if you want to use the exllama kernels:
```py
import torch
from transformers import AutoModelForCausalLM, GPTQConfig
gptq_config = GPTQConfig(bits=4, disable_exllama=False)
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config = gptq_config)
```
Note that only 4-bit models are supported for now. Furthermore, it is recommended to deactivate the exllama kernels if you are finetuning a quantized model with peft.
#### Fine-tune a quantized model
With the official support of adapters in the Hugging Face ecosystem, you can fine-tune models that have been quantized with GPTQ.
Please have a look at the [`peft`](https://github.com/huggingface/peft) library for more details; a minimal sketch follows below.
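A minimal sketch, not an official recipe (the LoRA hyperparameters and `target_modules` below are illustrative, and `{your_username}/opt-125m-gptq` is the repo pushed above):
```python
from transformers import AutoModelForCausalLM, GPTQConfig
from peft import LoraConfig, get_peft_model

# Deactivate the exllama kernels for training, as recommended above
gptq_config = GPTQConfig(bits=4, disable_exllama=True)
model = AutoModelForCausalLM.from_pretrained(
    "{your_username}/opt-125m-gptq", device_map="auto", quantization_config=gptq_config
)

lora_config = LoraConfig(
    r=8, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, task_type="CAUSAL_LM"
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the adapter weights are trainable
```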
### Example demo
Check out the Google Colab [notebook](https://colab.research.google.com/drive/1_TIrmuKOFhuRRiTWN94iLKUFu6ZX4ceb?usp=sharing) to learn how to quantize a model with GPTQ and how to fine-tune the quantized model with peft.
### GPTQConfig
[[autodoc]] GPTQConfig
## `bitsandbytes` Integration
🤗 Transformers is closely integrated with the most used modules of `bitsandbytes`. You can load your model in 8-bit precision with a few lines of code. This is supported by most GPU hardware since the `0.37.0` release of `bitsandbytes`.
Learn more about the quantization method in the [LLM.int8()](https://arxiv.org/abs/2208.07339) paper, or the [blogpost](https://huggingface.co/blog/hf-bitsandbytes-integration) about the collaboration.
Since its `0.39.0` release, you can load any model that supports `device_map` using 4-bit quantization, leveraging the FP4 data type.
If you want to quantize your own pytorch model, check out this [documentation](https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization) from the 🤗 Accelerate library.
Here is what you can do using the `bitsandbytes` integration:
### General usage
You can quantize a model by using the `load_in_8bit` or `load_in_4bit` argument when calling the [`~PreTrainedModel.from_pretrained`] method, as long as your model supports loading with 🤗 Accelerate and contains `torch.nn.Linear` layers. This should work the same way for any modality:
```python
from transformers import AutoModelForCausalLM
model_8bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True)
model_4bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_4bit=True)
```
By default, all the other modules (e.g. `torch.nn.LayerNorm`) are converted to `torch.float16`. If you want to change their `dtype`, you can overwrite the `torch_dtype` argument:
```python
>>> import torch
>>> from transformers import AutoModelForCausalLM
>>> model_8bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True, torch_dtype=torch.float32)
>>> model_8bit.model.decoder.layers[-1].final_layer_norm.weight.dtype
torch.float32
```
### FP4 quantization
#### Requirements
Make sure you have the following requirements installed before running any of the code snippets below:
- the latest `bitsandbytes` library: `pip install bitsandbytes>=0.39.0`
- the latest `accelerate`: `pip install --upgrade accelerate`
- the latest `transformers`: `pip install --upgrade transformers`
#### Tips and best practices
- **Advanced usage:** Refer to [this Google Colab notebook](https://colab.research.google.com/drive/1ge2F1QSK8Q7h0hn3YKuBCOAS0bK8E0wf) for advanced usage of 4-bit quantization with all the possible options.
- **Faster inference with `batch_size=1`:** Since the `0.40.0` release of bitsandbytes, for `batch_size=1` you can benefit from fast inference out of the box. Check out [these release notes](https://github.com/TimDettmers/bitsandbytes/releases/tag/0.40.0) and make sure you have a version greater than `0.40.0` to use this feature.
- **Training:** According to the [QLoRA paper](https://arxiv.org/abs/2305.14314), for training 4-bit base models (e.g. using LoRA adapters), one should use `bnb_4bit_quant_type='nf4'`.
- **Inference:** For inference, `bnb_4bit_quant_type` does not have a big impact on performance. However, for consistency with the model's weights, make sure you use the same `bnb_4bit_compute_dtype` and `torch_dtype` arguments.
#### Load a large model in 4bit
By using `load_in_4bit=True` when calling the `.from_pretrained` method, you can reduce your memory usage by (roughly) a factor of 4:
```python
# pip install transformers accelerate bitsandbytes
from transformers import AutoModelForCausalLM, AutoTokenizer
model_id = "bigscience/bloom-1b7"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
```
<Tip warning={true}>
Note that once a model has been loaded in 4-bit, it is currently not possible to push the quantized weights to the Hub. Note also that 4-bit weights cannot be trained yet, as this is not supported. However, you can use 4-bit models to train extra parameters; this is covered in the next section.
</Tip>
### Load a large model in 8bit
By using the `load_in_8bit=True` argument when calling the `.from_pretrained` method, you can load a model with roughly half the memory requirements:
```python
# pip install transformers accelerate bitsandbytes
from transformers import AutoModelForCausalLM, AutoTokenizer
model_id = "bigscience/bloom-1b7"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_8bit=True)
```
Then, use the model as you would usually use a [`PreTrainedModel`].
You can check the memory footprint of your model with the `get_memory_footprint` method:
```python
print(model.get_memory_footprint())
```
With this integration, you can load large models on smaller devices and run them without any issue.
<Tip warning={true}>
Note that once a model has been loaded in 8-bit, it is currently not possible to push the quantized weights to the Hub, except if you use the latest `transformers` and `bitsandbytes`. Note also that 8-bit weights cannot be trained yet, as this is not supported. However, you can use 8-bit models to train extra parameters; this is covered in the next section.
Also, `device_map` is optional, but setting `device_map = 'auto'` is preferred for inference, as it will dispatch the model efficiently on the available resources.
</Tip>
#### Advanced use cases
Here we will cover some advanced use cases you can perform with FP4 quantization.
##### Change the compute dtype
The compute dtype is used to change the dtype that will be used during computation. For example, hidden states can be in `float32` but computation can be set to bf16 for speedups. By default, the compute dtype is set to `float32`:
```python
import torch
from transformers import BitsAndBytesConfig
quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
```
##### Using NF4 (Normal Float 4) data type
You can also use the NF4 (Normal Float 4) data type, a new 4-bit data type adapted for weights that have been initialized using a normal distribution. To do so, run:
```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
nf4_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
)
model_nf4 = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=nf4_config)
```
##### Use nested quantization for more memory efficient inference
We also advise using the nested quantization technique. It saves more memory at no additional performance cost. From empirical observations, this enables fine-tuning a llama-13b model on an NVIDIA-T4 16GB with a sequence length of 1024, a batch size of 1, and gradient accumulation steps of 4:
```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
double_quant_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
)
model_double_quant = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=double_quant_config)
```
### Push quantized models on the 🤗 Hub
You can push a quantized model to the Hub by simply using the `push_to_hub` method. This will first push the quantization configuration file, then push the quantized model weights.
Make sure to use `bitsandbytes>0.37.2` to be able to use this feature (at this time of writing, we tested it on `bitsandbytes==0.38.0.post1`):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m", device_map="auto", load_in_8bit=True)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model.push_to_hub("bloom-560m-8bit")
```
<Tip warning={true}>
Pushing 8-bit models to the Hub is strongly encouraged for large models. This allows the community to benefit from the reduced memory footprint, e.g. for loading large models on a Google Colab.
</Tip>
### Load a quantized model from the 🤗 Hub
You can load a quantized model from the Hub with the `from_pretrained` method. Make sure that the pushed weights are quantized by checking that the attribute `quantization_config` is present in the model configuration object:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
model = AutoModelForCausalLM.from_pretrained("{your_username}/bloom-560m-8bit", device_map="auto")
```
Note that in this case you don't need to specify the `load_in_8bit=True` argument, but you do need to have `bitsandbytes` and `accelerate` installed.
Also, `device_map` is optional, but setting `device_map = 'auto'` is preferred for inference, as it will dispatch the model efficiently on the available resources.
### Advanced use cases
This section is intended for advanced users who want to explore what is possible beyond loading and running 8-bit models.
#### Offload between `cpu` and `gpu`
One advanced use case is loading a model and dispatching the weights between the `CPU` and `GPU`. Note that the weights dispatched on the CPU **will not be converted** to 8-bit and are therefore kept in `float32`. This feature is intended for users that want to fit a very large model and dispatch it between the GPU and the CPU.
First, load a [`BitsAndBytesConfig`] from `transformers` and set the attribute `llm_int8_enable_fp32_cpu_offload` to `True`:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
quantization_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True)
```
Let's say you want to load the `bigscience/bloom-1b7` model and you have just enough GPU RAM to fit the entire model except the `lm_head`. You can then write a custom device_map as follows:
```python
device_map = {
"transformer.word_embeddings": 0,
"transformer.word_embeddings_layernorm": 0,
"lm_head": "cpu",
"transformer.h": 0,
"transformer.ln_f": 0,
}
```
And load your model as follows:
```python
model_8bit = AutoModelForCausalLM.from_pretrained(
"bigscience/bloom-1b7",
device_map=device_map,
quantization_config=quantization_config,
)
```
And that's it! Enjoy your model!
#### Play with `llm_int8_threshold`
You can play with the `llm_int8_threshold` argument to change the threshold for outliers. An "outlier" is a hidden-state value greater than a certain threshold, corresponding to the outlier threshold for outlier detection as described in the `LLM.int8()` paper. Any hidden-state value above this threshold is considered an outlier, and the operations on those values are performed in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but there are some exceptional systematic outliers that are very differently distributed for large models; these outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of magnitude ~5, but beyond that there is a significant performance penalty. A good default threshold is 6, but a lower threshold may be needed for more unstable models (small models, fine-tuning).
This argument can also impact the inference speed of the model. We suggest playing with this parameter to find the one that works best for your use case:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
model_id = "bigscience/bloom-1b7"
quantization_config = BitsAndBytesConfig(
llm_int8_threshold=10,
)
model_8bit = AutoModelForCausalLM.from_pretrained(
model_id,
device_map=device_map,
quantization_config=quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
```
#### Skip the conversion of some modules
Some models have modules that must not be converted to 8-bit to ensure stability. For example, the Jukebox model has several `lm_head` modules that should be skipped. Play with `llm_int8_skip_modules`:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
model_id = "bigscience/bloom-1b7"
quantization_config = BitsAndBytesConfig(
llm_int8_skip_modules=["lm_head"],
)
model_8bit = AutoModelForCausalLM.from_pretrained(
model_id,
device_map=device_map,
quantization_config=quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
```
#### Fine-tune a model that has been loaded in 8-bit
With the official support of adapters in the Hugging Face ecosystem, you can fine-tune models that have been loaded in 8-bit.
This enables fine-tuning large models such as `flan-t5-large` or `facebook/opt-6.7b` in a single Google Colab. Please have a look at the [`peft`](https://github.com/huggingface/peft) library for more details; a minimal sketch follows below.
Note that you don't need to pass `device_map` when loading the model for training. It will automatically load your model on your GPU. You can set the device map to a specific device if needed (e.g. `cuda:0`, `0`, `torch.device('cuda:0')`). Please note that `device_map=auto` should be used for inference only.
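A minimal sketch under those assumptions (the LoRA hyperparameters are illustrative; see the `peft` documentation for a full recipe):
```python
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True)
model = prepare_model_for_kbit_training(model)  # casts layer norms and enables gradient checkpointing

lora_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, task_type="CAUSAL_LM")
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # the 8-bit base weights stay frozen
```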
### BitsAndBytesConfig
[[autodoc]] BitsAndBytesConfig
## Quantization with 🤗 `optimum`
To learn more about the quantization methods supported by `optimum`, and to check whether they are applicable to your use case, have a look at the [Optimum documentation](https://huggingface.co/docs/optimum/index).