python_code | repo_name | file_path
---|---|---
import itertools
from typing import Iterable, Iterator, Optional, List, Any, Callable, Union
import logging
import os
from pathlib import Path
import warnings
from filelock import FileLock, Timeout
import jsonpickle
import torch.distributed as dist
from torch.utils.data import Dataset, IterableDataset, get_worker_info
from allennlp.data.instance import Instance
from allennlp.data.vocabulary import Vocabulary
from allennlp.common import Tqdm, util
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import CacheFile
from allennlp.common.registrable import Registrable
logger = logging.getLogger(__name__)
class AllennlpDataset(Dataset):
"""
An `AllennlpDataset` is created by calling `.read()` on a non-lazy `DatasetReader`.
It's essentially just a thin wrapper around a list of instances.
"""
def __init__(self, instances: List[Instance], vocab: Vocabulary = None):
self.instances = instances
self.vocab = vocab
def __getitem__(self, idx) -> Instance:
if self.vocab is not None:
self.instances[idx].index_fields(self.vocab)
return self.instances[idx]
def __len__(self):
return len(self.instances)
def __iter__(self) -> Iterator[Instance]:
"""
Even though it's not strictly necessary to implement this (iteration will fall back to
`__getitem__`), doing so helps with type-checking, since `AllennlpDataset` can then be
treated as an `Iterable[Instance]`.
"""
yield from self.instances
def index_with(self, vocab: Vocabulary):
self.vocab = vocab
class AllennlpLazyDataset(IterableDataset):
"""
An `AllennlpLazyDataset` is created by calling `.read()` on a lazy `DatasetReader`.
# Parameters
instance_generator : `Callable[[str], Iterable[Instance]]`
A factory function that creates an iterable of `Instance`s from a file path.
This is usually just `DatasetReader._instance_iterator`.
file_path : `str`
The path to pass to the `instance_generator` function.
vocab : `Vocabulary`, optional (default = `None`)
An optional vocab. This can also be set later with the `.index_with` method.
"""
def __init__(
self,
instance_generator: Callable[[str], Iterable[Instance]],
file_path: str,
vocab: Vocabulary = None,
) -> None:
super().__init__()
self._instance_generator = instance_generator
self._file_path = file_path
self.vocab = vocab
def __iter__(self) -> Iterator[Instance]:
for instance in self._instance_generator(self._file_path):
if self.vocab is not None:
instance.index_fields(self.vocab)
yield instance
def index_with(self, vocab: Vocabulary):
self.vocab = vocab
class DatasetReader(Registrable):
"""
A `DatasetReader` knows how to turn a file containing a dataset into a collection
of `Instances`. To implement your own, just override the `_read(file_path)` method
to return an `Iterable` of the instances. This could be a list containing the instances
or a lazy generator that returns them one at a time.
All parameters necessary to `_read` the data apart from the filepath should be passed
to the constructor of the `DatasetReader`.
# Parameters
lazy : `bool`, optional (default=`False`)
If this is true, `read()` returns an `AllennlpLazyDataset`, whose `__iter__` method
reloads the dataset each time it's called. Otherwise, `read()` eagerly reads all instances and returns an `AllennlpDataset`.
cache_directory : `str`, optional (default=`None`)
If given, we will use this directory to store a cache of already-processed `Instances` in
every file passed to :func:`read`, serialized (by default, though you can override this) as
one string-formatted `Instance` per line. If the cache file for a given `file_path` exists,
we read the `Instances` from the cache instead of re-processing the data (using
:func:`_instances_from_cache_file`). If the cache file does _not_ exist, we will _create_
it on our first pass through the data (using :func:`_instances_to_cache_file`).
!!! NOTE
It is the _caller's_ responsibility to make sure that this directory is
unique for any combination of code and parameters that you use. That is, if you pass a
directory here, we will use any existing cache files in that directory _regardless of the
parameters you set for this DatasetReader!_
max_instances : `int`, optional (default=`None`)
If given, will stop reading after this many instances. This is a useful setting for debugging.
Setting this disables caching.
manual_distributed_sharding: `bool`, optional (default=`False`)
By default, when used in a distributed setting, `DatasetReader` makes sure that each
worker process only receives a subset of the data. It does this by reading the whole
dataset in each worker, but filtering out the instances that are not needed. If you
can implement a faster mechanism that only reads part of the data, set this to True,
and do the sharding yourself.
manual_multi_process_sharding : `bool`, optional (default=`False`)
This is similar to the `manual_distributed_sharding` parameter, but applies to
multi-process data loading. By default, when this reader is used by a multi-process
data loader (i.e. a `DataLoader` with `num_workers > 1`), each worker will
filter out all but a subset of the instances that are needed so that you
don't end up with duplicates.
!!! NOTE
**There is really no benefit to using a multi-process
`DataLoader` unless you can specifically implement a faster sharding mechanism
within `_read()`**. In that case you should set `manual_multi_process_sharding`
to `True`.
serialization_dir: `str`, optional (default=`None`)
The directory where the training output is saved, or the directory the model is loaded from.
"""
CACHE_FILE_LOCK_TIMEOUT: int = 10
"""
The number of seconds to wait for the lock on a cache file to become available.
"""
def __init__(
self,
lazy: bool = False,
cache_directory: Optional[str] = None,
max_instances: Optional[int] = None,
manual_distributed_sharding: bool = False,
manual_multi_process_sharding: bool = False,
serialization_dir: Optional[str] = None,
) -> None:
self.lazy = lazy
self.max_instances = max_instances
self._cache_directory: Optional[Path] = None
if cache_directory:
self._cache_directory = Path(cache_directory)
os.makedirs(self._cache_directory, exist_ok=True)
self.manual_distributed_sharding = manual_distributed_sharding
self.manual_multi_process_sharding = manual_multi_process_sharding
self.serialization_dir = serialization_dir
def read(self, file_path: Union[Path, str]) -> Union[AllennlpDataset, AllennlpLazyDataset]:
"""
Returns a dataset containing all the instances that can be read from the file path.
If `self.lazy` is `False`, this eagerly reads all instances from `self._read()`
and returns an `AllennlpDataset`.
If `self.lazy` is `True`, this returns an `AllennlpLazyDataset`, which internally
relies on the generator created from `self._read()` to lazily produce `Instance`s.
In this case your implementation of `_read()` must also be lazy
(that is, not load all instances into memory at once), otherwise
you will get a `ConfigurationError`.
In either case, the returned `Iterable` can be iterated
over multiple times. It's unlikely you want to override this function,
but if you do, your result should likewise be repeatedly iterable.
"""
if not isinstance(file_path, str):
file_path = str(file_path)
lazy = getattr(self, "lazy", None)
if lazy is None:
warnings.warn(
"DatasetReader.lazy is not set, "
"did you forget to call the superclass constructor?",
UserWarning,
)
if lazy:
return AllennlpLazyDataset(self._instance_iterator, file_path)
else:
cache_file: Optional[str] = None
if self._cache_directory:
cache_file = self._get_cache_location_for_file_path(file_path)
if cache_file is not None and os.path.exists(cache_file):
try:
# Try to acquire a lock just to make sure another process isn't in the middle
# of writing to the cache.
cache_file_lock = FileLock(
cache_file + ".lock", timeout=self.CACHE_FILE_LOCK_TIMEOUT
)
cache_file_lock.acquire()
# We make an assumption here that if we can obtain the lock, no one will
# be trying to write to the file anymore, so it should be safe to release the lock
# before reading so that other processes can also read from it.
cache_file_lock.release()
logger.info("Reading instances from cache %s", cache_file)
instances = self._instances_from_cache_file(cache_file)
except Timeout:
logger.warning(
"Failed to acquire lock on dataset cache file within %d seconds. "
"Cannot use cache to read instances.",
self.CACHE_FILE_LOCK_TIMEOUT,
)
instances = self._multi_worker_islice(self._read(file_path))
else:
instances = self._multi_worker_islice(self._read(file_path))
# Then some validation.
if not isinstance(instances, list):
instances = list(instances)
if not instances:
raise ConfigurationError(
"No instances were read from the given filepath {}. "
"Is the path correct?".format(file_path)
)
# And finally we try writing to the cache.
if cache_file is not None and not os.path.exists(cache_file):
if self.max_instances is not None:
# But we don't write to the cache when max_instances is specified.
logger.warning(
"Skipping writing to data cache since max_instances was specified."
)
elif util.is_distributed() or (get_worker_info() and get_worker_info().num_workers):
# We also shouldn't write to the cache if there's more than one process loading
# instances since each worker only receives a partial share of the instances.
logger.warning(
"Can't cache data instances when there are multiple processes loading data"
)
else:
try:
with FileLock(cache_file + ".lock", timeout=self.CACHE_FILE_LOCK_TIMEOUT):
self._instances_to_cache_file(cache_file, instances)
except Timeout:
logger.warning(
"Failed to acquire lock on dataset cache file within %d seconds. "
"Cannot write to cache.",
self.CACHE_FILE_LOCK_TIMEOUT,
)
return AllennlpDataset(instances)
def _get_cache_location_for_file_path(self, file_path: str) -> str:
assert self._cache_directory is not None
return str(self._cache_directory / util.flatten_filename(str(file_path)))
def _read(self, file_path: str) -> Iterable[Instance]:
"""
Reads the instances from the given file_path and returns them as an
`Iterable` (which could be a list or could be a generator).
You are strongly encouraged to use a generator, so that users can
read a dataset in a lazy way, if they so choose.
"""
raise NotImplementedError
def _instances_from_cache_file(self, cache_filename: str) -> Iterable[Instance]:
with open(cache_filename, "r") as cache_file:
yield from self._multi_worker_islice(cache_file, self.deserialize_instance)
def _instances_to_cache_file(self, cache_filename, instances) -> None:
# We serialize to a temp file first in case anything goes wrong while
# writing to cache (e.g., the computer shuts down unexpectedly).
# Then we just copy the file over to `cache_filename`.
with CacheFile(cache_filename, mode="w+") as cache_handle:
logger.info("Caching instances to temp file %s", cache_handle.name)
for instance in Tqdm.tqdm(instances, desc="caching instances"):
cache_handle.write(self.serialize_instance(instance) + "\n")
def text_to_instance(self, *inputs) -> Instance:
"""
Does whatever tokenization or processing is necessary to go from textual input to an
`Instance`. The primary intended use for this is with a
:class:`~allennlp.predictors.predictor.Predictor`, which gets text input as a JSON
object and needs to process it to be input to a model.
The intent here is to share code between :func:`_read` and what happens at
model serving time, or any other time you want to make a prediction from new data. We need
to process the data in the same way it was done at training time. Allowing the
`DatasetReader` to process new text lets us accomplish this, as we can just call
`DatasetReader.text_to_instance` when serving predictions.
The input type here is rather vaguely specified, unfortunately. The `Predictor` will
have to make some assumptions about the kind of `DatasetReader` that it's using, in order
to pass it the right information.
"""
raise NotImplementedError
def serialize_instance(self, instance: Instance) -> str:
"""
Serializes an `Instance` to a string. We use this for caching the processed data.
The default implementation is to use `jsonpickle`. If you would like some other format
for your pre-processed data, override this method.
"""
return jsonpickle.dumps(instance)
def deserialize_instance(self, string: str) -> Instance:
"""
Deserializes an `Instance` from a string. We use this when reading processed data from a
cache.
The default implementation is to use `jsonpickle`. If you would like some other format
for your pre-processed data, override this method.
"""
return jsonpickle.loads(string.strip()) # type: ignore
def _multi_worker_islice(
self,
iterable: Iterable[Any],
transform: Optional[Callable[[Any], Instance]] = None,
ensure_lazy: bool = False,
) -> Iterable[Instance]:
"""
Helper method that determines which raw instances to skip based on the current
node rank (for distributed training) and worker ID (for multi-process data loading).
# Parameters
iterable : `Iterable[Any]`
An iterable that yields raw data that can be transformed into `Instance`s
through the `transform` function.
transform : `Optional[Callable[[Any], Instance]]`, optional (default = `None`)
An optional function that will be applied to the raw data generated
by `iterable` to create `Instance`s. This is used, e.g., when reading
cached data.
ensure_lazy : `bool`, optional (default = `False`)
If `True`, a `ConfigurationError` error will be raised if `iterable`
is a list instead of a lazy generator type.
# Returns
`Iterable[Instance]`
"""
if ensure_lazy and isinstance(iterable, (list, tuple)):
raise ConfigurationError("For a lazy dataset reader, _read() must return a generator")
wrap_with_tqdm = True
start_index = 0
step_size = 1
if not self.manual_distributed_sharding and util.is_distributed():
start_index = dist.get_rank()
step_size = dist.get_world_size()
worker_info = None if self.manual_multi_process_sharding else get_worker_info()
if worker_info:
warnings.warn(
"Using multi-process data loading without setting "
"DatasetReader.manual_multi_process_sharding to True.\n"
"Did you forget to set this?\n"
"If you're not handling the multi-process sharding logic within your "
"_read() method, there is probably no benefit to using more than one "
"worker.",
UserWarning,
)
# Scale `start_index` by `num_workers`, then shift by worker `id`.
start_index *= worker_info.num_workers
start_index += worker_info.id
# Scale `step_size` by `num_workers`.
step_size *= worker_info.num_workers
if worker_info.id > 0:
# We only want to log with tqdm from the main loader process.
wrap_with_tqdm = False
islice = itertools.islice(iterable, start_index, self.max_instances, step_size)
if wrap_with_tqdm:
islice = Tqdm.tqdm(islice, desc="reading instances")
if transform is not None:
return (transform(x) for x in islice)
return islice
def _instance_iterator(self, file_path: str) -> Iterable[Instance]:
cache_file: Optional[str] = None
if self._cache_directory:
cache_file = self._get_cache_location_for_file_path(file_path)
if cache_file is not None and os.path.exists(cache_file):
cache_file_lock = FileLock(cache_file + ".lock", timeout=self.CACHE_FILE_LOCK_TIMEOUT)
try:
cache_file_lock.acquire()
# We make an assumption here that if we can obtain the lock, no one will
# be trying to write to the file anymore, so it should be safe to release the lock
# before reading so that other processes can also read from it.
cache_file_lock.release()
logger.info("Reading instances from cache %s", cache_file)
with open(cache_file) as data_file:
yield from self._multi_worker_islice(
data_file, transform=self.deserialize_instance
)
except Timeout:
logger.warning(
"Failed to acquire lock on dataset cache file within %d seconds. "
"Cannot use cache to read instances.",
self.CACHE_FILE_LOCK_TIMEOUT,
)
yield from self._multi_worker_islice(self._read(file_path), ensure_lazy=True)
elif cache_file is not None and not os.path.exists(cache_file):
instances = self._multi_worker_islice(self._read(file_path), ensure_lazy=True)
# The cache file doesn't exist so we'll try writing to it.
if self.max_instances is not None:
# But we don't write to the cache when max_instances is specified.
logger.warning("Skipping writing to data cache since max_instances was specified.")
yield from instances
elif util.is_distributed() or (get_worker_info() and get_worker_info().num_workers):
# We also shouldn't write to the cache if there's more than one process loading
# instances since each worker only receives a partial share of the instances.
logger.warning(
"Can't cache data instances when there are multiple processes loading data"
)
yield from instances
else:
try:
with FileLock(cache_file + ".lock", timeout=self.CACHE_FILE_LOCK_TIMEOUT):
with CacheFile(cache_file, mode="w+") as cache_handle:
logger.info("Caching instances to temp file %s", cache_handle.name)
for instance in instances:
cache_handle.write(self.serialize_instance(instance) + "\n")
yield instance
except Timeout:
logger.warning(
"Failed to acquire lock on dataset cache file within %d seconds. "
"Cannot write to cache.",
self.CACHE_FILE_LOCK_TIMEOUT,
)
yield from instances
else:
# No cache.
yield from self._multi_worker_islice(self._read(file_path), ensure_lazy=True)
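# --- Illustrative sketch (not part of the original file) ---
# A minimal concrete `DatasetReader` for a hypothetical tab-separated file of
# "<text>\t<label>" lines, showing the usual pattern of overriding `_read()` and
# `text_to_instance()`. The class name and file format are assumptions made purely for
# illustration; a real reader would normally also be registered with
# `@DatasetReader.register("some-name")`.
class _ExampleTsvClassificationReader(DatasetReader):
    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Deferred imports so this sketch doesn't touch the module's import graph.
        from allennlp.data.tokenizers import WhitespaceTokenizer
        from allennlp.data.token_indexers import SingleIdTokenIndexer
        self._tokenizer = WhitespaceTokenizer()
        self._token_indexers = {"tokens": SingleIdTokenIndexer()}
    def _read(self, file_path: str) -> Iterable[Instance]:
        # Yielding one instance at a time keeps the reader usable with `lazy=True`.
        with open(file_path) as data_file:
            for line in data_file:
                text, label = line.rstrip("\n").split("\t")
                yield self.text_to_instance(text, label)
    def text_to_instance(self, text: str, label: str = None) -> Instance:
        from allennlp.data.fields import TextField, LabelField
        fields = {"tokens": TextField(self._tokenizer.tokenize(text), self._token_indexers)}
        if label is not None:
            fields["label"] = LabelField(label)
        return Instance(fields)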
| allennlp-master | allennlp/data/dataset_readers/dataset_reader.py |
import glob
import logging
import os
import torch
from typing import Iterable
from allennlp.common import util
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
logger = logging.getLogger(__name__)
@DatasetReader.register("sharded")
class ShardedDatasetReader(DatasetReader):
"""
Wraps another dataset reader and uses it to read from multiple input files.
Note that in this case the `file_path` passed to `read()` should either be a glob path
or a path or URL to an archive file ('.zip' or '.tar.gz').
The dataset reader will return instances from all files matching the glob, or all
files within the archive.
The order the files are processed in is deterministic to enable the
instances to be filtered according to worker rank in the distributed case.
Registered as a `DatasetReader` with name "sharded".
This class accepts all additional parameters of any `DatasetReader` class via `**kwargs`.
Values passed directly to this class's constructor take priority; when a required attribute
(such as `lazy`) is not given, it is inherited from the `base_reader`.
# Parameters
base_reader : `DatasetReader`
Reader with a read method that accepts a single file.
"""
def __init__(self, base_reader: DatasetReader, **kwargs) -> None:
# ShardedDatasetReader wraps the original base_reader, so some parameters, like 'lazy',
# can safely be inherited from it. However, ShardedDatasetReader is itself a DatasetReader,
# so we give priority to the parameters passed for this instance in 'kwargs' and only fall
# back to the base reader's values when they are not present.
kwargs["lazy"] = kwargs.get("lazy", base_reader.lazy)
super().__init__(manual_distributed_sharding=True, **kwargs)
if util.is_distributed():
self._rank = torch.distributed.get_rank()
self._world_size = torch.distributed.get_world_size()
else:
self._rank = 0
self._world_size = 1
self.reader = base_reader
# We have to check that the base reader doesn't implement manual distributed
# sharding itself, because if it does, then only a fraction of the instances
# will be read.
if getattr(self.reader, "manual_distributed_sharding", False):
raise ValueError(
"The base reader of a sharded dataset reader should not implement "
"manual distributed sharding itself."
)
# However we still need to set this flag to `True` after the fact so that
# all of the instances within each shard are used.
self.reader.manual_distributed_sharding = True
def text_to_instance(self, *args, **kwargs) -> Instance:
"""
Just delegate to the base reader text_to_instance.
"""
return self.reader.text_to_instance(*args, **kwargs) # type: ignore
def _read(self, file_path: str) -> Iterable[Instance]:
try:
maybe_extracted_archive = cached_path(file_path, extract_archive=True)
if not os.path.isdir(maybe_extracted_archive):
# This isn't a directory, so `file_path` is just a file.
raise ConfigurationError(f"{file_path} should be an archive or directory")
shards = [
os.path.join(maybe_extracted_archive, p)
for p in os.listdir(maybe_extracted_archive)
if not p.startswith(".")
]
if not shards:
raise ConfigurationError(f"No files found in {file_path}")
except FileNotFoundError:
# Not a local or remote archive, so treat as a glob.
shards = glob.glob(file_path)
if not shards:
raise ConfigurationError(f"No files found matching {file_path}")
# Ensure a consistent order.
shards.sort()
for i, shard in enumerate(shards):
if i % self._world_size == self._rank:
logger.info(f"reading instances from {shard}")
for instance in self.reader.read(shard):
yield instance
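# --- Illustrative sketch (not part of the original file) ---
# Wrapping a real base reader (`SequenceTaggingDatasetReader`) so that each shard file is
# read in full by exactly one distributed worker. The glob path below is hypothetical.
def _example_sharded_usage():
    from allennlp.data.dataset_readers import SequenceTaggingDatasetReader
    base_reader = SequenceTaggingDatasetReader()
    sharded_reader = ShardedDatasetReader(base_reader=base_reader, lazy=True)
    # `file_path` may be a glob over shard files, or a path/URL to a .zip or .tar.gz archive.
    return sharded_reader.read("/path/to/shards/train-*.tsv")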
| allennlp-master | allennlp/data/dataset_readers/sharded_dataset_reader.py |
from allennlp.data.dataset_readers.dataset_utils.span_utils import enumerate_spans
from allennlp.data.dataset_readers.dataset_utils.span_utils import bio_tags_to_spans
from allennlp.data.dataset_readers.dataset_utils.span_utils import to_bioul, iob1_to_bioul
from allennlp.data.dataset_readers.dataset_utils.span_utils import bioul_tags_to_spans
| allennlp-master | allennlp/data/dataset_readers/dataset_utils/__init__.py |
from typing import Callable, List, Set, Tuple, TypeVar, Optional
import warnings
from allennlp.common.checks import ConfigurationError
from allennlp.data.tokenizers import Token
TypedSpan = Tuple[int, Tuple[int, int]]
TypedStringSpan = Tuple[str, Tuple[int, int]]
class InvalidTagSequence(Exception):
def __init__(self, tag_sequence=None):
super().__init__()
self.tag_sequence = tag_sequence
def __str__(self):
return " ".join(self.tag_sequence)
T = TypeVar("T", str, Token)
def enumerate_spans(
sentence: List[T],
offset: int = 0,
max_span_width: int = None,
min_span_width: int = 1,
filter_function: Callable[[List[T]], bool] = None,
) -> List[Tuple[int, int]]:
"""
Given a sentence, return all token spans within the sentence. Spans are `inclusive`.
Additionally, you can provide a maximum and minimum span width, which will be used
to exclude spans outside of this range.
Finally, you can provide a function mapping `List[T] -> bool`, which will
be applied to every span to decide whether that span should be included. This
allows filtering by length, regex matches, pos tags or any Spacy `Token`
attributes, for example.
# Parameters
sentence : `List[T]`, required.
The sentence to generate spans for. The type is generic, as this function
can be used with strings, or Spacy `Tokens` or other sequences.
offset : `int`, optional (default = `0`)
A numeric offset to add to all span start and end indices. This is helpful
if the sentence is part of a larger structure, such as a document, which
the indices need to respect.
max_span_width : `int`, optional (default = `None`)
The maximum length of spans which should be included. Defaults to len(sentence).
min_span_width : `int`, optional (default = `1`)
The minimum length of spans which should be included. Defaults to 1.
filter_function : `Callable[[List[T]], bool]`, optional (default = `None`)
A function mapping sequences of the passed type T to a boolean value.
If the function evaluates to `True`, the span is included in the returned spans from the
sentence; otherwise it is excluded.
"""
max_span_width = max_span_width or len(sentence)
filter_function = filter_function or (lambda x: True)
spans: List[Tuple[int, int]] = []
for start_index in range(len(sentence)):
last_end_index = min(start_index + max_span_width, len(sentence))
first_end_index = min(start_index + min_span_width - 1, len(sentence))
for end_index in range(first_end_index, last_end_index):
start = offset + start_index
end = offset + end_index
# add 1 to end index because span indices are inclusive.
if filter_function(sentence[slice(start_index, end_index + 1)]):
spans.append((start, end))
return spans
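# --- Illustrative check (not part of the original file) ---
# With a hypothetical three-token sentence and `max_span_width=2`, the inclusive spans
# come out ordered by start index.
def _example_enumerate_spans():
    spans = enumerate_spans(["The", "cat", "sat"], max_span_width=2)
    assert spans == [(0, 0), (0, 1), (1, 1), (1, 2), (2, 2)]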
def bio_tags_to_spans(
tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[TypedStringSpan]:
"""
Given a sequence corresponding to BIO tags, extracts spans.
Spans are inclusive and can be of zero length, representing a single word span.
Ill-formed spans are also included (i.e., those which do not start with a "B-LABEL"),
as otherwise it is possible to get a perfect precision score whilst still predicting
ill-formed spans in addition to the correct spans. This function works properly when
the spans are unlabeled (i.e., your labels are simply "B", "I", and "O").
# Parameters
tag_sequence : `List[str]`, required.
The string class labels (BIO tags) for a sequence.
classes_to_ignore : `List[str]`, optional (default = `None`).
A list of string class labels `excluding` the bio tag
which should be ignored when extracting spans.
# Returns
spans : `List[TypedStringSpan]`
The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
Note that the label `does not` contain any BIO tag prefixes.
"""
classes_to_ignore = classes_to_ignore or []
spans: Set[Tuple[str, Tuple[int, int]]] = set()
span_start = 0
span_end = 0
active_conll_tag = None
for index, string_tag in enumerate(tag_sequence):
# Actual BIO tag.
bio_tag = string_tag[0]
if bio_tag not in ["B", "I", "O"]:
raise InvalidTagSequence(tag_sequence)
conll_tag = string_tag[2:]
if bio_tag == "O" or conll_tag in classes_to_ignore:
# The span has ended.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = None
# We don't care about tags we are
# told to ignore, so we do nothing.
continue
elif bio_tag == "B":
# We are entering a new span; reset indices
# and active tag to new span.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = conll_tag
span_start = index
span_end = index
elif bio_tag == "I" and conll_tag == active_conll_tag:
# We're inside a span.
span_end += 1
else:
# This is the case the bio label is an "I", but either:
# 1) the span hasn't started - i.e. an ill formed span.
# 2) The span is an I tag for a different conll annotation.
# We'll process the previous span if it exists, but also
# include this span. This is important, because otherwise,
# a model may get a perfect F1 score whilst still including
# false positive ill-formed spans.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = conll_tag
span_start = index
span_end = index
# Last token might have been a part of a valid span.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
return list(spans)
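# --- Illustrative check (not part of the original file) ---
# A hypothetical BIO sequence with two entities. The spans are accumulated in a set,
# so the returned list is compared as a set.
def _example_bio_tags_to_spans():
    spans = bio_tags_to_spans(["B-PER", "I-PER", "O", "B-LOC"])
    assert set(spans) == {("PER", (0, 1)), ("LOC", (3, 3))}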
def iob1_tags_to_spans(
tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[TypedStringSpan]:
"""
Given a sequence corresponding to IOB1 tags, extracts spans.
Spans are inclusive and can be of zero length, representing a single word span.
Ill-formed spans are also included (i.e., those where "B-LABEL" is not preceded
by "I-LABEL" or "B-LABEL").
# Parameters
tag_sequence : `List[str]`, required.
The string class labels (IOB1 tags) for a sequence.
classes_to_ignore : `List[str]`, optional (default = `None`).
A list of string class labels `excluding` the bio tag
which should be ignored when extracting spans.
# Returns
spans : `List[TypedStringSpan]`
The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
Note that the label `does not` contain any BIO tag prefixes.
"""
classes_to_ignore = classes_to_ignore or []
spans: Set[Tuple[str, Tuple[int, int]]] = set()
span_start = 0
span_end = 0
active_conll_tag = None
prev_bio_tag = None
prev_conll_tag = None
for index, string_tag in enumerate(tag_sequence):
curr_bio_tag = string_tag[0]
curr_conll_tag = string_tag[2:]
if curr_bio_tag not in ["B", "I", "O"]:
raise InvalidTagSequence(tag_sequence)
if curr_bio_tag == "O" or curr_conll_tag in classes_to_ignore:
# The span has ended.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = None
elif _iob1_start_of_chunk(prev_bio_tag, prev_conll_tag, curr_bio_tag, curr_conll_tag):
# We are entering a new span; reset indices
# and active tag to new span.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = curr_conll_tag
span_start = index
span_end = index
else:
# bio_tag == "I" and curr_conll_tag == active_conll_tag
# We're continuing a span.
span_end += 1
prev_bio_tag = string_tag[0]
prev_conll_tag = string_tag[2:]
# Last token might have been a part of a valid span.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
return list(spans)
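# --- Illustrative check (not part of the original file) ---
# In IOB1, "B" marks a span that starts immediately after another span of the same type,
# so a hypothetical "I-LOC B-LOC I-LOC" sequence yields two adjacent LOC spans
# (compared as a set, since the return order is not guaranteed).
def _example_iob1_tags_to_spans():
    spans = iob1_tags_to_spans(["I-LOC", "B-LOC", "I-LOC"])
    assert set(spans) == {("LOC", (0, 0)), ("LOC", (1, 2))}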
def _iob1_start_of_chunk(
prev_bio_tag: Optional[str],
prev_conll_tag: Optional[str],
curr_bio_tag: str,
curr_conll_tag: str,
) -> bool:
if curr_bio_tag == "B":
return True
if curr_bio_tag == "I" and prev_bio_tag == "O":
return True
if curr_bio_tag != "O" and prev_conll_tag != curr_conll_tag:
return True
return False
def bioul_tags_to_spans(
tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[TypedStringSpan]:
"""
Given a sequence corresponding to BIOUL tags, extracts spans.
Spans are inclusive and can be of zero length, representing a single word span.
Ill-formed spans are not allowed and will raise `InvalidTagSequence`.
This function works properly when the spans are unlabeled (i.e., your labels are
simply "B", "I", "O", "U", and "L").
# Parameters
tag_sequence : `List[str]`, required.
The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"].
classes_to_ignore : `List[str]`, optional (default = `None`).
A list of string class labels `excluding` the bio tag
which should be ignored when extracting spans.
# Returns
spans : `List[TypedStringSpan]`
The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
"""
spans = []
classes_to_ignore = classes_to_ignore or []
index = 0
while index < len(tag_sequence):
label = tag_sequence[index]
if label[0] == "U":
spans.append((label.partition("-")[2], (index, index)))
elif label[0] == "B":
start = index
while label[0] != "L":
index += 1
if index >= len(tag_sequence):
raise InvalidTagSequence(tag_sequence)
label = tag_sequence[index]
if not (label[0] == "I" or label[0] == "L"):
raise InvalidTagSequence(tag_sequence)
spans.append((label.partition("-")[2], (start, index)))
else:
if label != "O":
raise InvalidTagSequence(tag_sequence)
index += 1
return [span for span in spans if span[0] not in classes_to_ignore]
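# --- Illustrative check (not part of the original file) ---
# A hypothetical well-formed BIOUL sequence; unlike the BIO/IOB1 extractors, this one
# raises `InvalidTagSequence` on ill-formed input instead of recovering.
def _example_bioul_tags_to_spans():
    spans = bioul_tags_to_spans(["B-PER", "L-PER", "U-LOC", "O"])
    assert spans == [("PER", (0, 1)), ("LOC", (2, 2))]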
def iob1_to_bioul(tag_sequence: List[str]) -> List[str]:
warnings.warn(
"iob1_to_bioul has been replaced with 'to_bioul' to allow more encoding options.",
FutureWarning,
)
return to_bioul(tag_sequence)
def to_bioul(tag_sequence: List[str], encoding: str = "IOB1") -> List[str]:
"""
Given a tag sequence encoded with IOB1 or BIO labels, recode it in BIOUL.
In the IOB1 scheme, I is a token inside a span, O is a token outside
a span and B is the beginning of a span immediately following another
span of the same type.
In the BIO scheme, I is a token inside a span, O is a token outside
a span and B is the beginning of a span.
# Parameters
tag_sequence : `List[str]`, required.
The tag sequence encoded in IOB1, e.g. ["I-PER", "I-PER", "O"].
encoding : `str`, optional, (default = `"IOB1"`).
The encoding type to convert from. Must be either "IOB1" or "BIO".
# Returns
bioul_sequence : `List[str]`
The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"].
"""
if encoding not in {"IOB1", "BIO"}:
raise ConfigurationError(f"Invalid encoding {encoding} passed to 'to_bioul'.")
def replace_label(full_label, new_label):
# example: full_label = 'I-PER', new_label = 'U', returns 'U-PER'
parts = list(full_label.partition("-"))
parts[0] = new_label
return "".join(parts)
def pop_replace_append(in_stack, out_stack, new_label):
# pop the last element from in_stack, replace the label, append
# to out_stack
tag = in_stack.pop()
new_tag = replace_label(tag, new_label)
out_stack.append(new_tag)
def process_stack(stack, out_stack):
# process a stack of labels, add them to out_stack
if len(stack) == 1:
# just a U token
pop_replace_append(stack, out_stack, "U")
else:
# need to code as BIL
recoded_stack = []
pop_replace_append(stack, recoded_stack, "L")
while len(stack) >= 2:
pop_replace_append(stack, recoded_stack, "I")
pop_replace_append(stack, recoded_stack, "B")
recoded_stack.reverse()
out_stack.extend(recoded_stack)
# Process the tag_sequence one tag at a time, adding spans to a stack,
# then recode them.
bioul_sequence = []
stack: List[str] = []
for label in tag_sequence:
# need to make a dict like
# token = {'token': 'Matt', "labels": {'conll2003': "B-PER"}
# 'gold': 'I-PER'}
# where 'gold' is the raw value from the CoNLL data set
if label == "O" and len(stack) == 0:
bioul_sequence.append(label)
elif label == "O" and len(stack) > 0:
# need to process the entries on the stack plus this one
process_stack(stack, bioul_sequence)
bioul_sequence.append(label)
elif label[0] == "I":
# check if the previous type is the same as this one
# if it is then append to stack
# otherwise this starts a new entity if the type
# is different
if len(stack) == 0:
if encoding == "BIO":
raise InvalidTagSequence(tag_sequence)
stack.append(label)
else:
# check if the previous type is the same as this one
this_type = label.partition("-")[2]
prev_type = stack[-1].partition("-")[2]
if this_type == prev_type:
stack.append(label)
else:
if encoding == "BIO":
raise InvalidTagSequence(tag_sequence)
# a new entity
process_stack(stack, bioul_sequence)
stack.append(label)
elif label[0] == "B":
if len(stack) > 0:
process_stack(stack, bioul_sequence)
stack.append(label)
else:
raise InvalidTagSequence(tag_sequence)
# process the stack
if len(stack) > 0:
process_stack(stack, bioul_sequence)
return bioul_sequence
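# --- Illustrative check (not part of the original file) ---
# A hypothetical IOB1 sequence recoded to BIOUL; the two-token PER span becomes B/L and
# the single-token LOC span becomes U.
def _example_to_bioul():
    recoded = to_bioul(["I-PER", "I-PER", "O", "I-LOC"], encoding="IOB1")
    assert recoded == ["B-PER", "L-PER", "O", "U-LOC"]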
def bmes_tags_to_spans(
tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[TypedStringSpan]:
"""
Given a sequence corresponding to BMES tags, extracts spans.
Spans are inclusive and can be of zero length, representing a single word span.
Ill-formed spans are also included (i.e., those which do not start with a "B-LABEL"),
as otherwise it is possible to get a perfect precision score whilst still predicting
ill-formed spans in addition to the correct spans.
This function works properly when the spans are unlabeled (i.e., your labels are
simply "B", "M", "E" and "S").
# Parameters
tag_sequence : `List[str]`, required.
The string class labels (BMES tags) for a sequence.
classes_to_ignore : `List[str]`, optional (default = `None`).
A list of string class labels `excluding` the bio tag
which should be ignored when extracting spans.
# Returns
spans : `List[TypedStringSpan]`
The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
Note that the label `does not` contain any BMES tag prefixes.
"""
def extract_bmes_tag_label(text):
bmes_tag = text[0]
label = text[2:]
return bmes_tag, label
spans: List[Tuple[str, List[int]]] = []
prev_bmes_tag: Optional[str] = None
for index, tag in enumerate(tag_sequence):
bmes_tag, label = extract_bmes_tag_label(tag)
if bmes_tag in ("B", "S"):
# Regardless of tag, we start a new span when reaching B & S.
spans.append((label, [index, index]))
elif bmes_tag in ("M", "E") and prev_bmes_tag in ("B", "M") and spans[-1][0] == label:
# Only expand the span if
# 1. Valid transition: B/M -> M/E.
# 2. Matched label.
spans[-1][1][1] = index
else:
# Best effort split for invalid span.
spans.append((label, [index, index]))
# update previous BMES tag.
prev_bmes_tag = bmes_tag
classes_to_ignore = classes_to_ignore or []
return [
# to tuple.
(span[0], (span[1][0], span[1][1]))
for span in spans
if span[0] not in classes_to_ignore
]
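# --- Illustrative check (not part of the original file) ---
# A hypothetical BMES sequence; "B"/"M"/"E" mark the beginning, middle, and end of a
# multi-token span, and "S" marks a single-token span.
def _example_bmes_tags_to_spans():
    spans = bmes_tags_to_spans(["B-PER", "E-PER", "S-LOC"])
    assert spans == [("PER", (0, 1)), ("LOC", (2, 2))]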
| allennlp-master | allennlp/data/dataset_readers/dataset_utils/span_utils.py |
from typing import Dict, List, Optional
import itertools
from overrides import overrides
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers.token_indexer import TokenIndexer, IndexedTokenList
_DEFAULT_VALUE = "THIS IS A REALLY UNLIKELY VALUE THAT HAS TO BE A STRING"
@TokenIndexer.register("single_id")
class SingleIdTokenIndexer(TokenIndexer):
"""
This :class:`TokenIndexer` represents tokens as single integers.
Registered as a `TokenIndexer` with name "single_id".
# Parameters
namespace : `Optional[str]`, optional (default=`"tokens"`)
We will use this namespace in the :class:`Vocabulary` to map strings to indices. If you
explicitly pass in `None` here, we will skip indexing and vocabulary lookups. This means
that the `feature_name` you use must correspond to an integer value (like `text_id`, for
instance, which gets set by some tokenizers, such as when using byte encoding).
lowercase_tokens : `bool`, optional (default=`False`)
If `True`, we will call `token.lower()` before getting an index for the token from the
vocabulary.
start_tokens : `List[str]`, optional (default=`None`)
These are prepended to the tokens provided to `tokens_to_indices`.
end_tokens : `List[str]`, optional (default=`None`)
These are appended to the tokens provided to `tokens_to_indices`.
feature_name : `str`, optional (default=`"text"`)
We will use the :class:`Token` attribute with this name as input. This is potentially
useful, e.g., for using NER tags instead of (or in addition to) surface forms as your inputs
(passing `ent_type_` here would do that). If you use a non-default value here, you almost
certainly want to also change the `namespace` parameter, and you might want to give a
`default_value`.
default_value : `str`, optional
When you want to use a non-default `feature_name`, you sometimes want to have a default
value to go with it, e.g., in case you don't have an NER tag for a particular token, for
some reason. This value will get used if we don't find a value in `feature_name`. If this
is not given, we will crash if a token doesn't have a value for the given `feature_name`, so
that you don't get weird, silent errors by default.
token_min_padding_length : `int`, optional (default=`0`)
See :class:`TokenIndexer`.
"""
def __init__(
self,
namespace: Optional[str] = "tokens",
lowercase_tokens: bool = False,
start_tokens: List[str] = None,
end_tokens: List[str] = None,
feature_name: str = "text",
default_value: str = _DEFAULT_VALUE,
token_min_padding_length: int = 0,
) -> None:
super().__init__(token_min_padding_length)
self.namespace = namespace
self.lowercase_tokens = lowercase_tokens
self._start_tokens = [Token(st) for st in (start_tokens or [])]
self._end_tokens = [Token(et) for et in (end_tokens or [])]
self._feature_name = feature_name
self._default_value = default_value
@overrides
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
if self.namespace is not None:
text = self._get_feature_value(token)
if self.lowercase_tokens:
text = text.lower()
counter[self.namespace][text] += 1
@overrides
def tokens_to_indices(
self, tokens: List[Token], vocabulary: Vocabulary
) -> Dict[str, List[int]]:
indices: List[int] = []
for token in itertools.chain(self._start_tokens, tokens, self._end_tokens):
text = self._get_feature_value(token)
if self.namespace is None:
# We could have a check here that `text` is an int; not sure it's worth it.
indices.append(text) # type: ignore
else:
if self.lowercase_tokens:
text = text.lower()
indices.append(vocabulary.get_token_index(text, self.namespace))
return {"tokens": indices}
@overrides
def get_empty_token_list(self) -> IndexedTokenList:
return {"tokens": []}
def _get_feature_value(self, token: Token) -> str:
text = getattr(token, self._feature_name)
if text is None:
if self._default_value is not _DEFAULT_VALUE:
text = self._default_value
else:
raise ValueError(
f"{token} did not have attribute {self._feature_name}. If you "
"want to ignore this kind of error, give a default value in the "
"constructor of this indexer."
)
return text
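# --- Illustrative check (not part of the original file) ---
# Indexing two hypothetical tokens against a freshly built vocabulary; with
# `lowercase_tokens=True`, "The" is looked up as "the".
def _example_single_id_indexing():
    vocab = Vocabulary()
    vocab.add_tokens_to_namespace(["the", "cat"], namespace="tokens")
    indexer = SingleIdTokenIndexer(lowercase_tokens=True)
    indexed = indexer.tokens_to_indices([Token("The"), Token("cat")], vocab)
    assert indexed == {"tokens": [vocab.get_token_index("the"), vocab.get_token_index("cat")]}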
| allennlp-master | allennlp/data/token_indexers/single_id_token_indexer.py |
from typing import Dict, List, Any, Optional
import logging
from overrides import overrides
import torch
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers import PretrainedTransformerIndexer, TokenIndexer
from allennlp.data.token_indexers.token_indexer import IndexedTokenList
logger = logging.getLogger(__name__)
@TokenIndexer.register("pretrained_transformer_mismatched")
class PretrainedTransformerMismatchedIndexer(TokenIndexer):
"""
Use this indexer when (for whatever reason) you are not using a corresponding
`PretrainedTransformerTokenizer` on your input. We assume that you used a tokenizer that splits
strings into words, while the transformer expects wordpieces as input. This indexer splits the
words into wordpieces and flattens them out. You should use the corresponding
`PretrainedTransformerMismatchedEmbedder` to embed these wordpieces and then pull out a single
vector for each original word.
Registered as a `TokenIndexer` with name "pretrained_transformer_mismatched".
# Parameters
model_name : `str`
The name of the `transformers` model to use.
namespace : `str`, optional (default=`tags`)
We will add the tokens in the pytorch_transformer vocabulary to this vocabulary namespace.
We use a somewhat confusing default value of `tags` so that we do not add padding or UNK
tokens to this namespace, which would break on loading because we wouldn't find our default
OOV token.
max_length : `int`, optional (default = `None`)
If positive, split the document into segments of this many tokens (including special tokens)
before feeding into the embedder. The embedder embeds these segments independently and
concatenates the results to get the original document representation. Should be set to
the same value as the `max_length` option on the `PretrainedTransformerMismatchedEmbedder`.
tokenizer_kwargs : `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/tokenization_utils.py#L691)
for `AutoTokenizer.from_pretrained`.
""" # noqa: E501
def __init__(
self,
model_name: str,
namespace: str = "tags",
max_length: int = None,
tokenizer_kwargs: Optional[Dict[str, Any]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
# The "matched" version of the indexer, as opposed to this mismatched one.
self._matched_indexer = PretrainedTransformerIndexer(
model_name,
namespace=namespace,
max_length=max_length,
tokenizer_kwargs=tokenizer_kwargs,
**kwargs,
)
self._allennlp_tokenizer = self._matched_indexer._allennlp_tokenizer
self._tokenizer = self._matched_indexer._tokenizer
self._num_added_start_tokens = self._matched_indexer._num_added_start_tokens
self._num_added_end_tokens = self._matched_indexer._num_added_end_tokens
@overrides
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
return self._matched_indexer.count_vocab_items(token, counter)
@overrides
def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary) -> IndexedTokenList:
self._matched_indexer._add_encoding_to_vocabulary_if_needed(vocabulary)
wordpieces, offsets = self._allennlp_tokenizer.intra_word_tokenize(
[t.ensure_text() for t in tokens]
)
# For tokens that don't correspond to any word pieces, we put (-1, -1) into the offsets.
# That results in the embedding for the token to be all zeros.
offsets = [x if x is not None else (-1, -1) for x in offsets]
output: IndexedTokenList = {
"token_ids": [t.text_id for t in wordpieces],
"mask": [True] * len(tokens), # for original tokens (i.e. word-level)
"type_ids": [t.type_id for t in wordpieces],
"offsets": offsets,
"wordpiece_mask": [True] * len(wordpieces), # for wordpieces (i.e. subword-level)
}
return self._matched_indexer._postprocess_output(output)
@overrides
def get_empty_token_list(self) -> IndexedTokenList:
output = self._matched_indexer.get_empty_token_list()
output["offsets"] = []
output["wordpiece_mask"] = []
return output
@overrides
def as_padded_tensor_dict(
self, tokens: IndexedTokenList, padding_lengths: Dict[str, int]
) -> Dict[str, torch.Tensor]:
tokens = tokens.copy()
padding_lengths = padding_lengths.copy()
offsets_tokens = tokens.pop("offsets")
offsets_padding_lengths = padding_lengths.pop("offsets")
tensor_dict = self._matched_indexer.as_padded_tensor_dict(tokens, padding_lengths)
tensor_dict["offsets"] = torch.LongTensor(
pad_sequence_to_length(
offsets_tokens, offsets_padding_lengths, default_value=lambda: (0, 0)
)
)
return tensor_dict
def __eq__(self, other):
if isinstance(other, PretrainedTransformerMismatchedIndexer):
for key in self.__dict__:
if key == "_tokenizer":
# This is a reference to a function in the huggingface code, which we can't
# really modify to make this clean. So we special-case it.
continue
if self.__dict__[key] != other.__dict__[key]:
return False
return True
return NotImplemented
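# --- Illustrative sketch (not part of the original file) ---
# The mismatched indexer takes word-level tokens, re-tokenizes them into wordpieces, and
# records one (start, end) offset pair per original word so that the corresponding
# mismatched embedder can pool wordpiece vectors back into one vector per word.
# Running this downloads the "bert-base-uncased" model from HuggingFace.
def _example_mismatched_indexing():
    indexer = PretrainedTransformerMismatchedIndexer(model_name="bert-base-uncased")
    vocab = Vocabulary()
    output = indexer.tokens_to_indices([Token("AllenNLP"), Token("rocks")], vocab)
    # One mask entry and one offset pair per *word*; one id and one wordpiece_mask entry
    # per *wordpiece* (which includes the special tokens).
    assert len(output["mask"]) == len(output["offsets"]) == 2
    assert len(output["token_ids"]) == len(output["wordpiece_mask"])
    return output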
| allennlp-master | allennlp/data/token_indexers/pretrained_transformer_mismatched_indexer.py |
"""
A `TokenIndexer` determines how string tokens get represented as arrays of indices in a model.
"""
from allennlp.data.token_indexers.single_id_token_indexer import SingleIdTokenIndexer
from allennlp.data.token_indexers.token_characters_indexer import TokenCharactersIndexer
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer
from allennlp.data.token_indexers.spacy_indexer import SpacyTokenIndexer
from allennlp.data.token_indexers.pretrained_transformer_indexer import PretrainedTransformerIndexer
from allennlp.data.token_indexers.pretrained_transformer_mismatched_indexer import (
PretrainedTransformerMismatchedIndexer,
)
| allennlp-master | allennlp/data/token_indexers/__init__.py |
from typing import Any, Dict, List
import torch
from allennlp.common import Registrable
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
# An indexed token list represents the arguments that will be passed to a TokenEmbedder
# corresponding to this TokenIndexer. Each argument that the TokenEmbedder needs will have one
# entry in the IndexedTokenList dictionary, and that argument will typically be a list of integers
# (for single ID word embeddings) or a nested list of integers (for character ID word embeddings),
# though it could also be a mask, or any other data that you want to pass.
IndexedTokenList = Dict[str, List[Any]]
class TokenIndexer(Registrable):
"""
A `TokenIndexer` determines how string tokens get represented as arrays of indices in a model.
This class both converts strings into numerical values, with the help of a
:class:`~allennlp.data.vocabulary.Vocabulary`, and it produces actual arrays.
Tokens can be represented as single IDs (e.g., the word "cat" gets represented by the number
34), or as lists of character IDs (e.g., "cat" gets represented by the numbers [23, 10, 18]),
or in some other way that you can come up with (e.g., if you have some structured input you
want to represent in a special way in your data arrays, you can do that here).
# Parameters
token_min_padding_length : `int`, optional (default=`0`)
The minimum padding length required for the :class:`TokenIndexer`. For example,
the minimum padding length of :class:`SingleIdTokenIndexer` is the largest
filter size when using :class:`CnnEncoder`.
Note that if you set this for one TokenIndexer, you likely have to set it for all
:class:`TokenIndexer` for the same field, otherwise you'll get mismatched tensor sizes.
"""
default_implementation = "single_id"
has_warned_for_as_padded_tensor = False
def __init__(self, token_min_padding_length: int = 0) -> None:
self._token_min_padding_length: int = token_min_padding_length
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
"""
The :class:`Vocabulary` needs to assign indices to whatever strings we see in the training
data (possibly doing some frequency filtering and using an OOV, or out of vocabulary,
token). This method takes a token and a dictionary of counts and increments counts for
whatever vocabulary items are present in the token. If this is a single token ID
representation, the vocabulary item is likely the token itself. If this is a token
characters representation, the vocabulary items are all of the characters in the token.
"""
raise NotImplementedError
def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary) -> IndexedTokenList:
"""
Takes a list of tokens and converts them to an `IndexedTokenList`.
This could be just an ID for each token from the vocabulary.
Or it could split each token into characters and return one ID per character.
Or (for instance, in the case of byte-pair encoding) there might not be a clean
mapping from individual tokens to indices, and the `IndexedTokenList` could be a complex
data structure.
"""
raise NotImplementedError
def indices_to_tokens(
self, indexed_tokens: IndexedTokenList, vocabulary: Vocabulary
) -> List[Token]:
"""
The inverse operation of tokens_to_indices. Takes an `IndexedTokenList` and converts it back
into a list of tokens.
"""
raise NotImplementedError
def get_empty_token_list(self) -> IndexedTokenList:
"""
Returns an `already indexed` version of an empty token list. This is typically just an
empty list for whatever keys are used in the indexer.
"""
raise NotImplementedError
def get_padding_lengths(self, indexed_tokens: IndexedTokenList) -> Dict[str, int]:
"""
This method returns a padding dictionary for the given `indexed_tokens` specifying all
lengths that need padding. If all you have is a list of single ID tokens, this is just the
length of the list, and that's what the default implementation will give you. If you have
something more complicated, like a list of character ids for token, you'll need to override
this.
"""
padding_lengths = {}
for key, token_list in indexed_tokens.items():
padding_lengths[key] = max(len(token_list), self._token_min_padding_length)
return padding_lengths
def as_padded_tensor_dict(
self, tokens: IndexedTokenList, padding_lengths: Dict[str, int]
) -> Dict[str, torch.Tensor]:
"""
This method pads a list of tokens given the input padding lengths (which could actually
truncate things, depending on settings) and returns that padded list of input tokens as a
`Dict[str, torch.Tensor]`. This is a dictionary because there should be one key per
argument that the `TokenEmbedder` corresponding to this class expects in its `forward()`
method (where the argument name in the `TokenEmbedder` needs to match the key in this
dictionary).
The base class implements the case when all you want to do is create a padded `LongTensor`
for every list in the `tokens` dictionary. If your `TokenIndexer` needs more complex
logic than that, you need to override this method.
"""
tensor_dict = {}
for key, val in tokens.items():
if val and isinstance(val[0], bool):
tensor = torch.BoolTensor(
pad_sequence_to_length(val, padding_lengths[key], default_value=lambda: False)
)
else:
tensor = torch.LongTensor(pad_sequence_to_length(val, padding_lengths[key]))
tensor_dict[key] = tensor
return tensor_dict
def __eq__(self, other) -> bool:
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
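# --- Illustrative check (not part of the original file) ---
# The base-class defaults in action on a hypothetical already-indexed token list:
# `get_padding_lengths` is the list length floored at `token_min_padding_length`, and
# `as_padded_tensor_dict` zero-pads each list to that length.
def _example_default_padding():
    indexer = TokenIndexer(token_min_padding_length=5)
    indexed = {"tokens": [2, 3, 4]}
    lengths = indexer.get_padding_lengths(indexed)
    assert lengths == {"tokens": 5}
    tensors = indexer.as_padded_tensor_dict(indexed, lengths)
    assert tensors["tokens"].tolist() == [2, 3, 4, 0, 0]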
| allennlp-master | allennlp/data/token_indexers/token_indexer.py |
from typing import Dict, List
import itertools
import warnings
from overrides import overrides
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.token_indexers.token_indexer import TokenIndexer, IndexedTokenList
from allennlp.data.tokenizers import Token, CharacterTokenizer
from allennlp.data.vocabulary import Vocabulary
@TokenIndexer.register("characters")
class TokenCharactersIndexer(TokenIndexer):
"""
This :class:`TokenIndexer` represents tokens as lists of character indices.
Registered as a `TokenIndexer` with name "characters".
# Parameters
namespace : `str`, optional (default=`token_characters`)
We will use this namespace in the :class:`Vocabulary` to map the characters in each token
to indices.
character_tokenizer : `CharacterTokenizer`, optional (default=`CharacterTokenizer()`)
We use a :class:`CharacterTokenizer` to handle splitting tokens into characters, as it has
options for byte encoding and other things. The default here is to instantiate a
`CharacterTokenizer` with its default parameters, which uses unicode characters and
retains casing.
start_tokens : `List[str]`, optional (default=`None`)
These are prepended to the tokens provided to `tokens_to_indices`.
end_tokens : `List[str]`, optional (default=`None`)
These are appended to the tokens provided to `tokens_to_indices`.
min_padding_length : `int`, optional (default=`0`)
We use this value as the minimum length of padding. When used with :class:`CnnEncoder`, its
value should be set to the maximum value of `ngram_filter_sizes`.
token_min_padding_length : `int`, optional (default=`0`)
See :class:`TokenIndexer`.
"""
def __init__(
self,
namespace: str = "token_characters",
character_tokenizer: CharacterTokenizer = CharacterTokenizer(),
start_tokens: List[str] = None,
end_tokens: List[str] = None,
min_padding_length: int = 0,
token_min_padding_length: int = 0,
) -> None:
super().__init__(token_min_padding_length)
if min_padding_length == 0:
url = "https://github.com/allenai/allennlp/issues/1954"
warnings.warn(
"You are using the default value (0) of `min_padding_length`, "
f"which can cause some subtle bugs (more info see {url}). "
"Strongly recommend to set a value, usually the maximum size "
"of the convolutional layer size when using CnnEncoder.",
UserWarning,
)
self._min_padding_length = min_padding_length
self._namespace = namespace
self._character_tokenizer = character_tokenizer
self._start_tokens = [Token(st) for st in (start_tokens or [])]
self._end_tokens = [Token(et) for et in (end_tokens or [])]
@overrides
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
if token.text is None:
raise ConfigurationError("TokenCharactersIndexer needs a tokenizer that retains text")
for character in self._character_tokenizer.tokenize(token.text):
# If `text_id` is set on the character token (e.g., if we're using byte encoding), we
# will not be using the vocab for this character.
if getattr(character, "text_id", None) is None:
counter[self._namespace][character.text] += 1
@overrides
def tokens_to_indices(
self, tokens: List[Token], vocabulary: Vocabulary
) -> Dict[str, List[List[int]]]:
indices: List[List[int]] = []
for token in itertools.chain(self._start_tokens, tokens, self._end_tokens):
token_indices: List[int] = []
if token.text is None:
raise ConfigurationError(
"TokenCharactersIndexer needs a tokenizer that retains text"
)
for character in self._character_tokenizer.tokenize(token.text):
if getattr(character, "text_id", None) is not None:
# `text_id` being set on the token means that we aren't using the vocab, we just
# use this id instead.
index = character.text_id
else:
index = vocabulary.get_token_index(character.text, self._namespace)
token_indices.append(index)
indices.append(token_indices)
return {"token_characters": indices}
@overrides
def get_padding_lengths(self, indexed_tokens: IndexedTokenList) -> Dict[str, int]:
padding_lengths = {}
padding_lengths["token_characters"] = max(
len(indexed_tokens["token_characters"]), self._token_min_padding_length
)
max_num_characters = self._min_padding_length
for token in indexed_tokens["token_characters"]:
max_num_characters = max(len(token), max_num_characters) # type: ignore
padding_lengths["num_token_characters"] = max_num_characters
return padding_lengths
@overrides
def as_padded_tensor_dict(
self, tokens: IndexedTokenList, padding_lengths: Dict[str, int]
) -> Dict[str, torch.Tensor]:
# Pad the tokens.
padded_tokens = pad_sequence_to_length(
tokens["token_characters"],
padding_lengths["token_characters"],
default_value=lambda: [],
)
# Pad the characters within the tokens.
desired_token_length = padding_lengths["num_token_characters"]
longest_token: List[int] = max(tokens["token_characters"], key=len, default=[]) # type: ignore
padding_value = 0
if desired_token_length > len(longest_token):
# Since we want to pad to greater than the longest token, we add a
# "dummy token" so we can take advantage of the fast implementation of itertools.zip_longest.
padded_tokens.append([padding_value] * desired_token_length)
# pad the list of lists to the longest sublist, appending 0's
padded_tokens = list(zip(*itertools.zip_longest(*padded_tokens, fillvalue=padding_value)))
if desired_token_length > len(longest_token):
# Removes the "dummy token".
padded_tokens.pop()
        # Truncate all the tokens to the desired length and return the result.
return {
"token_characters": torch.LongTensor(
[list(token[:desired_token_length]) for token in padded_tokens]
)
}
@overrides
def get_empty_token_list(self) -> IndexedTokenList:
return {"token_characters": []}
| allennlp-master | allennlp/data/token_indexers/token_characters_indexer.py |
from typing import Dict, List
from overrides import overrides
import torch
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers.token_indexer import TokenIndexer, IndexedTokenList
from allennlp.data.vocabulary import Vocabulary
def _make_bos_eos(
character: int,
padding_character: int,
beginning_of_word_character: int,
end_of_word_character: int,
max_word_length: int,
):
char_ids = [padding_character] * max_word_length
char_ids[0] = beginning_of_word_character
char_ids[1] = character
char_ids[2] = end_of_word_character
return char_ids
class ELMoCharacterMapper:
"""
Maps individual tokens to sequences of character ids, compatible with ELMo.
    To be consistent with previously trained models, we include it here as a special case of
    the existing character indexers.
    We allow adding optional extra special tokens with designated character ids via
    `tokens_to_add`.
"""
max_word_length = 50
# char ids 0-255 come from utf-8 encoding bytes
# assign 256-300 to special chars
beginning_of_sentence_character = 256 # <begin sentence>
end_of_sentence_character = 257 # <end sentence>
beginning_of_word_character = 258 # <begin word>
end_of_word_character = 259 # <end word>
padding_character = 260 # <padding>
beginning_of_sentence_characters = _make_bos_eos(
beginning_of_sentence_character,
padding_character,
beginning_of_word_character,
end_of_word_character,
max_word_length,
)
end_of_sentence_characters = _make_bos_eos(
end_of_sentence_character,
padding_character,
beginning_of_word_character,
end_of_word_character,
max_word_length,
)
bos_token = "<S>"
eos_token = "</S>"
def __init__(self, tokens_to_add: Dict[str, int] = None) -> None:
self.tokens_to_add = tokens_to_add or {}
def convert_word_to_char_ids(self, word: str) -> List[int]:
if word in self.tokens_to_add:
char_ids = [ELMoCharacterMapper.padding_character] * ELMoCharacterMapper.max_word_length
char_ids[0] = ELMoCharacterMapper.beginning_of_word_character
char_ids[1] = self.tokens_to_add[word]
char_ids[2] = ELMoCharacterMapper.end_of_word_character
elif word == ELMoCharacterMapper.bos_token:
char_ids = ELMoCharacterMapper.beginning_of_sentence_characters
elif word == ELMoCharacterMapper.eos_token:
char_ids = ELMoCharacterMapper.end_of_sentence_characters
else:
word_encoded = word.encode("utf-8", "ignore")[
: (ELMoCharacterMapper.max_word_length - 2)
]
char_ids = [ELMoCharacterMapper.padding_character] * ELMoCharacterMapper.max_word_length
char_ids[0] = ELMoCharacterMapper.beginning_of_word_character
for k, chr_id in enumerate(word_encoded, start=1):
char_ids[k] = chr_id
char_ids[len(word_encoded) + 1] = ELMoCharacterMapper.end_of_word_character
        # Shift every id by +1 so that 0 can be reserved for masking.
return [c + 1 for c in char_ids]
def __eq__(self, other) -> bool:
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
@TokenIndexer.register("elmo_characters")
class ELMoTokenCharactersIndexer(TokenIndexer):
"""
Convert a token to an array of character ids to compute ELMo representations.
Registered as a `TokenIndexer` with name "elmo_characters".
# Parameters
namespace : `str`, optional (default=`elmo_characters`)
tokens_to_add : `Dict[str, int]`, optional (default=`None`)
If not None, then provides a mapping of special tokens to character
        ids. When using pre-trained models, the character id must be
        less than 261, and we recommend using otherwise unused ids (e.g. 1-32).
token_min_padding_length : `int`, optional (default=`0`)
See :class:`TokenIndexer`.
"""
def __init__(
self,
namespace: str = "elmo_characters",
tokens_to_add: Dict[str, int] = None,
token_min_padding_length: int = 0,
) -> None:
super().__init__(token_min_padding_length)
self._namespace = namespace
self._mapper = ELMoCharacterMapper(tokens_to_add)
@overrides
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
pass
@overrides
def get_empty_token_list(self) -> IndexedTokenList:
return {"elmo_tokens": []}
@overrides
def tokens_to_indices(
self, tokens: List[Token], vocabulary: Vocabulary
) -> Dict[str, List[List[int]]]:
# TODO(brendanr): Retain the token to index mappings in the vocabulary and remove this
# https://github.com/allenai/allennlp/blob/master/allennlp/data/token_indexers/wordpiece_indexer.py#L113
return {
"elmo_tokens": [self._mapper.convert_word_to_char_ids(t.ensure_text()) for t in tokens]
}
@overrides
def as_padded_tensor_dict(
self, tokens: IndexedTokenList, padding_lengths: Dict[str, int]
) -> Dict[str, torch.Tensor]:
# Overriding this method only because we need a different padding token than the default.
tensor_dict = {}
def padding_token():
return [0] * ELMoCharacterMapper.max_word_length
tensor_dict["elmo_tokens"] = torch.LongTensor(
pad_sequence_to_length(
tokens["elmo_tokens"], padding_lengths["elmo_tokens"], default_value=padding_token
)
)
return tensor_dict
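# --- Illustrative sketch (not part of the original module) ---------------------------------------
# A minimal usage sketch of `ELMoCharacterMapper`: every word becomes a fixed-length (50) sequence
# of character ids built from its utf-8 bytes, wrapped with beginning/end-of-word markers, padded,
# and finally shifted by +1 so that 0 can be reserved for masking.
if __name__ == "__main__":
    mapper = ELMoCharacterMapper()
    char_ids = mapper.convert_word_to_char_ids("cat")
    assert len(char_ids) == ELMoCharacterMapper.max_word_length
    # First the (shifted) beginning-of-word marker, then the shifted utf-8 bytes of "c", "a", "t",
    # then the (shifted) end-of-word marker; the rest is (shifted) padding.
    assert char_ids[0] == ELMoCharacterMapper.beginning_of_word_character + 1
    assert char_ids[1:4] == [ord(c) + 1 for c in "cat"]
    assert char_ids[4] == ELMoCharacterMapper.end_of_word_character + 1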
| allennlp-master | allennlp/data/token_indexers/elmo_indexer.py |
from typing import Dict, List
from overrides import overrides
from spacy.tokens import Token as SpacyToken
import torch
import numpy
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers.token_indexer import TokenIndexer, IndexedTokenList
@TokenIndexer.register("spacy")
class SpacyTokenIndexer(TokenIndexer):
"""
This :class:`SpacyTokenIndexer` represents tokens as word vectors
    from a spacy model. You might want to do this for two main reasons:
    easier integration with a spacy pipeline and no out-of-vocabulary
    tokens.
Registered as a `TokenIndexer` with name "spacy".
# Parameters
hidden_dim : `int`, optional (default=`96`)
The dimension of the vectors that spacy generates for
representing words.
token_min_padding_length : `int`, optional (default=`0`)
See :class:`TokenIndexer`.
"""
def __init__(self, hidden_dim: int = 96, token_min_padding_length: int = 0) -> None:
self._hidden_dim = hidden_dim
super().__init__(token_min_padding_length)
@overrides
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
# We are using spacy to generate embeddings directly for our model,
# so we don't need to capture the vocab - it is defined by the spacy
# model we are using instead.
pass
@overrides
def tokens_to_indices(
self, tokens: List[SpacyToken], vocabulary: Vocabulary
) -> Dict[str, List[numpy.ndarray]]:
if not all(isinstance(x, SpacyToken) for x in tokens):
raise ValueError(
"The spacy indexer requires you to use a Tokenizer which produces SpacyTokens."
)
indices: List[numpy.ndarray] = [token.vector for token in tokens]
return {"tokens": indices}
@overrides
def as_padded_tensor_dict(
self, tokens: IndexedTokenList, padding_lengths: Dict[str, int]
) -> Dict[str, torch.Tensor]:
def padding_token():
return numpy.zeros(self._hidden_dim, dtype=numpy.float32)
tensor = torch.FloatTensor(
pad_sequence_to_length(
tokens["tokens"], padding_lengths["tokens"], default_value=padding_token
)
)
return {"tokens": tensor}
| allennlp-master | allennlp/data/token_indexers/spacy_indexer.py |
from typing import Dict, List, Optional, Tuple, Any
import logging
import torch
from allennlp.common.util import pad_sequence_to_length
from overrides import overrides
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
from allennlp.data.token_indexers.token_indexer import TokenIndexer, IndexedTokenList
logger = logging.getLogger(__name__)
@TokenIndexer.register("pretrained_transformer")
class PretrainedTransformerIndexer(TokenIndexer):
"""
This `TokenIndexer` assumes that Tokens already have their indexes in them (see `text_id` field).
    We still require `model_name` because we want to form the allennlp vocabulary from the pretrained one.
This `Indexer` is only really appropriate to use if you've also used a
corresponding :class:`PretrainedTransformerTokenizer` to tokenize your input. Otherwise you'll
have a mismatch between your tokens and your vocabulary, and you'll get a lot of UNK tokens.
Registered as a `TokenIndexer` with name "pretrained_transformer".
# Parameters
model_name : `str`
The name of the `transformers` model to use.
namespace : `str`, optional (default=`tags`)
We will add the tokens in the pytorch_transformer vocabulary to this vocabulary namespace.
We use a somewhat confusing default value of `tags` so that we do not add padding or UNK
tokens to this namespace, which would break on loading because we wouldn't find our default
OOV token.
max_length : `int`, optional (default = `None`)
If not None, split the document into segments of this many tokens (including special tokens)
before feeding into the embedder. The embedder embeds these segments independently and
        concatenates the results to get the original document representation. Should be set to
the same value as the `max_length` option on the `PretrainedTransformerEmbedder`.
tokenizer_kwargs : `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/tokenization_utils.py#L691)
for `AutoTokenizer.from_pretrained`.
""" # noqa: E501
def __init__(
self,
model_name: str,
namespace: str = "tags",
max_length: int = None,
tokenizer_kwargs: Optional[Dict[str, Any]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._namespace = namespace
self._allennlp_tokenizer = PretrainedTransformerTokenizer(
model_name, tokenizer_kwargs=tokenizer_kwargs
)
self._tokenizer = self._allennlp_tokenizer.tokenizer
self._added_to_vocabulary = False
self._num_added_start_tokens = len(self._allennlp_tokenizer.single_sequence_start_tokens)
self._num_added_end_tokens = len(self._allennlp_tokenizer.single_sequence_end_tokens)
self._max_length = max_length
if self._max_length is not None:
num_added_tokens = len(self._allennlp_tokenizer.tokenize("a")) - 1
self._effective_max_length = ( # we need to take into account special tokens
self._max_length - num_added_tokens
)
if self._effective_max_length <= 0:
raise ValueError(
"max_length needs to be greater than the number of special tokens inserted."
)
def _add_encoding_to_vocabulary_if_needed(self, vocab: Vocabulary) -> None:
"""
        Copies tokens from the `transformers` model's vocab to the specified namespace.
"""
if self._added_to_vocabulary:
return
try:
vocab_items = self._tokenizer.get_vocab().items()
except NotImplementedError:
vocab_items = (
(self._tokenizer.convert_ids_to_tokens(idx), idx)
for idx in range(self._tokenizer.vocab_size)
)
for word, idx in vocab_items:
vocab._token_to_index[self._namespace][word] = idx
vocab._index_to_token[self._namespace][idx] = word
self._added_to_vocabulary = True
@overrides
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
# If we only use pretrained models, we don't need to do anything here.
pass
@overrides
def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary) -> IndexedTokenList:
self._add_encoding_to_vocabulary_if_needed(vocabulary)
indices, type_ids = self._extract_token_and_type_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.
output: IndexedTokenList = {
"token_ids": indices,
"mask": [True] * len(indices),
"type_ids": type_ids or [0] * len(indices),
}
return self._postprocess_output(output)
@overrides
def indices_to_tokens(
self, indexed_tokens: IndexedTokenList, vocabulary: Vocabulary
) -> List[Token]:
self._add_encoding_to_vocabulary_if_needed(vocabulary)
token_ids = indexed_tokens["token_ids"]
type_ids = indexed_tokens.get("type_ids")
return [
Token(
text=vocabulary.get_token_from_index(token_ids[i], self._namespace),
text_id=token_ids[i],
type_id=type_ids[i] if type_ids is not None else None,
)
for i in range(len(token_ids))
]
def _extract_token_and_type_ids(self, tokens: List[Token]) -> Tuple[List[int], List[int]]:
"""
Roughly equivalent to `zip(*[(token.text_id, token.type_id) for token in tokens])`,
with some checks.
"""
indices: List[int] = []
type_ids: List[int] = []
for token in tokens:
indices.append(
token.text_id
if token.text_id is not None
else self._tokenizer.convert_tokens_to_ids(token.text)
)
type_ids.append(token.type_id if token.type_id is not None else 0)
return indices, type_ids
def _postprocess_output(self, output: IndexedTokenList) -> IndexedTokenList:
"""
Takes an IndexedTokenList about to be returned by `tokens_to_indices()` and adds any
necessary postprocessing, e.g. long sequence splitting.
The input should have a `"token_ids"` key corresponding to the token indices. They should
have special tokens already inserted.
"""
if self._max_length is not None:
# We prepare long indices by converting them to (assuming max_length == 5)
# [CLS] A B C [SEP] [CLS] D E F [SEP] ...
# Embedder is responsible for folding this 1-d sequence to 2-d and feed to the
# transformer model.
# TODO(zhaofengw): we aren't respecting word boundaries when segmenting wordpieces.
indices = output["token_ids"]
# Strips original special tokens
indices = indices[
self._num_added_start_tokens : len(indices) - self._num_added_end_tokens
]
# Folds indices
folded_indices = [
indices[i : i + self._effective_max_length]
for i in range(0, len(indices), self._effective_max_length)
]
# Adds special tokens to each segment
folded_indices = [
self._tokenizer.build_inputs_with_special_tokens(segment)
for segment in folded_indices
]
# Flattens
indices = [i for segment in folded_indices for i in segment]
output["token_ids"] = indices
output["type_ids"] = [0] * len(indices)
output["segment_concat_mask"] = [True] * len(indices)
return output
@overrides
def get_empty_token_list(self) -> IndexedTokenList:
output: IndexedTokenList = {"token_ids": [], "mask": [], "type_ids": []}
if self._max_length is not None:
output["segment_concat_mask"] = []
return output
@overrides
def as_padded_tensor_dict(
self, tokens: IndexedTokenList, padding_lengths: Dict[str, int]
) -> Dict[str, torch.Tensor]:
tensor_dict = {}
for key, val in tokens.items():
if key == "type_ids":
padding_value = 0
mktensor = torch.LongTensor
elif key == "mask" or key == "wordpiece_mask":
padding_value = False
mktensor = torch.BoolTensor
elif len(val) > 0 and isinstance(val[0], bool):
padding_value = False
mktensor = torch.BoolTensor
else:
padding_value = self._tokenizer.pad_token_id
if padding_value is None:
padding_value = (
0 # Some tokenizers don't have padding tokens and rely on the mask only.
)
mktensor = torch.LongTensor
tensor = mktensor(
pad_sequence_to_length(
val, padding_lengths[key], default_value=lambda: padding_value
)
)
tensor_dict[key] = tensor
return tensor_dict
def __eq__(self, other):
if isinstance(other, PretrainedTransformerIndexer):
for key in self.__dict__:
if key == "_tokenizer":
# This is a reference to a function in the huggingface code, which we can't
# really modify to make this clean. So we special-case it.
continue
if self.__dict__[key] != other.__dict__[key]:
return False
return True
return NotImplemented
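# --- Illustrative sketch (not part of the original module) ---------------------------------------
# A standalone sketch of the long-sequence folding performed in `_postprocess_output` above, using
# made-up special-token ids (101 for [CLS], 102 for [SEP]) instead of a real huggingface tokenizer:
# the original special tokens are stripped, the remaining wordpiece ids are folded into segments of
# at most `effective_max_length`, each segment is re-wrapped with special tokens, and the segments
# are flattened back into one long sequence for the embedder to re-fold.
def _fold_long_sequence_sketch(token_ids, max_length, cls_id=101, sep_id=102):
    num_start, num_end = 1, 1  # one [CLS] at the start and one [SEP] at the end
    effective_max_length = max_length - (num_start + num_end)
    inner = token_ids[num_start : len(token_ids) - num_end]
    segments = [
        inner[i : i + effective_max_length] for i in range(0, len(inner), effective_max_length)
    ]
    return [i for segment in [[cls_id] + s + [sep_id] for s in segments] for i in segment]
if __name__ == "__main__":
    # Seven wordpieces wrapped as [CLS] 1..7 [SEP], folded with max_length=5.
    assert _fold_long_sequence_sketch([101, 1, 2, 3, 4, 5, 6, 7, 102], max_length=5) == [
        101, 1, 2, 3, 102,
        101, 4, 5, 6, 102,
        101, 7, 102,
    ]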
| allennlp-master | allennlp/data/token_indexers/pretrained_transformer_indexer.py |
import logging
import random
from typing import List, Iterable, Iterator, TypeVar
from allennlp.data.samplers import BatchSampler, BucketBatchSampler
from torch.utils import data
logger = logging.getLogger(__name__)
A = TypeVar("A")
@BatchSampler.register("max_tokens_sampler")
class MaxTokensBatchSampler(BucketBatchSampler):
"""
    A sampler which, by default, argsorts instances with respect to their maximum input lengths.
    Batches are then created such that the number of tokens in a batch does not exceed the given
maximum number of tokens. You can provide a list of field names and padding keys (or pass none, in which case
they will be inferred) which the dataset will be sorted by before doing this batching, causing inputs
with similar length to be batched together, making computation more efficient (as less time is
wasted on padded elements of the batch).
# Parameters
data_source: `data.Dataset`
The pytorch `Dataset` of allennlp Instances to bucket.
max_tokens : `int`
The maximum number of tokens to include in a batch.
sorting_keys : `List[str]`, optional
To bucket inputs into batches, we want to group the instances by padding length, so that we
minimize the amount of padding necessary per batch. In order to do this, we need to know
which fields need what type of padding, and in what order.
Specifying the right keys for this is a bit cryptic, so if this is not given we try to
auto-detect the right keys by iterating through a few instances upfront, reading all of the
padding keys and seeing which one has the longest length. We use that one for padding.
This should give reasonable results in most cases. Some cases where it might not be the
right thing to do are when you have a `ListField[TextField]`, or when you have a really
long, constant length `ArrayField`.
When you need to specify this yourself, you can create an instance from your dataset and
call `Instance.get_padding_lengths()` to see a list of all keys used in your data. You
should give one or more of those as the sorting keys here.
padding_noise : `float`, optional (default = `0.1`)
When sorting by padding length, we add a bit of noise to the lengths, so that the sorting
isn't deterministic. This parameter determines how much noise we add, as a percentage of
the actual padding value for each instance.
"""
def __init__(
self,
data_source: data.Dataset,
max_tokens: int,
sorting_keys: List[str] = None,
padding_noise: float = 0.1,
):
super().__init__(data_source, -1, sorting_keys, padding_noise, False)
self.max_tokens = max_tokens
def _lazy_groups_of_max_size(
self,
iterable: Iterable[A],
sizes: Iterable[int],
) -> Iterator[List[A]]:
"""
Takes an `iterable` of data and an iterable `sizes` of the same length which represents the sizes of each
corresponding item in `iterable`. The instances from `iterable` are batched such that the total size
        of the batch as computed from `sizes` does not exceed `self.max_tokens`.
"""
cur_max_size = 0
group: List[A] = []
iterator = iter(iterable)
size_iter = iter(sizes)
for item, size in zip(iterator, size_iter):
if size > self.max_tokens:
logger.warning(
"Found instance of size %d, which is bigger than the expected size for a batch (%d)",
size,
self.max_tokens,
)
group_size = max(size, cur_max_size) * (len(group) + 1)
if group_size > self.max_tokens:
yield group
cur_max_size = 0
group = []
group.append(item)
cur_max_size = max(cur_max_size, size)
if len(group) != 0:
yield group
def __iter__(self) -> Iterable[List[int]]:
indices, lengths = self._argsort_by_padding(self.data_source)
max_lengths = [max(length) for length in lengths]
group_iterator = self._lazy_groups_of_max_size(indices, max_lengths)
batches = [list(group) for group in group_iterator]
random.shuffle(batches)
for batch in batches:
yield batch
def __len__(self):
# There is no easy way to count the number of batches, so we need to iterate and count.
return sum(1 for _ in self)
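# --- Illustrative sketch (not part of the original module) ---------------------------------------
# A standalone sketch of the grouping rule in `_lazy_groups_of_max_size` above: an item is added to
# the current group only if (maximum size seen so far) * (group length + 1) stays within
# `max_tokens`, i.e. what is bounded is the padded cost of the batch, not the raw sum of sizes.
# The helper name and toy sizes are made up for illustration only.
def _group_by_max_tokens_sketch(sizes, max_tokens):
    groups, group, cur_max_size = [], [], 0
    for size in sizes:
        if max(size, cur_max_size) * (len(group) + 1) > max_tokens:
            groups.append(group)
            group, cur_max_size = [], 0
        group.append(size)
        cur_max_size = max(cur_max_size, size)
    if group:
        groups.append(group)
    return groups
if __name__ == "__main__":
    # With max_tokens=10: [3, 4] costs 2 * 4 = 8, but adding the 5 would cost 3 * 5 = 15,
    # so a new group is started there (and again before the 9).
    assert _group_by_max_tokens_sketch([3, 4, 5, 5, 9], max_tokens=10) == [[3, 4], [5, 5], [9]]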
| allennlp-master | allennlp/data/samplers/max_tokens_batch_sampler.py |
from allennlp.data.samplers.samplers import (
Sampler,
BatchSampler,
SequentialSampler,
SubsetRandomSampler,
WeightedRandomSampler,
RandomSampler,
BasicBatchSampler,
)
from allennlp.data.samplers.bucket_batch_sampler import BucketBatchSampler
from allennlp.data.samplers.max_tokens_batch_sampler import MaxTokensBatchSampler
| allennlp-master | allennlp/data/samplers/__init__.py |
from typing import List, Iterable
from torch.utils import data
from allennlp.common.registrable import Registrable
"""
Duplicates of the pytorch Sampler classes. Broadly, these only exist
so that we can add type hints, meaning we can construct them from configuration
files. You can use these directly from Python code, but they are identical to the
pytorch ones.
"""
class Sampler(Registrable):
"""
A copy of the pytorch [Sampler](https://pytorch.org/docs/stable/_modules/torch/utils/data/sampler.html)
which allows us to register it with `Registrable.`
"""
def __iter__(self) -> Iterable[int]:
raise NotImplementedError
class BatchSampler(Registrable):
"""
A copy of the pytorch
[BatchSampler](https://pytorch.org/docs/stable/data.html#torch.utils.data.BatchSampler)
which allows us to register it with `Registrable.`
"""
def __iter__(self) -> Iterable[List[int]]:
raise NotImplementedError
@Sampler.register("sequential")
class SequentialSampler(data.SequentialSampler, Sampler):
"""
A registrable version of pytorch's
[SequentialSampler](https://pytorch.org/docs/stable/data.html#torch.utils.data.SequentialSampler).
Registered as a `Sampler` with name "sequential".
    In a typical AllenNLP configuration file, the `data_source` parameter does not get an entry under
    the "sampler" key; it gets constructed separately.
"""
def __init__(self, data_source: data.Dataset):
super().__init__(data_source)
@Sampler.register("random")
class RandomSampler(data.RandomSampler, Sampler):
"""
A registrable version of pytorch's
[RandomSampler](https://pytorch.org/docs/stable/data.html#torch.utils.data.RandomSampler).
Samples elements randomly. If without replacement, then sample from a shuffled dataset.
If with replacement, then user can specify `num_samples` to draw.
Registered as a `Sampler` with name "random".
# Parameters
data_source: `Dataset`, required
The dataset to sample from.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"sampler", it gets constructed separately.
replacement : `bool`, optional (default = `False`)
Samples are drawn with replacement if `True`.
num_samples: `int` (default = `len(dataset)`)
The number of samples to draw. This argument
        should only be specified when `replacement` is `True`.
"""
def __init__(
self, data_source: data.Dataset, replacement: bool = False, num_samples: int = None
):
super().__init__(data_source, replacement, num_samples)
@Sampler.register("subset_random")
class SubsetRandomSampler(data.SubsetRandomSampler, Sampler):
"""
A registrable version of pytorch's
[SubsetRandomSampler](https://pytorch.org/docs/stable/data.html#torch.utils.data.SubsetRandomSampler).
Samples elements randomly from a given list of indices, without replacement.
Registered as a `Sampler` with name "subset_random".
# Parameters
indices: `List[int]`
a sequence of indices to sample from.
"""
def __init__(self, indices: List[int]):
super().__init__(indices)
@Sampler.register("weighted_random")
class WeightedRandomSampler(data.WeightedRandomSampler, Sampler):
"""
A registrable version of pytorch's
[WeightedRandomSampler](https://pytorch.org/docs/stable/data.html#torch.utils.data.WeightedRandomSampler).
Samples elements from `[0,...,len(weights)-1]` with given probabilities (weights).
Registered as a `Sampler` with name "weighted_random".
# Parameters:
weights : `List[float]`
        A sequence of weights, not necessarily summing to one.
num_samples : `int`
The number of samples to draw.
replacement : `bool`
If ``True``, samples are drawn with replacement.
If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.
# Examples
```python
>>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True))
[0, 0, 0, 1, 0]
>>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False))
[0, 1, 4, 3, 2]
```
"""
def __init__(self, weights: List[float], num_samples: int, replacement: bool = True):
super().__init__(weights, num_samples, replacement)
@BatchSampler.register("basic")
class BasicBatchSampler(data.BatchSampler, BatchSampler):
"""
A registrable version of pytorch's
[BatchSampler](https://pytorch.org/docs/stable/data.html#torch.utils.data.BatchSampler).
Wraps another sampler to yield a mini-batch of indices.
Registered as a `BatchSampler` with name "basic".
# Parameters
sampler: `Sampler`
The base sampler.
batch_size : `int`
The size of the batch.
drop_last : `bool`
If `True`, the sampler will drop the last batch if
        its size would be less than `batch_size`.
# Examples
```python
>>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
```
"""
def __init__(self, sampler: Sampler, batch_size: int, drop_last: bool):
super().__init__(sampler, batch_size, drop_last)
| allennlp-master | allennlp/data/samplers/samplers.py |
import logging
from typing import List, Iterable, Tuple, Optional
import random
import math
from torch.utils import data
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import lazy_groups_of
from allennlp.data.instance import Instance
from allennlp.data.samplers import BatchSampler
logger = logging.getLogger(__name__)
def add_noise_to_value(value: int, noise_param: float):
noise_value = value * noise_param
noise = random.uniform(-noise_value, noise_value)
return value + noise
@BatchSampler.register("bucket")
class BucketBatchSampler(BatchSampler):
"""
    A sampler which, by default, argsorts instances with respect to their maximum input lengths.
    You can provide a list of field names and padding keys (or pass none, in which case they
will be inferred) which the dataset will be sorted by before doing this batching, causing inputs
with similar length to be batched together, making computation more efficient (as less time is
wasted on padded elements of the batch).
# Parameters
data_source: `data.Dataset`, required
The pytorch `Dataset` of allennlp Instances to bucket.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"batch_sampler", it gets constructed separately.
batch_size : `int`, required
The size of each batch of instances yielded when calling the dataloader.
sorting_keys : `List[str]`, optional
To bucket inputs into batches, we want to group the instances by padding length, so that we
minimize the amount of padding necessary per batch. In order to do this, we need to know
which fields need what type of padding, and in what order.
Specifying the right keys for this is a bit cryptic, so if this is not given we try to
auto-detect the right keys by iterating through a few instances upfront, reading all of the
padding keys and seeing which one has the longest length. We use that one for padding.
This should give reasonable results in most cases. Some cases where it might not be the
right thing to do are when you have a `ListField[TextField]`, or when you have a really
long, constant length `ArrayField`.
When you need to specify this yourself, you can create an instance from your dataset and
call `Instance.get_padding_lengths()` to see a list of all keys used in your data. You
should give one or more of those as the sorting keys here.
padding_noise : `float`, optional (default=`.1`)
When sorting by padding length, we add a bit of noise to the lengths, so that the sorting
isn't deterministic. This parameter determines how much noise we add, as a percentage of
the actual padding value for each instance.
drop_last : `bool`, (default = `False`)
If `True`, the sampler will drop the last batch if
        its size would be less than `batch_size`.
"""
def __init__(
self,
data_source: data.Dataset,
batch_size: int,
sorting_keys: List[str] = None,
padding_noise: float = 0.1,
drop_last: bool = False,
):
self.vocab = data_source.vocab
self.sorting_keys = sorting_keys
self.padding_noise = padding_noise
self.batch_size = batch_size
self.data_source = data_source
self.drop_last = drop_last
def _argsort_by_padding(
self, instances: Iterable[Instance]
) -> Tuple[List[int], List[List[int]]]:
"""
Argsorts the instances by their padding lengths, using the keys in
        `sorting_keys` (in the order in which they are provided). `sorting_keys`
        is a list of field names.
"""
if not self.sorting_keys:
logger.info("No sorting keys given; trying to guess a good one")
self._guess_sorting_keys(instances)
logger.info(f"Using {self.sorting_keys} as the sorting keys")
instances_with_lengths = []
for instance in instances:
            # Make sure the instance is indexed before computing its padding lengths.
lengths = []
noisy_lengths = []
for field_name in self.sorting_keys: # type: ignore
if field_name not in instance.fields:
raise ConfigurationError(
f'Sorting key "{field_name}" is not a field in instance. '
f"Available fields/keys are {list(instance.fields.keys())}."
)
lengths.append(len(instance.fields[field_name]))
noisy_lengths.append(add_noise_to_value(lengths[-1], self.padding_noise))
instances_with_lengths.append((noisy_lengths, lengths, instance))
with_indices = [(x, i) for i, x in enumerate(instances_with_lengths)]
with_indices.sort(key=lambda x: x[0][0])
return (
[instance_with_index[-1] for instance_with_index in with_indices],
[instance_with_index[0][1] for instance_with_index in with_indices],
)
def __iter__(self) -> Iterable[List[int]]:
indices, _ = self._argsort_by_padding(self.data_source)
batches = []
for group in lazy_groups_of(indices, self.batch_size):
batch_indices = list(group)
if self.drop_last and len(batch_indices) < self.batch_size:
continue
batches.append(batch_indices)
random.shuffle(batches)
for batch in batches:
yield batch
def _guess_sorting_keys(self, instances: Iterable[Instance], num_instances: int = 10) -> None:
"""
Use `num_instances` instances from the dataset to infer the keys used
for sorting the dataset for bucketing.
# Parameters
instances : `Iterable[Instance]`, required.
The dataset to guess sorting keys for.
num_instances : `int`, optional (default = `10`)
The number of instances to use to guess sorting keys. Typically
the default value is completely sufficient, but if your instances
are not homogeneous, you might need more.
"""
max_length = 0.0
longest_field: Optional[str] = None
for i, instance in enumerate(instances):
instance.index_fields(self.vocab)
for field_name, field in instance.fields.items():
length = len(field)
if length > max_length:
max_length = length
longest_field = field_name
if i > num_instances:
# Only use num_instances instances to guess the sorting keys.
break
if not longest_field:
# This shouldn't ever happen (you basically have to have an empty instance list), but
# just in case...
raise AssertionError(
"Found no field that needed padding; we are surprised you got this error, please "
"open an issue on github"
)
self.sorting_keys = [longest_field]
def __len__(self):
batch_count_float = len(self.data_source) / self.batch_size
if self.drop_last:
return math.floor(batch_count_float)
else:
return math.ceil(batch_count_float)
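# --- Illustrative sketch (not part of the original module) ---------------------------------------
# A standalone sketch of the bucketing idea in `_argsort_by_padding` and `__iter__` above: instance
# indices are sorted by a noised version of their padding length and then cut into consecutive
# batches, so instances of similar length land in the same batch; the real sampler additionally
# shuffles the resulting list of batches. The helper name and lengths are made up for illustration.
def _bucket_sketch(lengths, batch_size, padding_noise=0.1):
    noisy = sorted((add_noise_to_value(length, padding_noise), i) for i, length in enumerate(lengths))
    sorted_indices = [i for _, i in noisy]
    return [
        sorted_indices[start : start + batch_size]
        for start in range(0, len(sorted_indices), batch_size)
    ]
if __name__ == "__main__":
    # With no noise the sort is exactly by length: the two shortest instances (indices 5 and 1,
    # lengths 2 and 3) form the first batch.
    assert _bucket_sketch([10, 3, 9, 4, 11, 2], batch_size=2, padding_noise=0.0) == [
        [5, 1],
        [3, 2],
        [0, 4],
    ]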
| allennlp-master | allennlp/data/samplers/bucket_batch_sampler.py |
from typing import Any, Dict, List, Mapping
from overrides import overrides
from allennlp.data.fields.field import DataArray, Field
class MetadataField(Field[DataArray], Mapping[str, Any]):
"""
A `MetadataField` is a `Field` that does not get converted into tensors. It just carries
side information that might be needed later on, for computing some third-party metric, or
outputting debugging information, or whatever else you need. We use this in the BiDAF model,
for instance, to keep track of question IDs and passage token offsets, so we can more easily
use the official evaluation script to compute metrics.
We don't try to do any kind of smart combination of this field for batched input - when you use
this `Field` in a model, you'll get a list of metadata objects, one for each instance in the
batch.
# Parameters
metadata : `Any`
Some object containing the metadata that you want to store. It's likely that you'll want
this to be a dictionary, but it could be anything you want.
"""
__slots__ = ["metadata"]
def __init__(self, metadata: Any) -> None:
self.metadata = metadata
def __getitem__(self, key: str) -> Any:
try:
return self.metadata[key] # type: ignore
except TypeError:
raise TypeError("your metadata is not a dict")
def __iter__(self):
try:
return iter(self.metadata)
except TypeError:
raise TypeError("your metadata is not iterable")
def __len__(self):
try:
return len(self.metadata)
except TypeError:
raise TypeError("your metadata has no length")
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> DataArray:
return self.metadata # type: ignore
@overrides
def empty_field(self) -> "MetadataField":
return MetadataField(None)
@overrides
def batch_tensors(self, tensor_list: List[DataArray]) -> List[DataArray]: # type: ignore
return tensor_list
def __str__(self) -> str:
return "MetadataField (print field.metadata to see specific information)."
| allennlp-master | allennlp/data/fields/metadata_field.py |
from copy import deepcopy
from typing import Dict, Generic, List, TypeVar
import torch
from allennlp.data.vocabulary import Vocabulary
DataArray = TypeVar(
"DataArray", torch.Tensor, Dict[str, torch.Tensor], Dict[str, Dict[str, torch.Tensor]]
)
class Field(Generic[DataArray]):
"""
    A `Field` is some piece of a data instance that ends up as a tensor in a model (either as an
input or an output). Data instances are just collections of fields.
Fields go through up to two steps of processing: (1) tokenized fields are converted into token
ids, (2) fields containing token ids (or any other numeric data) are padded (if necessary) and
converted into tensors. The `Field` API has methods around both of these steps, though they
may not be needed for some concrete `Field` classes - if your field doesn't have any strings
that need indexing, you don't need to implement `count_vocab_items` or `index`. These
methods `pass` by default.
Once a vocabulary is computed and all fields are indexed, we will determine padding lengths,
then intelligently batch together instances and pad them into actual tensors.
"""
__slots__ = [] # type: ignore
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
"""
If there are strings in this field that need to be converted into integers through a
:class:`Vocabulary`, here is where we count them, to determine which tokens are in or out
of the vocabulary.
If your `Field` does not have any strings that need to be converted into indices, you do
not need to implement this method.
A note on this `counter`: because `Fields` can represent conceptually different things,
we separate the vocabulary items by `namespaces`. This way, we can use a single shared
mechanism to handle all mappings from strings to integers in all fields, while keeping
words in a `TextField` from sharing the same ids with labels in a `LabelField` (e.g.,
"entailment" or "contradiction" are labels in an entailment task)
Additionally, a single `Field` might want to use multiple namespaces - `TextFields` can
be represented as a combination of word ids and character ids, and you don't want words and
characters to share the same vocabulary - "a" as a word should get a different id from "a"
as a character, and the vocabulary sizes of words and characters are very different.
Because of this, the first key in the `counter` object is a `namespace`, like "tokens",
"token_characters", "tags", or "labels", and the second key is the actual vocabulary item.
"""
pass
def index(self, vocab: Vocabulary):
"""
Given a :class:`Vocabulary`, converts all strings in this field into (typically) integers.
This `modifies` the `Field` object, it does not return anything.
If your `Field` does not have any strings that need to be converted into indices, you do
not need to implement this method.
"""
pass
def get_padding_lengths(self) -> Dict[str, int]:
"""
If there are things in this field that need padding, note them here. In order to pad a
        batch of instances, we get all of the lengths from the batch, take the max, and pad
everything to that length (or use a pre-specified maximum length). The return value is a
dictionary mapping keys to lengths, like `{'num_tokens': 13}`.
This is always called after :func:`index`.
"""
raise NotImplementedError
def as_tensor(self, padding_lengths: Dict[str, int]) -> DataArray:
"""
Given a set of specified padding lengths, actually pad the data in this field and return a
torch Tensor (or a more complex data structure) of the correct shape. We also take a
couple of parameters that are important when constructing torch Tensors.
# Parameters
padding_lengths : `Dict[str, int]`
This dictionary will have the same keys that were produced in
:func:`get_padding_lengths`. The values specify the lengths to use when padding each
relevant dimension, aggregated across all instances in a batch.
"""
raise NotImplementedError
def empty_field(self) -> "Field":
"""
So that `ListField` can pad the number of fields in a list (e.g., the number of answer
option `TextFields`), we need a representation of an empty field of each type. This
returns that. This will only ever be called when we're to the point of calling
:func:`as_tensor`, so you don't need to worry about `get_padding_lengths`,
`count_vocab_items`, etc., being called on this empty field.
We make this an instance method instead of a static method so that if there is any state
in the Field, we can copy it over (e.g., the token indexers in `TextField`).
"""
raise NotImplementedError
def batch_tensors(self, tensor_list: List[DataArray]) -> DataArray: # type: ignore
"""
Takes the output of `Field.as_tensor()` from a list of `Instances` and merges it into
one batched tensor for this `Field`. The default implementation here in the base class
handles cases where `as_tensor` returns a single torch tensor per instance. If your
subclass returns something other than this, you need to override this method.
This operation does not modify `self`, but in some cases we need the information
contained in `self` in order to perform the batching, so this is an instance method, not
a class method.
"""
return torch.stack(tensor_list)
def __eq__(self, other) -> bool:
if isinstance(self, other.__class__):
# With the way "slots" classes work, self.__slots__ only gives the slots defined
# by the current class, but not any of its base classes. Therefore to truly
# check for equality we have to check through all of the slots in all of the
# base classes as well.
for class_ in self.__class__.mro():
for attr in getattr(class_, "__slots__", []):
if getattr(self, attr) != getattr(other, attr):
return False
# It's possible that a subclass was not defined as a slots class, in which
# case we'll need to check __dict__.
if hasattr(self, "__dict__"):
return self.__dict__ == other.__dict__
return True
return NotImplemented
def __len__(self):
raise NotImplementedError
def duplicate(self):
return deepcopy(self)
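# --- Illustrative sketch (not part of the original module) ---------------------------------------
# A minimal sketch of a custom `Field` following the lifecycle described above. It holds a list of
# integers that need no vocabulary indexing, so it only has to report its padding length and turn
# itself into a (padded) tensor. `ToyIntListField` is a made-up name used purely for illustration.
class ToyIntListField(Field[torch.Tensor]):
    __slots__ = ["values"]
    def __init__(self, values: List[int]) -> None:
        self.values = values
    def get_padding_lengths(self) -> Dict[str, int]:
        return {"num_values": len(self.values)}
    def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
        padded = self.values + [0] * (padding_lengths["num_values"] - len(self.values))
        return torch.LongTensor(padded)
    def empty_field(self) -> "ToyIntListField":
        return ToyIntListField([])
    def __len__(self) -> int:
        return len(self.values)
if __name__ == "__main__":
    field = ToyIntListField([4, 7])
    # At batch time padding lengths are aggregated across instances; pretend the longest
    # instance in the batch had four values.
    assert field.as_tensor({"num_values": 4}).tolist() == [4, 7, 0, 0]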
| allennlp-master | allennlp/data/fields/field.py |
from typing import Dict, List, Set, Tuple, Optional
import logging
import textwrap
from overrides import overrides
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data.fields.field import Field
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.data.vocabulary import Vocabulary
logger = logging.getLogger(__name__)
class AdjacencyField(Field[torch.Tensor]):
"""
A `AdjacencyField` defines directed adjacency relations between elements
in a :class:`~allennlp.data.fields.sequence_field.SequenceField`.
Because it's a labeling of some other field, we take that field as input here
and use it to determine our padding and other things.
This field will get converted into an array of shape (sequence_field_length, sequence_field_length),
where the (i, j)th array element is either a binary flag indicating there is an edge from i to j,
or an integer label k, indicating there is a label from i to j of type k.
# Parameters
indices : `List[Tuple[int, int]]`
sequence_field : `SequenceField`
A field containing the sequence that this `AdjacencyField` is labeling. Most often,
this is a `TextField`, for tagging edge relations between tokens in a sentence.
labels : `List[str]`, optional, (default = `None`)
Optional labels for the edges of the adjacency matrix.
label_namespace : `str`, optional (default=`'labels'`)
The namespace to use for converting tag strings into integers. We convert tag strings to
integers for you, and this parameter tells the `Vocabulary` object which mapping from
strings to integers to use (so that "O" as a tag doesn't get the same id as "O" as a word).
padding_value : `int`, optional (default = `-1`)
The value to use as padding.
"""
__slots__ = [
"indices",
"labels",
"sequence_field",
"_label_namespace",
"_padding_value",
"_indexed_labels",
]
# It is possible that users want to use this field with a namespace which uses OOV/PAD tokens.
# This warning will be repeated for every instantiation of this class (i.e for every data
# instance), spewing a lot of warnings so this class variable is used to only log a single
# warning per namespace.
_already_warned_namespaces: Set[str] = set()
def __init__(
self,
indices: List[Tuple[int, int]],
sequence_field: SequenceField,
labels: List[str] = None,
label_namespace: str = "labels",
padding_value: int = -1,
) -> None:
self.indices = indices
self.labels = labels
self.sequence_field = sequence_field
self._label_namespace = label_namespace
self._padding_value = padding_value
self._indexed_labels: Optional[List[int]] = None
self._maybe_warn_for_namespace(label_namespace)
field_length = sequence_field.sequence_length()
if len(set(indices)) != len(indices):
raise ConfigurationError(f"Indices must be unique, but found {indices}")
if not all(
0 <= index[1] < field_length and 0 <= index[0] < field_length for index in indices
):
raise ConfigurationError(
f"Label indices and sequence length "
f"are incompatible: {indices} and {field_length}"
)
if labels is not None and len(indices) != len(labels):
raise ConfigurationError(
f"Labelled indices were passed, but their lengths do not match: "
f" {labels}, {indices}"
)
def _maybe_warn_for_namespace(self, label_namespace: str) -> None:
if not (self._label_namespace.endswith("labels") or self._label_namespace.endswith("tags")):
if label_namespace not in self._already_warned_namespaces:
logger.warning(
"Your label namespace was '%s'. We recommend you use a namespace "
"ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by "
"default to your vocabulary. See documentation for "
"`non_padded_namespaces` parameter in Vocabulary.",
self._label_namespace,
)
self._already_warned_namespaces.add(label_namespace)
@overrides
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
if self._indexed_labels is None and self.labels is not None:
for label in self.labels:
counter[self._label_namespace][label] += 1 # type: ignore
@overrides
def index(self, vocab: Vocabulary):
if self.labels is not None:
self._indexed_labels = [
vocab.get_token_index(label, self._label_namespace) for label in self.labels
]
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {"num_tokens": self.sequence_field.sequence_length()}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
desired_num_tokens = padding_lengths["num_tokens"]
tensor = torch.ones(desired_num_tokens, desired_num_tokens) * self._padding_value
labels = self._indexed_labels or [1 for _ in range(len(self.indices))]
for index, label in zip(self.indices, labels):
tensor[index] = label
return tensor
@overrides
def empty_field(self) -> "AdjacencyField":
# The empty_list here is needed for mypy
empty_list: List[Tuple[int, int]] = []
adjacency_field = AdjacencyField(
empty_list, self.sequence_field.empty_field(), padding_value=self._padding_value
)
return adjacency_field
def __str__(self) -> str:
length = self.sequence_field.sequence_length()
formatted_labels = "".join(
"\t\t" + labels + "\n" for labels in textwrap.wrap(repr(self.labels), 100)
)
formatted_indices = "".join(
"\t\t" + index + "\n" for index in textwrap.wrap(repr(self.indices), 100)
)
return (
f"AdjacencyField of length {length}\n"
f"\t\twith indices:\n {formatted_indices}\n"
f"\t\tand labels:\n {formatted_labels} \t\tin namespace: '{self._label_namespace}'."
)
def __len__(self):
return len(self.sequence_field)
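# --- Illustrative sketch (not part of the original module) ---------------------------------------
# A standalone sketch of the tensor produced by `as_tensor` above: a
# (sequence_length, sequence_length) matrix filled with the padding value, with one cell per
# (i, j) index set to its indexed label, or to 1 when no labels were given. The helper name,
# indices and sequence length are made up for illustration only.
def _adjacency_tensor_sketch(indices, sequence_length, labels=None, padding_value=-1):
    tensor = torch.ones(sequence_length, sequence_length) * padding_value
    labels = labels or [1 for _ in range(len(indices))]
    for index, label in zip(indices, labels):
        tensor[index] = label
    return tensor
if __name__ == "__main__":
    # Edges 0 -> 1 and 2 -> 0 in a length-3 sequence, without labels.
    sketch = _adjacency_tensor_sketch([(0, 1), (2, 0)], sequence_length=3)
    assert sketch.tolist() == [
        [-1.0, 1.0, -1.0],
        [-1.0, -1.0, -1.0],
        [1.0, -1.0, -1.0],
    ]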
| allennlp-master | allennlp/data/fields/adjacency_field.py |
from typing import Dict
from overrides import overrides
import torch
from allennlp.data.fields.field import Field
from allennlp.data.fields.sequence_field import SequenceField
class SpanField(Field[torch.Tensor]):
"""
A `SpanField` is a pair of inclusive, zero-indexed (start, end) indices into a
:class:`~allennlp.data.fields.sequence_field.SequenceField`, used to represent a span of text.
Because it's a pair of indices into a :class:`SequenceField`, we take one of those as input
to make the span's dependence explicit and to validate that the span is well defined.
# Parameters
span_start : `int`, required.
The index of the start of the span in the :class:`SequenceField`.
span_end : `int`, required.
The inclusive index of the end of the span in the :class:`SequenceField`.
sequence_field : `SequenceField`, required.
A field containing the sequence that this `SpanField` is a span inside.
"""
__slots__ = ["span_start", "span_end", "sequence_field"]
def __init__(self, span_start: int, span_end: int, sequence_field: SequenceField) -> None:
self.span_start = span_start
self.span_end = span_end
self.sequence_field = sequence_field
if not isinstance(span_start, int) or not isinstance(span_end, int):
raise TypeError(
f"SpanFields must be passed integer indices. Found span indices: "
f"({span_start}, {span_end}) with types "
f"({type(span_start)} {type(span_end)})"
)
if span_start > span_end:
raise ValueError(
f"span_start must be less than span_end, " f"but found ({span_start}, {span_end})."
)
if span_end > self.sequence_field.sequence_length() - 1:
raise ValueError(
f"span_end must be <= len(sequence_length) - 1, but found "
f"{span_end} and {self.sequence_field.sequence_length() - 1} respectively."
)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
tensor = torch.LongTensor([self.span_start, self.span_end])
return tensor
@overrides
def empty_field(self):
return SpanField(-1, -1, self.sequence_field.empty_field())
def __str__(self) -> str:
return f"SpanField with spans: ({self.span_start}, {self.span_end})."
def __eq__(self, other) -> bool:
if isinstance(other, tuple) and len(other) == 2:
return other == (self.span_start, self.span_end)
return super().__eq__(other)
def __len__(self):
return 2
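# --- Illustrative sketch (not part of the original module) ---------------------------------------
# A minimal usage sketch with a made-up stub sequence field (real code would use e.g. a
# `TextField`): a `SpanField` simply turns its inclusive (start, end) pair into a 2-element long
# tensor, the empty field is encoded as (-1, -1), and spans compare equal to plain tuples.
if __name__ == "__main__":
    class _StubSequenceField(SequenceField):
        def sequence_length(self) -> int:
            return 5
        def empty_field(self) -> "SequenceField":
            return self
    span = SpanField(1, 3, _StubSequenceField())
    assert span.as_tensor({}).tolist() == [1, 3]
    assert span.empty_field().as_tensor({}).tolist() == [-1, -1]
    assert span == (1, 3)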
| allennlp-master | allennlp/data/fields/span_field.py |
"""
A :class:`~allennlp.data.fields.field.Field` is some piece of data instance
that ends up as an array in a model.
"""
from allennlp.data.fields.field import Field
from allennlp.data.fields.adjacency_field import AdjacencyField
from allennlp.data.fields.array_field import ArrayField
from allennlp.data.fields.flag_field import FlagField
from allennlp.data.fields.index_field import IndexField
from allennlp.data.fields.label_field import LabelField
from allennlp.data.fields.list_field import ListField
from allennlp.data.fields.metadata_field import MetadataField
from allennlp.data.fields.multilabel_field import MultiLabelField
from allennlp.data.fields.namespace_swapping_field import NamespaceSwappingField
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.data.fields.sequence_label_field import SequenceLabelField
from allennlp.data.fields.span_field import SpanField
from allennlp.data.fields.text_field import TextField
| allennlp-master | allennlp/data/fields/__init__.py |
from typing import Dict, List
from overrides import overrides
import torch
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.tokenizers import Token
from allennlp.data.fields.field import Field
class NamespaceSwappingField(Field[torch.Tensor]):
"""
A `NamespaceSwappingField` is used to map tokens in one namespace to tokens in another namespace.
It is used by seq2seq models with a copy mechanism that copies tokens from the source
sentence into the target sentence.
# Parameters
source_tokens : `List[Token]`
The tokens from the source sentence.
target_namespace : `str`
The namespace that the tokens from the source sentence will be mapped to.
"""
__slots__ = ["_source_tokens", "_target_namespace", "_mapping_array"]
def __init__(self, source_tokens: List[Token], target_namespace: str) -> None:
self._source_tokens = source_tokens
self._target_namespace = target_namespace
self._mapping_array: List[int] = []
@overrides
def index(self, vocab: Vocabulary):
self._mapping_array = [
vocab.get_token_index(x.ensure_text(), self._target_namespace)
for x in self._source_tokens
]
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {"num_tokens": len(self._source_tokens)}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
desired_length = padding_lengths["num_tokens"]
padded_tokens = pad_sequence_to_length(self._mapping_array, desired_length)
tensor = torch.LongTensor(padded_tokens)
return tensor
@overrides
def empty_field(self) -> "NamespaceSwappingField":
empty_field = NamespaceSwappingField([], self._target_namespace)
empty_field._mapping_array = []
return empty_field
def __len__(self):
return len(self._source_tokens)
| allennlp-master | allennlp/data/fields/namespace_swapping_field.py |
from typing import Dict, List, Union, Set, Iterator
import logging
import textwrap
from overrides import overrides
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.fields.field import Field
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.data.vocabulary import Vocabulary
logger = logging.getLogger(__name__)
class SequenceLabelField(Field[torch.Tensor]):
"""
A `SequenceLabelField` assigns a categorical label to each element in a
:class:`~allennlp.data.fields.sequence_field.SequenceField`.
Because it's a labeling of some other field, we take that field as input here, and we use it to
determine our padding and other things.
This field will get converted into a list of integer class ids, representing the correct class
for each element in the sequence.
# Parameters
labels : `Union[List[str], List[int]]`
A sequence of categorical labels, encoded as strings or integers. These could be POS tags like [NN,
JJ, ...], BIO tags like [B-PERS, I-PERS, O, O, ...], or any other categorical tag sequence. If the
labels are encoded as integers, they will not be indexed using a vocab.
sequence_field : `SequenceField`
A field containing the sequence that this `SequenceLabelField` is labeling. Most often, this is a
`TextField`, for tagging individual tokens in a sentence.
label_namespace : `str`, optional (default=`'labels'`)
The namespace to use for converting tag strings into integers. We convert tag strings to
integers for you, and this parameter tells the `Vocabulary` object which mapping from
strings to integers to use (so that "O" as a tag doesn't get the same id as "O" as a word).
"""
__slots__ = [
"labels",
"sequence_field",
"_label_namespace",
"_indexed_labels",
"_skip_indexing",
]
# It is possible that users want to use this field with a namespace which uses OOV/PAD tokens.
# This warning will be repeated for every instantiation of this class (i.e for every data
# instance), spewing a lot of warnings so this class variable is used to only log a single
# warning per namespace.
_already_warned_namespaces: Set[str] = set()
def __init__(
self,
labels: Union[List[str], List[int]],
sequence_field: SequenceField,
label_namespace: str = "labels",
) -> None:
self.labels = labels
self.sequence_field = sequence_field
self._label_namespace = label_namespace
self._indexed_labels = None
self._maybe_warn_for_namespace(label_namespace)
if len(labels) != sequence_field.sequence_length():
raise ConfigurationError(
"Label length and sequence length "
"don't match: %d and %d" % (len(labels), sequence_field.sequence_length())
)
self._skip_indexing = False
if all(isinstance(x, int) for x in labels):
self._indexed_labels = labels
self._skip_indexing = True
elif not all(isinstance(x, str) for x in labels):
raise ConfigurationError(
"SequenceLabelFields must be passed either all "
"strings or all ints. Found labels {} with "
"types: {}.".format(labels, [type(x) for x in labels])
)
def _maybe_warn_for_namespace(self, label_namespace: str) -> None:
if not (self._label_namespace.endswith("labels") or self._label_namespace.endswith("tags")):
if label_namespace not in self._already_warned_namespaces:
logger.warning(
"Your label namespace was '%s'. We recommend you use a namespace "
"ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by "
"default to your vocabulary. See documentation for "
"`non_padded_namespaces` parameter in Vocabulary.",
self._label_namespace,
)
self._already_warned_namespaces.add(label_namespace)
# Sequence methods
def __iter__(self) -> Iterator[Union[str, int]]:
return iter(self.labels)
def __getitem__(self, idx: int) -> Union[str, int]:
return self.labels[idx]
def __len__(self) -> int:
return len(self.labels)
@overrides
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
if self._indexed_labels is None:
for label in self.labels:
counter[self._label_namespace][label] += 1 # type: ignore
@overrides
def index(self, vocab: Vocabulary):
if not self._skip_indexing:
self._indexed_labels = [
vocab.get_token_index(label, self._label_namespace) # type: ignore
for label in self.labels
]
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {"num_tokens": self.sequence_field.sequence_length()}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
if self._indexed_labels is None:
raise ConfigurationError(
"You must call .index(vocabulary) on a field before calling .as_tensor()"
)
desired_num_tokens = padding_lengths["num_tokens"]
padded_tags = pad_sequence_to_length(self._indexed_labels, desired_num_tokens)
tensor = torch.LongTensor(padded_tags)
return tensor
@overrides
def empty_field(self) -> "SequenceLabelField":
# The empty_list here is needed for mypy
empty_list: List[str] = []
sequence_label_field = SequenceLabelField(empty_list, self.sequence_field.empty_field())
sequence_label_field._indexed_labels = empty_list
return sequence_label_field
def __str__(self) -> str:
length = self.sequence_field.sequence_length()
formatted_labels = "".join(
"\t\t" + labels + "\n" for labels in textwrap.wrap(repr(self.labels), 100)
)
return (
f"SequenceLabelField of length {length} with "
f"labels:\n {formatted_labels} \t\tin namespace: '{self._label_namespace}'."
)
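# --- Illustrative sketch (not part of the original module) ---------------------------------------
# A minimal usage sketch with a made-up stub sequence field (real code would use a `TextField`):
# integer labels skip vocabulary indexing entirely and are simply padded with zeros up to the
# batch's "num_tokens" padding length.
if __name__ == "__main__":
    class _StubSequenceField(SequenceField):
        def sequence_length(self) -> int:
            return 3
        def empty_field(self) -> "SequenceField":
            return self
    field = SequenceLabelField([2, 0, 1], _StubSequenceField())
    assert field.get_padding_lengths() == {"num_tokens": 3}
    # The padding length would normally come from the longest instance in the batch (here 5).
    assert field.as_tensor({"num_tokens": 5}).tolist() == [2, 0, 1, 0, 0]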
| allennlp-master | allennlp/data/fields/sequence_label_field.py |
from typing import Dict, Union, Set
import logging
from overrides import overrides
import torch
from allennlp.data.fields.field import Field
from allennlp.data.vocabulary import Vocabulary
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__)
class LabelField(Field[torch.Tensor]):
"""
A `LabelField` is a categorical label of some kind, where the labels are either strings of
text or 0-indexed integers (if you wish to skip indexing by passing skip_indexing=True).
If the labels need indexing, we will use a :class:`Vocabulary` to convert the string labels
into integers.
This field will get converted into an integer index representing the class label.
# Parameters
label : `Union[str, int]`
label_namespace : `str`, optional (default=`"labels"`)
The namespace to use for converting label strings into integers. We map label strings to
integers for you (e.g., "entailment" and "contradiction" get converted to 0, 1, ...),
and this namespace tells the `Vocabulary` object which mapping from strings to integers
to use (so "entailment" as a label doesn't get the same integer id as "entailment" as a
word). If you have multiple different label fields in your data, you should make sure you
use different namespaces for each one, always using the suffix "labels" (e.g.,
"passage_labels" and "question_labels").
skip_indexing : `bool`, optional (default=`False`)
If your labels are 0-indexed integers, you can pass in this flag, and we'll skip the indexing
step. If this is `False` and your labels are not strings, this throws a `ConfigurationError`.
"""
__slots__ = ["label", "_label_namespace", "_label_id", "_skip_indexing"]
# Most often, you probably don't want to have OOV/PAD tokens with a LabelField, so we warn you
# about it when you pick a namespace that will getting these tokens by default. It is
# possible, however, that you _do_ actually want OOV/PAD tokens with this Field. This class
# variable is used to make sure that we only log a single warning for this per namespace, and
# not every time you create one of these Field objects.
_already_warned_namespaces: Set[str] = set()
def __init__(
self, label: Union[str, int], label_namespace: str = "labels", skip_indexing: bool = False
) -> None:
self.label = label
self._label_namespace = label_namespace
self._label_id = None
self._maybe_warn_for_namespace(label_namespace)
self._skip_indexing = skip_indexing
if skip_indexing:
if not isinstance(label, int):
raise ConfigurationError(
"In order to skip indexing, your labels must be integers. "
"Found label = {}".format(label)
)
self._label_id = label
elif not isinstance(label, str):
raise ConfigurationError(
"LabelFields must be passed a string label if skip_indexing=False. "
"Found label: {} with type: {}.".format(label, type(label))
)
def _maybe_warn_for_namespace(self, label_namespace: str) -> None:
if not (self._label_namespace.endswith("labels") or self._label_namespace.endswith("tags")):
if label_namespace not in self._already_warned_namespaces:
logger.warning(
"Your label namespace was '%s'. We recommend you use a namespace "
"ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by "
"default to your vocabulary. See documentation for "
"`non_padded_namespaces` parameter in Vocabulary.",
self._label_namespace,
)
self._already_warned_namespaces.add(label_namespace)
@overrides
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
if self._label_id is None:
counter[self._label_namespace][self.label] += 1 # type: ignore
@overrides
def index(self, vocab: Vocabulary):
if not self._skip_indexing:
self._label_id = vocab.get_token_index(
self.label, self._label_namespace # type: ignore
)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
tensor = torch.tensor(self._label_id, dtype=torch.long)
return tensor
@overrides
def empty_field(self):
return LabelField(-1, self._label_namespace, skip_indexing=True)
def __str__(self) -> str:
return f"LabelField with label: {self.label} in namespace: '{self._label_namespace}'."
def __len__(self):
return 1
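# ---------------------------------------------------------------------------
# Editor's addition: an illustrative usage sketch, not part of the original
# allennlp file. The label string and the Vocabulary helper used here are
# example choices; the method calls mirror the class defined above.
def _example_label_field_usage():
    from collections import defaultdict

    field = LabelField("entailment")

    # Counting feeds the vocabulary; indexing then converts the string to an id.
    counter: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
    field.count_vocab_items(counter)  # counter["labels"]["entailment"] == 1

    vocab = Vocabulary()
    vocab.add_token_to_namespace("entailment", namespace="labels")
    field.index(vocab)

    tensor = field.as_tensor(field.get_padding_lengths())
    # A 0-dim LongTensor holding the label id.
    assert tensor.dim() == 0
    assert tensor.item() == vocab.get_token_index("entailment", "labels")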
| allennlp-master | allennlp/data/fields/label_field.py |
from typing import Dict, Union, Sequence, Set, Optional, cast
import logging
from overrides import overrides
import torch
from allennlp.data.fields.field import Field
from allennlp.data.vocabulary import Vocabulary
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__)
class MultiLabelField(Field[torch.Tensor]):
"""
A `MultiLabelField` is an extension of the :class:`LabelField` that allows for multiple labels.
It is particularly useful in multi-label classification where more than one label can be correct.
As with the :class:`LabelField`, labels are either strings of text or 0-indexed integers (if you wish
to skip indexing by passing skip_indexing=True).
If the labels need indexing, we will use a :class:`Vocabulary` to convert the string labels
into integers.
    This field will get converted into a binary vector of length equal to the vocabulary size,
    with ones at the positions of the given labels and zeros everywhere else.
# Parameters
labels : `Sequence[Union[str, int]]`
label_namespace : `str`, optional (default=`"labels"`)
The namespace to use for converting label strings into integers. We map label strings to
integers for you (e.g., "entailment" and "contradiction" get converted to 0, 1, ...),
and this namespace tells the `Vocabulary` object which mapping from strings to integers
to use (so "entailment" as a label doesn't get the same integer id as "entailment" as a
word). If you have multiple different label fields in your data, you should make sure you
use different namespaces for each one, always using the suffix "labels" (e.g.,
"passage_labels" and "question_labels").
skip_indexing : `bool`, optional (default=`False`)
If your labels are 0-indexed integers, you can pass in this flag, and we'll skip the indexing
step. If this is `False` and your labels are not strings, this throws a `ConfigurationError`.
num_labels : `int`, optional (default=`None`)
If `skip_indexing=True`, the total number of possible labels should be provided, which is required
to decide the size of the output tensor. `num_labels` should equal largest label id + 1.
If `skip_indexing=False`, `num_labels` is not required.
"""
__slots__ = ["labels", "_label_namespace", "_label_ids", "_num_labels"]
# It is possible that users want to use this field with a namespace which uses OOV/PAD tokens.
# This warning will be repeated for every instantiation of this class (i.e for every data
# instance), spewing a lot of warnings so this class variable is used to only log a single
# warning per namespace.
_already_warned_namespaces: Set[str] = set()
def __init__(
self,
labels: Sequence[Union[str, int]],
label_namespace: str = "labels",
skip_indexing: bool = False,
num_labels: Optional[int] = None,
) -> None:
self.labels = labels
self._label_namespace = label_namespace
self._label_ids = None
self._maybe_warn_for_namespace(label_namespace)
self._num_labels = num_labels
if skip_indexing and self.labels:
if not all(isinstance(label, int) for label in labels):
raise ConfigurationError(
"In order to skip indexing, your labels must be integers. "
"Found labels = {}".format(labels)
)
if not num_labels:
raise ConfigurationError("In order to skip indexing, num_labels can't be None.")
if not all(cast(int, label) < num_labels for label in labels):
raise ConfigurationError(
"All labels should be < num_labels. "
"Found num_labels = {} and labels = {} ".format(num_labels, labels)
)
self._label_ids = labels
else:
if not all(isinstance(label, str) for label in labels):
raise ConfigurationError(
"MultiLabelFields expects string labels if skip_indexing=False. "
"Found labels: {}".format(labels)
)
def _maybe_warn_for_namespace(self, label_namespace: str) -> None:
if not (label_namespace.endswith("labels") or label_namespace.endswith("tags")):
if label_namespace not in self._already_warned_namespaces:
logger.warning(
"Your label namespace was '%s'. We recommend you use a namespace "
"ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by "
"default to your vocabulary. See documentation for "
"`non_padded_namespaces` parameter in Vocabulary.",
self._label_namespace,
)
self._already_warned_namespaces.add(label_namespace)
@overrides
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
if self._label_ids is None:
for label in self.labels:
counter[self._label_namespace][label] += 1 # type: ignore
@overrides
def index(self, vocab: Vocabulary):
if self._label_ids is None:
self._label_ids = [
vocab.get_token_index(label, self._label_namespace) # type: ignore
for label in self.labels
]
if not self._num_labels:
self._num_labels = vocab.get_vocab_size(self._label_namespace)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
tensor = torch.zeros(self._num_labels, dtype=torch.long) # vector of zeros
if self._label_ids:
tensor.scatter_(0, torch.LongTensor(self._label_ids), 1)
return tensor
@overrides
def empty_field(self):
return MultiLabelField(
[], self._label_namespace, skip_indexing=True, num_labels=self._num_labels
)
def __str__(self) -> str:
return (
f"MultiLabelField with labels: {self.labels} in namespace: '{self._label_namespace}'.'"
)
def __len__(self):
return 1
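# ---------------------------------------------------------------------------
# Editor's addition: an illustrative usage sketch, not part of the original
# allennlp file. It uses the skip_indexing path so no Vocabulary is needed;
# the label ids and num_labels are made-up example values.
def _example_multilabel_field_usage():
    field = MultiLabelField([0, 2], skip_indexing=True, num_labels=4)
    tensor = field.as_tensor(field.get_padding_lengths())
    # A length-4 binary vector with ones at the given label positions.
    assert tensor.tolist() == [1, 0, 1, 0]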
| allennlp-master | allennlp/data/fields/multilabel_field.py |
from allennlp.data.fields.field import DataArray, Field
class SequenceField(Field[DataArray]):
"""
A `SequenceField` represents a sequence of things. This class just adds a method onto
`Field`: :func:`sequence_length`. It exists so that `SequenceLabelField`, `IndexField` and other
similar `Fields` can have a single type to require, with a consistent API, whether they are
pointing to words in a `TextField`, items in a `ListField`, or something else.
"""
__slots__ = [] # type: ignore
def sequence_length(self) -> int:
"""
How many elements are there in this sequence?
"""
raise NotImplementedError
def empty_field(self) -> "SequenceField":
raise NotImplementedError
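# ---------------------------------------------------------------------------
# Editor's addition: an illustrative sketch, not part of the original allennlp
# file. Code that only needs a length can depend on this abstract type; any
# concrete field (e.g. a TextField of tokens or a ListField of answer options)
# supplies sequence_length(). The helper below is hypothetical.
def _example_padding_needed(field: "SequenceField", desired_length: int) -> int:
    # How many padded positions a consumer would have to add to reach desired_length.
    return max(0, desired_length - field.sequence_length())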
| allennlp-master | allennlp/data/fields/sequence_field.py |
from typing import Dict
import numpy
import torch
from overrides import overrides
from allennlp.data.fields.field import Field
class ArrayField(Field[numpy.ndarray]):
"""
A class representing an array, which could have arbitrary dimensions.
    A batch of these arrays is padded to the max length in the batch
    along each dimension.
"""
__slots__ = ["array", "padding_value", "dtype"]
def __init__(
self, array: numpy.ndarray, padding_value: int = 0, dtype: numpy.dtype = numpy.float32
) -> None:
self.array = array
self.padding_value = padding_value
self.dtype = dtype
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {"dimension_" + str(i): shape for i, shape in enumerate(self.array.shape)}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
max_shape = [padding_lengths["dimension_{}".format(i)] for i in range(len(padding_lengths))]
        # Convert explicitly to an ndarray just in case it's a scalar
# (it'd end up not being an ndarray otherwise).
# Also, the explicit dtype declaration for `asarray` is necessary for scalars.
return_array = numpy.asarray(
numpy.ones(max_shape, dtype=self.dtype) * self.padding_value, dtype=self.dtype
)
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(self.array.shape)
if len(self.array.shape) < len(max_shape):
slicing_shape = slicing_shape + [
0 for _ in range(len(max_shape) - len(self.array.shape))
]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = self.array
tensor = torch.from_numpy(return_array)
return tensor
@overrides
def empty_field(self):
# Pass the padding_value, so that any outer field, e.g., `ListField[ArrayField]` uses the
# same padding_value in the padded ArrayFields
return ArrayField(
numpy.array([], dtype=self.dtype), padding_value=self.padding_value, dtype=self.dtype
)
def __str__(self) -> str:
return f"ArrayField with shape: {self.array.shape} and dtype: {self.dtype}."
def __len__(self):
return 1 if self.array.ndim == 0 else self.array.shape[0]
def __eq__(self, other) -> bool:
return numpy.array_equal(self.array, other.array)
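# ---------------------------------------------------------------------------
# Editor's addition: an illustrative usage sketch, not part of the original
# allennlp file. It shows how as_tensor pads a smaller array up to batch-level
# padding lengths; the shapes are made-up example values.
def _example_array_field_padding():
    shorter = ArrayField(numpy.array([1.0, 2.0]), padding_value=0)
    longer = ArrayField(numpy.array([1.0, 2.0, 3.0, 4.0]))
    # In practice these lengths come from the max over all fields in a batch.
    padding_lengths = {
        "dimension_0": max(
            shorter.get_padding_lengths()["dimension_0"],
            longer.get_padding_lengths()["dimension_0"],
        )
    }
    padded = shorter.as_tensor(padding_lengths)
    assert padded.tolist() == [1.0, 2.0, 0.0, 0.0]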
| allennlp-master | allennlp/data/fields/array_field.py |
from typing import Any, Dict, List
from overrides import overrides
from allennlp.data.fields.field import Field
class FlagField(Field[Any]):
"""
A class representing a flag, which must be constant across all instances in a batch.
This will be passed to a `forward` method as a single value of whatever type you pass in.
"""
__slots__ = ["flag_value"]
def __init__(self, flag_value: Any) -> None:
self.flag_value = flag_value
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> Any:
return self.flag_value
@overrides
def empty_field(self):
# Because this has to be constant across all instances in a batch, we need to keep the same
# value.
return FlagField(self.flag_value)
def __str__(self) -> str:
return f"FlagField({self.flag_value})"
def __len__(self) -> int:
return 1
@overrides
def batch_tensors(self, tensor_list: List[Any]) -> Any:
if len(set(tensor_list)) != 1:
raise ValueError(
f"Got different values in a FlagField when trying to batch them: {tensor_list}"
)
return tensor_list[0]
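# ---------------------------------------------------------------------------
# Editor's addition: an illustrative usage sketch, not part of the original
# allennlp file. The flag value is passed through untouched, and batching
# insists that every instance in the batch carries the same value.
def _example_flag_field_usage():
    field = FlagField("beam_search")
    assert field.as_tensor(field.get_padding_lengths()) == "beam_search"
    # Batching identical flags just returns the shared value; differing flags raise ValueError.
    assert field.batch_tensors(["beam_search", "beam_search"]) == "beam_search"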
| allennlp-master | allennlp/data/fields/flag_field.py |
"""
A `TextField` represents a string of text, the kind that you might want to represent with
standard word vectors, or pass through an LSTM.
"""
from collections import defaultdict
from copy import deepcopy
from typing import Dict, List, Optional, Iterator
import textwrap
from overrides import overrides
from spacy.tokens import Token as SpacyToken
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers.token_indexer import TokenIndexer, IndexedTokenList
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn import util
# There are two levels of dictionaries here: the top level is for the *key*, which aligns
# TokenIndexers with their corresponding TokenEmbedders. The bottom level is for the *objects*
# produced by a given TokenIndexer, which will be input to a particular TokenEmbedder's forward()
# method. We label these as tensors, because that's what they typically are, though they could in
# reality have arbitrary type.
TextFieldTensors = Dict[str, Dict[str, torch.Tensor]]
class TextField(SequenceField[TextFieldTensors]):
"""
This `Field` represents a list of string tokens. Before constructing this object, you need
to tokenize raw strings using a :class:`~allennlp.data.tokenizers.tokenizer.Tokenizer`.
Because string tokens can be represented as indexed arrays in a number of ways, we also take a
dictionary of :class:`~allennlp.data.token_indexers.token_indexer.TokenIndexer`
objects that will be used to convert the tokens into indices.
Each `TokenIndexer` could represent each token as a single ID, or a list of character IDs, or
something else.
This field will get converted into a dictionary of arrays, one for each `TokenIndexer`. A
`SingleIdTokenIndexer` produces an array of shape (num_tokens,), while a
`TokenCharactersIndexer` produces an array of shape (num_tokens, num_characters).
"""
__slots__ = ["tokens", "_token_indexers", "_indexed_tokens"]
def __init__(self, tokens: List[Token], token_indexers: Dict[str, TokenIndexer]) -> None:
self.tokens = tokens
self._token_indexers = token_indexers
self._indexed_tokens: Optional[Dict[str, IndexedTokenList]] = None
if not all(isinstance(x, (Token, SpacyToken)) for x in tokens):
raise ConfigurationError(
"TextFields must be passed Tokens. "
"Found: {} with types {}.".format(tokens, [type(x) for x in tokens])
)
@overrides
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
for indexer in self._token_indexers.values():
for token in self.tokens:
indexer.count_vocab_items(token, counter)
@overrides
def index(self, vocab: Vocabulary):
self._indexed_tokens = {}
for indexer_name, indexer in self._token_indexers.items():
self._indexed_tokens[indexer_name] = indexer.tokens_to_indices(self.tokens, vocab)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
"""
The `TextField` has a list of `Tokens`, and each `Token` gets converted into arrays by
(potentially) several `TokenIndexers`. This method gets the max length (over tokens)
associated with each of these arrays.
"""
if self._indexed_tokens is None:
raise ConfigurationError(
"You must call .index(vocabulary) on a field before determining padding lengths."
)
padding_lengths = {}
for indexer_name, indexer in self._token_indexers.items():
indexer_lengths = indexer.get_padding_lengths(self._indexed_tokens[indexer_name])
for key, length in indexer_lengths.items():
padding_lengths[f"{indexer_name}___{key}"] = length
return padding_lengths
@overrides
def sequence_length(self) -> int:
return len(self.tokens)
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> TextFieldTensors:
if self._indexed_tokens is None:
raise ConfigurationError(
"You must call .index(vocabulary) on a field before calling .as_tensor()"
)
tensors = {}
indexer_lengths: Dict[str, Dict[str, int]] = defaultdict(dict)
for key, value in padding_lengths.items():
# We want this to crash if the split fails. Should never happen, so I'm not
# putting in a check, but if you fail on this line, open a github issue.
indexer_name, padding_key = key.split("___")
indexer_lengths[indexer_name][padding_key] = value
for indexer_name, indexer in self._token_indexers.items():
tensors[indexer_name] = indexer.as_padded_tensor_dict(
self._indexed_tokens[indexer_name], indexer_lengths[indexer_name]
)
return tensors
@overrides
def empty_field(self):
text_field = TextField([], self._token_indexers)
text_field._indexed_tokens = {}
for indexer_name, indexer in self._token_indexers.items():
text_field._indexed_tokens[indexer_name] = indexer.get_empty_token_list()
return text_field
@overrides
def batch_tensors(self, tensor_list: List[TextFieldTensors]) -> TextFieldTensors:
# This is creating a dict of {token_indexer_name: {token_indexer_outputs: batched_tensor}}
# for each token indexer used to index this field.
indexer_lists: Dict[str, List[Dict[str, torch.Tensor]]] = defaultdict(list)
for tensor_dict in tensor_list:
for indexer_name, indexer_output in tensor_dict.items():
indexer_lists[indexer_name].append(indexer_output)
batched_tensors = {
# NOTE(mattg): if an indexer has its own nested structure, rather than one tensor per
# argument, then this will break. If that ever happens, we should move this to an
# `indexer.batch_tensors` method, with this logic as the default implementation in the
# base class.
indexer_name: util.batch_tensor_dicts(indexer_outputs)
for indexer_name, indexer_outputs in indexer_lists.items()
}
return batched_tensors
def __str__(self) -> str:
indexers = {
name: indexer.__class__.__name__ for name, indexer in self._token_indexers.items()
}
# Double tab to indent under the header.
formatted_text = "".join(
"\t\t" + text + "\n" for text in textwrap.wrap(repr(self.tokens), 100)
)
return (
f"TextField of length {self.sequence_length()} with "
f"text: \n {formatted_text} \t\tand TokenIndexers : {indexers}"
)
# Sequence[Token] methods
def __iter__(self) -> Iterator[Token]:
return iter(self.tokens)
def __getitem__(self, idx: int) -> Token:
return self.tokens[idx]
def __len__(self) -> int:
return len(self.tokens)
@overrides
def duplicate(self):
"""
Overrides the behavior of `duplicate` so that `self._token_indexers` won't
actually be deep-copied.
Not only would it be extremely inefficient to deep-copy the token indexers,
but it also fails in many cases since some tokenizers (like those used in
the 'transformers' lib) cannot actually be deep-copied.
"""
new = TextField(deepcopy(self.tokens), {k: v for k, v in self._token_indexers.items()})
new._indexed_tokens = deepcopy(self._indexed_tokens)
return new
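# ---------------------------------------------------------------------------
# Editor's addition: an illustrative usage sketch, not part of the original
# allennlp file. The tokens, the "words" namespace, and the "single_id" indexer
# key are example choices; the calls mirror the class defined above.
def _example_text_field_usage():
    from allennlp.data.token_indexers import SingleIdTokenIndexer

    tokens = [Token(t) for t in ["hello", "world"]]
    field = TextField(tokens, {"single_id": SingleIdTokenIndexer(namespace="words")})

    vocab = Vocabulary()
    vocab.add_tokens_to_namespace(["hello", "world"], namespace="words")
    field.index(vocab)

    tensors = field.as_tensor(field.get_padding_lengths())
    # One entry per TokenIndexer; a SingleIdTokenIndexer yields a (num_tokens,) id tensor.
    assert tuple(tensors["single_id"]["tokens"].shape) == (2,)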
| allennlp-master | allennlp/data/fields/text_field.py |
from typing import Dict
from overrides import overrides
import torch
from allennlp.data.fields.field import Field
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.common.checks import ConfigurationError
class IndexField(Field[torch.Tensor]):
"""
An `IndexField` is an index into a
:class:`~allennlp.data.fields.sequence_field.SequenceField`, as might be used for representing
a correct answer option in a list, or a span begin and span end position in a passage, for
example. Because it's an index into a :class:`SequenceField`, we take one of those as input
and use it to compute padding lengths.
# Parameters
index : `int`
The index of the answer in the :class:`SequenceField`. This is typically the "correct
answer" in some classification decision over the sequence, like where an answer span starts
in SQuAD, or which answer option is correct in a multiple choice question. A value of
`-1` means there is no label, which can be used for padding or other purposes.
sequence_field : `SequenceField`
A field containing the sequence that this `IndexField` is a pointer into.
"""
__slots__ = ["sequence_index", "sequence_field"]
def __init__(self, index: int, sequence_field: SequenceField) -> None:
self.sequence_index = index
self.sequence_field = sequence_field
if not isinstance(index, int):
raise ConfigurationError(
"IndexFields must be passed integer indices. "
"Found index: {} with type: {}.".format(index, type(index))
)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
tensor = torch.LongTensor([self.sequence_index])
return tensor
@overrides
def empty_field(self):
return IndexField(-1, self.sequence_field.empty_field())
def __str__(self) -> str:
return f"IndexField with index: {self.sequence_index}."
def __eq__(self, other) -> bool:
        # Allow equality checks against ints that equal the sequence index.
if isinstance(other, int):
return self.sequence_index == other
return super().__eq__(other)
def __len__(self):
return 1
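# ---------------------------------------------------------------------------
# Editor's addition: an illustrative usage sketch, not part of the original
# allennlp file. The passage tokens are made up; the point is that the index
# is a pointer into some sequence field and comes out as a shape-(1,) LongTensor.
def _example_index_field_usage():
    from allennlp.data.fields.text_field import TextField
    from allennlp.data.token_indexers import SingleIdTokenIndexer
    from allennlp.data.tokenizers import Token

    passage = TextField(
        [Token(t) for t in ["a", "b", "c"]], {"tokens": SingleIdTokenIndexer()}
    )
    answer_start = IndexField(2, passage)
    tensor = answer_start.as_tensor(answer_start.get_padding_lengths())
    assert tensor.tolist() == [2]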
| allennlp-master | allennlp/data/fields/index_field.py |
from typing import Dict, List, Iterator
from overrides import overrides
from allennlp.data.fields.field import DataArray, Field
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.common.util import pad_sequence_to_length
class ListField(SequenceField[DataArray]):
"""
A `ListField` is a list of other fields. You would use this to represent, e.g., a list of
answer options that are themselves `TextFields`.
This field will get converted into a tensor that has one more mode than the items in the list.
If this is a list of `TextFields` that have shape (num_words, num_characters), this
`ListField` will output a tensor of shape (num_sentences, num_words, num_characters).
# Parameters
field_list : `List[Field]`
A list of `Field` objects to be concatenated into a single input tensor. All of the
contained `Field` objects must be of the same type.
"""
__slots__ = ["field_list"]
def __init__(self, field_list: List[Field]) -> None:
field_class_set = {field.__class__ for field in field_list}
assert (
len(field_class_set) == 1
), "ListFields must contain a single field type, found " + str(field_class_set)
# Not sure why mypy has a hard time with this type...
self.field_list: List[Field] = field_list
# Sequence[Field] methods
def __iter__(self) -> Iterator[Field]:
return iter(self.field_list)
def __getitem__(self, idx: int) -> Field:
return self.field_list[idx]
def __len__(self) -> int:
return len(self.field_list)
@overrides
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
for field in self.field_list:
field.count_vocab_items(counter)
@overrides
def index(self, vocab: Vocabulary):
for field in self.field_list:
field.index(vocab)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
field_lengths = [field.get_padding_lengths() for field in self.field_list]
padding_lengths = {"num_fields": len(self.field_list)}
# We take the set of all possible padding keys for all fields, rather
# than just a random key, because it is possible for fields to be empty
# when we pad ListFields.
possible_padding_keys = [
key for field_length in field_lengths for key in list(field_length.keys())
]
for key in set(possible_padding_keys):
# In order to be able to nest ListFields, we need to scope the padding length keys
# appropriately, so that nested ListFields don't all use the same "num_fields" key. So
# when we construct the dictionary from the list of fields, we add something to the
# name, and we remove it when padding the list of fields.
padding_lengths["list_" + key] = max(x[key] if key in x else 0 for x in field_lengths)
# Set minimum padding length to handle empty list fields.
for padding_key in padding_lengths:
padding_lengths[padding_key] = max(padding_lengths[padding_key], 1)
return padding_lengths
@overrides
def sequence_length(self) -> int:
return len(self.field_list)
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> DataArray:
padded_field_list = pad_sequence_to_length(
self.field_list, padding_lengths["num_fields"], self.field_list[0].empty_field
)
# Here we're removing the scoping on the padding length keys that we added in
# `get_padding_lengths`; see the note there for more detail.
child_padding_lengths = {
key.replace("list_", "", 1): value
for key, value in padding_lengths.items()
if key.startswith("list_")
}
padded_fields = [field.as_tensor(child_padding_lengths) for field in padded_field_list]
return self.field_list[0].batch_tensors(padded_fields)
@overrides
def empty_field(self):
# Our "empty" list field will actually have a single field in the list, so that we can
# correctly construct nested lists. For example, if we have a type that is
# `ListField[ListField[LabelField]]`, we need the top-level `ListField` to know to
# construct a `ListField[LabelField]` when it's padding, and the nested `ListField` needs
        # to know that its empty objects are `LabelFields`. Having an "empty" list actually have
# length one makes this all work out, and we'll always be padding to at least length 1,
# anyway.
return ListField([self.field_list[0].empty_field()])
@overrides
def batch_tensors(self, tensor_list: List[DataArray]) -> DataArray:
# We defer to the class we're wrapping in a list to handle the batching.
return self.field_list[0].batch_tensors(tensor_list)
def __str__(self) -> str:
field_class = self.field_list[0].__class__.__name__
base_string = f"ListField of {len(self.field_list)} {field_class}s : \n"
return " ".join([base_string] + [f"\t {field} \n" for field in self.field_list])
| allennlp-master | allennlp/data/fields/list_field.py |
| allennlp-master | tests/__init__.py |
import re
import pytest
from allennlp.version import VERSION
# Regex to check that the current version set in `allennlp.version` adheres to
# PEP 440, as well as some of our own internal conventions, such as the `.dev`
# suffix being used only for nightly builds.
# 0.0.0rc0.post0.dev20200424
VALID_VERSION_RE = re.compile(
r"^"
r"(0|[1-9]\d*)" # major
r"\.(0|[1-9]\d*)" # minor
r"\.(0|[1-9]\d*)" # patch
r"(rc(0|[1-9]\d*))?" # patch suffix
r"(\.post(0|[1-9]\d*))?" # [.postN]
r"(\.dev2020[0-9]{4})?" # [.devDATE]
r"$"
)
def is_valid(version: str) -> bool:
return VALID_VERSION_RE.match(version) is not None
@pytest.mark.parametrize(
"version, valid",
[
# Valid versions:
("1.0.0", True),
("1.0.0rc3", True),
("1.0.0.post0", True),
("1.0.0.post1", True),
("1.0.0rc3.post0", True),
("1.0.0rc3.post0.dev20200424", True),
# Invalid versions:
("1.0.0.rc3", False),
("1.0.0rc01", False),
("1.0.0rc3.dev2020424", False),
],
)
def test_is_valid_helper(version: str, valid: bool):
assert is_valid(version) is valid
def test_version():
"""
Ensures current version is consistent with our conventions.
"""
assert is_valid(VERSION)
| allennlp-master | tests/version_test.py |
from typing import Dict, Optional
import os
import tempfile
import tarfile
import pytest
import torch
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.nn.initializers import PretrainedModelInitializer
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.params import Params
class _Net1(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear_1 = torch.nn.Linear(5, 10)
self.linear_2 = torch.nn.Linear(10, 5)
self.scalar = torch.nn.Parameter(torch.rand(()))
def forward(self, inputs):
pass
class _Net2(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear_1 = torch.nn.Linear(5, 10)
self.linear_3 = torch.nn.Linear(10, 5)
self.scalar = torch.nn.Parameter(torch.rand(()))
def forward(self, inputs):
pass
class TestPretrainedModelInitializer(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.net1 = _Net1()
self.net2 = _Net2()
self.temp_file = self.TEST_DIR / "weights.th"
torch.save(self.net2.state_dict(), self.temp_file)
def _are_equal(self, linear1: torch.nn.Linear, linear2: torch.nn.Linear) -> bool:
return torch.equal(linear1.weight, linear2.weight) and torch.equal(
linear1.bias, linear2.bias
)
def _get_applicator(
self,
regex: str,
weights_file_path: str,
parameter_name_overrides: Optional[Dict[str, str]] = None,
) -> InitializerApplicator:
initializer = PretrainedModelInitializer(weights_file_path, parameter_name_overrides)
return InitializerApplicator([(regex, initializer)])
def test_random_initialization(self):
# The tests in the class rely on the fact that the parameters for
# `self.net1` and `self.net2` are randomly initialized and not
# equal at the beginning. This test makes sure that's true
assert not self._are_equal(self.net1.linear_1, self.net2.linear_1)
assert not self._are_equal(self.net1.linear_2, self.net2.linear_3)
def test_from_params(self):
params = Params({"type": "pretrained", "weights_file_path": self.temp_file})
initializer = Initializer.from_params(params)
assert initializer.weights
assert initializer.parameter_name_overrides == {}
name_overrides = {"a": "b", "c": "d"}
params = Params(
{
"type": "pretrained",
"weights_file_path": self.temp_file,
"parameter_name_overrides": name_overrides,
}
)
initializer = Initializer.from_params(params)
assert initializer.weights
assert initializer.parameter_name_overrides == name_overrides
def test_from_params_tar_gz(self):
with tempfile.NamedTemporaryFile(suffix=".tar.gz") as f:
with tarfile.open(fileobj=f, mode="w:gz") as archive:
archive.add(self.temp_file, arcname=os.path.basename(self.temp_file))
f.flush()
params = Params({"type": "pretrained", "weights_file_path": f.name})
initializer = Initializer.from_params(params)
assert initializer.weights
assert initializer.parameter_name_overrides == {}
for name, parameter in self.net2.state_dict().items():
assert torch.equal(parameter, initializer.weights[name])
def test_default_parameter_names(self):
# This test initializes net1 to net2's parameters. It doesn't use
# the parameter name overrides, so it will verify the initialization
# works if the two parameters' names are the same.
applicator = self._get_applicator("linear_1.weight|linear_1.bias", self.temp_file)
applicator(self.net1)
assert self._are_equal(self.net1.linear_1, self.net2.linear_1)
assert not self._are_equal(self.net1.linear_2, self.net2.linear_3)
def test_parameter_name_overrides(self):
# This test will use the parameter name overrides to initialize all
# of net1's weights to net2's.
name_overrides = {"linear_2.weight": "linear_3.weight", "linear_2.bias": "linear_3.bias"}
applicator = self._get_applicator("linear_*", self.temp_file, name_overrides)
applicator(self.net1)
assert self._are_equal(self.net1.linear_1, self.net2.linear_1)
assert self._are_equal(self.net1.linear_2, self.net2.linear_3)
def test_size_mismatch(self):
# This test will verify that an exception is raised when you try
# to initialize a parameter to a pretrained parameter and they have
# different sizes
name_overrides = {"linear_1.weight": "linear_3.weight"}
applicator = self._get_applicator("linear_1.*", self.temp_file, name_overrides)
with pytest.raises(ConfigurationError):
applicator(self.net1)
def test_zero_dim_tensor(self):
# This test will verify that a 0-dim tensor can be initialized.
        # Slicing into a 0-dim tensor to copy the parameter would raise an IndexError.
applicator = self._get_applicator("scalar", self.temp_file)
applicator(self.net1)
assert torch.equal(self.net1.scalar, self.net2.scalar)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device registered.")
def test_load_to_gpu_from_gpu(self):
# This test will make sure that the initializer works on the GPU
self.net1.cuda(device=0)
self.net2.cuda(device=0)
# Verify the parameters are on the GPU
assert self.net1.linear_1.weight.is_cuda is True
assert self.net1.linear_1.bias.is_cuda is True
assert self.net2.linear_1.weight.is_cuda is True
assert self.net2.linear_1.bias.is_cuda is True
# We need to manually save the parameters to a file because setup_method()
# only does it for the CPU
temp_file = self.TEST_DIR / "gpu_weights.th"
torch.save(self.net2.state_dict(), temp_file)
applicator = self._get_applicator("linear_1.*", temp_file)
applicator(self.net1)
# Verify the parameters are still on the GPU
assert self.net1.linear_1.weight.is_cuda is True
assert self.net1.linear_1.bias.is_cuda is True
assert self.net2.linear_1.weight.is_cuda is True
assert self.net2.linear_1.bias.is_cuda is True
# Make sure the weights are identical
assert self._are_equal(self.net1.linear_1, self.net2.linear_1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device registered.")
def test_load_to_cpu_from_gpu(self):
# This test will load net2's parameters onto the GPU, then use them to
# initialize net1 on the CPU
self.net2.cuda(device=0)
# Verify the parameters are on the GPU
assert self.net2.linear_1.weight.is_cuda is True
assert self.net2.linear_1.bias.is_cuda is True
temp_file = self.TEST_DIR / "gpu_weights.th"
torch.save(self.net2.state_dict(), temp_file)
applicator = self._get_applicator("linear_1.*", temp_file)
applicator(self.net1)
# Verify the parameters are on the CPU
assert self.net1.linear_1.weight.is_cuda is False
assert self.net1.linear_1.bias.is_cuda is False
# Make sure the weights are identical
assert self._are_equal(self.net1.linear_1, self.net2.linear_1.cpu())
@pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device registered.")
def test_load_to_gpu_from_cpu(self):
# This test will load net1's parameters onto the GPU, then use net2's
# on the CPU to initialize net1's parameters.
self.net1.cuda(device=0)
# Verify the parameters are on the GPU
assert self.net1.linear_1.weight.is_cuda is True
assert self.net1.linear_1.bias.is_cuda is True
# net2's parameters are already saved to CPU from setup_method()
applicator = self._get_applicator("linear_1.*", self.temp_file)
applicator(self.net1)
# Verify the parameters are on the GPU
assert self.net1.linear_1.weight.is_cuda is True
assert self.net1.linear_1.bias.is_cuda is True
# Make sure the weights are identical
assert self._are_equal(self.net1.linear_1.cpu(), self.net2.linear_1)
| allennlp-master | tests/nn/pretrained_model_initializer_test.py |
import json
import logging
import math
import numpy
import pytest
import torch
import _jsonnet
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.nn.initializers import block_orthogonal, uniform_unit_scaling
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.params import Params
class TestInitializers(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
logging.getLogger("allennlp.nn.initializers").disabled = False
    def teardown_method(self):
        super().teardown_method()
        logging.getLogger("allennlp.nn.initializers").disabled = True
def test_from_params_string(self):
Initializer.from_params(params="eye")
def test_from_params_none(self):
Initializer.from_params(params=None)
def test_regex_matches_are_initialized_correctly(self):
class Net(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear_1_with_funky_name = torch.nn.Linear(5, 10)
self.linear_2 = torch.nn.Linear(10, 5)
self.conv = torch.nn.Conv1d(5, 5, 5)
def forward(self, inputs):
pass
# Make sure we handle regexes properly
json_params = """{"initializer": {"regexes": [
["conv", {"type": "constant", "val": 5}],
["funky_na.*bi", {"type": "constant", "val": 7}]
]}}
"""
params = Params(json.loads(_jsonnet.evaluate_snippet("", json_params)))
initializers = InitializerApplicator.from_params(params=params["initializer"])
model = Net()
initializers(model)
for parameter in model.conv.parameters():
assert torch.equal(parameter.data, torch.ones(parameter.size()) * 5)
parameter = model.linear_1_with_funky_name.bias
assert torch.equal(parameter.data, torch.ones(parameter.size()) * 7)
def test_block_orthogonal_can_initialize(self):
tensor = torch.zeros([10, 6])
block_orthogonal(tensor, [5, 3])
tensor = tensor.data.numpy()
def test_block_is_orthogonal(block) -> None:
matrix_product = block.T @ block
numpy.testing.assert_array_almost_equal(
matrix_product, numpy.eye(matrix_product.shape[-1]), 6
)
test_block_is_orthogonal(tensor[:5, :3])
test_block_is_orthogonal(tensor[:5, 3:])
test_block_is_orthogonal(tensor[5:, 3:])
test_block_is_orthogonal(tensor[5:, :3])
def test_block_orthogonal_raises_on_mismatching_dimensions(self):
tensor = torch.zeros([10, 6, 8])
with pytest.raises(ConfigurationError):
block_orthogonal(tensor, [7, 2, 1])
def test_uniform_unit_scaling_can_initialize(self):
tensor = torch.zeros([10, 6])
uniform_unit_scaling(tensor, "linear")
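        # uniform_unit_scaling draws (roughly) from U(-a, a) with a = gain * sqrt(3 / fan_in);
        # here fan_in is 10 and the "linear" gain is 1.0, hence the sqrt(3 / 10) bound below.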
assert tensor.data.max() < math.sqrt(3 / 10)
assert tensor.data.min() > -math.sqrt(3 / 10)
# Check that it gets the scaling correct for relu (1.43).
uniform_unit_scaling(tensor, "relu")
assert tensor.data.max() < math.sqrt(3 / 10) * 1.43
assert tensor.data.min() > -math.sqrt(3 / 10) * 1.43
def test_regex_match_prevention_prevents_and_overrides(self):
class Net(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear_1 = torch.nn.Linear(5, 10)
self.linear_2 = torch.nn.Linear(10, 5)
                # typical actual usage: modules loaded via allennlp.models.load_archive(...)
self.linear_3_transfer = torch.nn.Linear(5, 10)
self.linear_4_transfer = torch.nn.Linear(10, 5)
self.pretrained_conv = torch.nn.Conv1d(5, 5, 5)
def forward(self, inputs):
pass
json_params = """{"initializer": {
"regexes": [
[".*linear.*", {"type": "constant", "val": 10}],
[".*conv.*", {"type": "constant", "val": 10}]
],
"prevent_regexes": [".*_transfer.*", ".*pretrained.*"]
}}
"""
params = Params(json.loads(_jsonnet.evaluate_snippet("", json_params)))
initializers = InitializerApplicator.from_params(params=params["initializer"])
model = Net()
initializers(model)
for module in [model.linear_1, model.linear_2]:
for parameter in module.parameters():
assert torch.equal(parameter.data, torch.ones(parameter.size()) * 10)
transfered_modules = [
model.linear_3_transfer,
model.linear_4_transfer,
model.pretrained_conv,
]
for module in transfered_modules:
for parameter in module.parameters():
assert not torch.equal(parameter.data, torch.ones(parameter.size()) * 10)
| allennlp-master | tests/nn/initializers_test.py |
import re
import torch
from allennlp.common.params import Params
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.nn.regularizers import L1Regularizer, L2Regularizer, RegularizerApplicator
from allennlp.common.testing import AllenNlpTestCase
class TestRegularizers(AllenNlpTestCase):
def test_l1_regularization(self):
model = torch.nn.Sequential(torch.nn.Linear(5, 10), torch.nn.Linear(10, 5))
constant_init = Initializer.from_params(Params({"type": "constant", "val": -1}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(model)
value = RegularizerApplicator([("", L1Regularizer(1.0))])(model)
# 115 because of biases.
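        # Worked out: every parameter equals -1, so the L1 penalty is
        # 1.0 * (5*10 weights + 10 biases + 10*5 weights + 5 biases) = 115.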
assert value.data.numpy() == 115.0
def test_l2_regularization(self):
model = torch.nn.Sequential(torch.nn.Linear(5, 10), torch.nn.Linear(10, 5))
constant_init = Initializer.from_params(Params({"type": "constant", "val": 0.5}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(model)
value = RegularizerApplicator([("", L2Regularizer(1.0))])(model)
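        # Worked out: every parameter equals 0.5, so the L2 penalty is
        # 1.0 * 0.5**2 * (50 + 10 + 50 + 5) = 0.25 * 115 = 28.75.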
assert value.data.numpy() == 28.75
def test_regularizer_applicator_respects_regex_matching(self):
model = torch.nn.Sequential(torch.nn.Linear(5, 10), torch.nn.Linear(10, 5))
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(model)
value = RegularizerApplicator(
[("weight", L2Regularizer(0.5)), ("bias", L1Regularizer(1.0))]
)(model)
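        # Worked out: all parameters equal 1.0, so the weights contribute
        # 0.5 * (50 + 50) = 50 via L2 and the biases contribute 1.0 * (10 + 5) = 15 via L1.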
assert value.data.numpy() == 65.0
def test_from_params(self):
params = Params({"regexes": [("conv", "l1"), ("linear", {"type": "l2", "alpha": 10})]})
regularizer_applicator = RegularizerApplicator.from_params(params)
regularizers = regularizer_applicator._regularizers
conv = linear = None
for regex, regularizer in regularizers:
if regex == "conv":
conv = regularizer
elif regex == "linear":
linear = regularizer
assert isinstance(conv, L1Regularizer)
assert isinstance(linear, L2Regularizer)
assert linear.alpha == 10
def test_frozen_params(self):
model = torch.nn.Sequential(torch.nn.Linear(5, 10), torch.nn.Linear(10, 5))
constant_init = Initializer.from_params(Params({"type": "constant", "val": -1}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(model)
# freeze the parameters of the first linear
for name, param in model.named_parameters():
if re.search(r"0.*$", name):
param.requires_grad = False
value = RegularizerApplicator([("", L1Regularizer(1.0))])(model)
        # 55 = 10*5 weights + 5 biases of the unfrozen second linear (the frozen first linear is skipped).
assert value.data.numpy() == 55
| allennlp-master | tests/nn/regularizers_test.py |
| allennlp-master | tests/nn/__init__.py |
import numpy
import pytest
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.nn.chu_liu_edmonds import _find_cycle, decode_mst
class ChuLiuEdmondsTest(AllenNlpTestCase):
def test_find_cycle(self):
# No cycle
parents = [0, 2, 3, 0, 3]
current_nodes = [True for _ in range(5)]
has_cycle, cycle = _find_cycle(parents, 5, current_nodes)
assert not has_cycle
assert not cycle
# Cycle
parents = [0, 2, 3, 1, 3]
has_cycle, cycle = _find_cycle(parents, 5, current_nodes)
assert has_cycle
assert cycle == [1, 2, 3]
# No cycle if ignored nodes are correctly ignored.
parents = [-1, 0, 1, 4, 3]
current_nodes = [True for _ in range(5)]
current_nodes[4] = False
current_nodes[3] = False
has_cycle, cycle = _find_cycle(parents, 5, current_nodes)
assert not has_cycle
assert cycle == []
# Cycle, but excluding ignored nodes which form their own cycle.
parents = [-1, 2, 1, 4, 3]
current_nodes = [True for _ in range(5)]
current_nodes[1] = False
current_nodes[2] = False
has_cycle, cycle = _find_cycle(parents, 5, current_nodes)
assert has_cycle
assert cycle == [3, 4]
def test_mst(self):
# First, test some random cases as sanity checks.
# No label case
energy = numpy.random.rand(5, 5)
heads, types = decode_mst(energy, 5, has_labels=False)
assert not _find_cycle(heads, 5, [True] * 5)[0]
# Labeled case
energy = numpy.random.rand(3, 5, 5)
heads, types = decode_mst(energy, 5)
assert not _find_cycle(heads, 5, [True] * 5)[0]
label_id_matrix = energy.argmax(axis=0)
# Check that the labels correspond to the
# argmax of the labels for the arcs.
for child, parent in enumerate(heads):
# The first index corresponds to the symbolic
# head token, which won't necessarily have an
# argmax type.
if child == 0:
continue
assert types[child] == label_id_matrix[parent, child]
# Check wrong dimensions throw errors
with pytest.raises(ConfigurationError):
energy = numpy.random.rand(5, 5)
decode_mst(energy, 5, has_labels=True)
with pytest.raises(ConfigurationError):
energy = numpy.random.rand(3, 5, 5)
decode_mst(energy, 5, has_labels=False)
def test_mst_finds_maximum_spanning_tree(self):
energy = torch.arange(1, 10).view(1, 3, 3)
heads, _ = decode_mst(energy.numpy(), 3)
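        # Worked out: with energy[head, child] = [[1, 2, 3], [4, 5, 6], [7, 8, 9]], the three
        # arborescences rooted at node 0 score 2 + 3 = 5 (0->1, 0->2), 2 + 6 = 8 (0->1, 1->2),
        # and 3 + 8 = 11 (0->2, 2->1), so the best tree is heads = [-1, 2, 0].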
assert list(heads) == [-1, 2, 0]
| allennlp-master | tests/nn/chu_liu_edmonds_test.py |
import json
import random
from typing import NamedTuple, Any
import numpy
from numpy.testing import assert_array_almost_equal, assert_almost_equal
import torch
import pytest
from flaky import flaky
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import sanitize
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import (
ELMoTokenCharactersIndexer,
TokenCharactersIndexer,
SingleIdTokenIndexer,
)
from allennlp.nn import util
from allennlp.models import load_archive
class TestNnUtil(AllenNlpTestCase):
def test_get_sequence_lengths_from_binary_mask(self):
binary_mask = torch.tensor(
[
[True, True, True, False, False, False],
[True, True, False, False, False, False],
[True, True, True, True, True, True],
[True, False, False, False, False, False],
]
)
lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
numpy.testing.assert_array_equal(lengths.numpy(), numpy.array([3, 2, 6, 1]))
def test_get_mask_from_sequence_lengths(self):
sequence_lengths = torch.LongTensor([4, 3, 1, 4, 2])
mask = util.get_mask_from_sequence_lengths(sequence_lengths, 5).data.numpy()
assert_almost_equal(
mask,
[[1, 1, 1, 1, 0], [1, 1, 1, 0, 0], [1, 0, 0, 0, 0], [1, 1, 1, 1, 0], [1, 1, 0, 0, 0]],
)
def test_get_sequence_lengths_converts_to_long_tensor_and_avoids_variable_overflow(self):
# Tests the following weird behaviour in Pytorch 0.1.12
# doesn't happen for our sequence masks:
#
# mask = torch.ones([260]).bool()
# mask.sum() # equals 260.
# var_mask = t.a.V(mask)
# var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows.
binary_mask = torch.ones(2, 260).bool()
lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
numpy.testing.assert_array_equal(lengths.data.numpy(), numpy.array([260, 260]))
def test_clamp_tensor(self):
# Test on uncoalesced sparse tensor
i = torch.LongTensor([[0, 1, 1, 0], [2, 0, 2, 2]])
v = torch.FloatTensor([3, 4, -5, 3])
tensor = torch.sparse.FloatTensor(i, v, torch.Size([2, 3]))
clamped_tensor = util.clamp_tensor(tensor, minimum=-3, maximum=3).to_dense()
assert_almost_equal(clamped_tensor, [[0, 0, 3], [3, 0, -3]])
# Test on coalesced sparse tensor
i = torch.LongTensor([[0, 1, 1], [2, 0, 2]])
v = torch.FloatTensor([3, 4, -5])
tensor = torch.sparse.FloatTensor(i, v, torch.Size([2, 3]))
clamped_tensor = util.clamp_tensor(tensor, minimum=-3, maximum=3).to_dense()
assert_almost_equal(clamped_tensor, [[0, 0, 3], [3, 0, -3]])
# Test on dense tensor
tensor = torch.tensor([[5, -4, 3], [-3, 0, -30]])
clamped_tensor = util.clamp_tensor(tensor, minimum=-3, maximum=3)
assert_almost_equal(clamped_tensor, [[3, -3, 3], [-3, 0, -3]])
def test_sort_tensor_by_length(self):
tensor = torch.rand([5, 7, 9])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 1:, :] = 0
tensor[3, 5:, :] = 0
sequence_lengths = torch.LongTensor([3, 4, 1, 5, 7])
sorted_tensor, sorted_lengths, reverse_indices, _ = util.sort_batch_by_length(
tensor, sequence_lengths
)
# Test sorted indices are padded correctly.
numpy.testing.assert_array_equal(sorted_tensor[1, 5:, :].data.numpy(), 0.0)
numpy.testing.assert_array_equal(sorted_tensor[2, 4:, :].data.numpy(), 0.0)
numpy.testing.assert_array_equal(sorted_tensor[3, 3:, :].data.numpy(), 0.0)
numpy.testing.assert_array_equal(sorted_tensor[4, 1:, :].data.numpy(), 0.0)
assert sorted_lengths.data.equal(torch.LongTensor([7, 5, 4, 3, 1]))
# Test restoration indices correctly recover the original tensor.
assert sorted_tensor.index_select(0, reverse_indices).data.equal(tensor.data)
def test_get_final_encoder_states(self):
encoder_outputs = torch.Tensor(
[
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]],
]
)
mask = torch.tensor([[True, True, True], [True, True, False]])
final_states = util.get_final_encoder_states(encoder_outputs, mask, bidirectional=False)
assert_almost_equal(final_states.data.numpy(), [[9, 10, 11, 12], [17, 18, 19, 20]])
final_states = util.get_final_encoder_states(encoder_outputs, mask, bidirectional=True)
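        # With bidirectional=True the hidden size is split into [forward; backward] halves: the
        # forward half still comes from the last unmasked timestep ([9, 10] and [17, 18]), while
        # the backward half comes from timestep 0 ([3, 4] and [15, 16]), where the backward pass ends.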
assert_almost_equal(final_states.data.numpy(), [[9, 10, 3, 4], [17, 18, 15, 16]])
def test_masked_softmax_no_mask(self):
# Testing the general unmasked 1D case.
vector_1d = torch.FloatTensor([[1.0, 2.0, 3.0]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, None).data.numpy()
assert_array_almost_equal(
vector_1d_softmaxed, numpy.array([[0.090031, 0.244728, 0.665241]])
)
assert_almost_equal(1.0, numpy.sum(vector_1d_softmaxed), decimal=6)
vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, None).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.017148, 0.046613, 0.93624]]))
# Testing the unmasked 1D case where the input is all 0s.
vector_zero = torch.FloatTensor([[0.0, 0.0, 0.0]])
vector_zero_softmaxed = util.masked_softmax(vector_zero, None).data.numpy()
assert_array_almost_equal(
vector_zero_softmaxed, numpy.array([[0.33333334, 0.33333334, 0.33333334]])
)
# Testing the general unmasked batched case.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])
masked_matrix_softmaxed = util.masked_softmax(matrix, None).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array(
[[0.01714783, 0.04661262, 0.93623955], [0.09003057, 0.24472847, 0.66524096]]
),
)
# Testing the unmasked batched case where one of the inputs are all 0s.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [0.0, 0.0, 0.0]])
masked_matrix_softmaxed = util.masked_softmax(matrix, None).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array(
[[0.01714783, 0.04661262, 0.93623955], [0.33333334, 0.33333334, 0.33333334]]
),
)
def test_masked_softmax_masked(self):
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
mask_1d = torch.tensor([[True, False, True]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.01798621, 0.0, 0.98201382]]))
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[True, False, True, True]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(
vector_1d_softmaxed, numpy.array([[0.01321289, 0.0, 0.26538793, 0.72139918]])
)
# Testing the masked 1D case where the input is all 0s and the mask
# is not all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, True]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0, 0, 0, 1]]))
# Testing the masked 1D case where the input is not all 0s
# and the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.0, 0.0, 0.0, 0.0]]))
# Testing the masked 1D case where the input is all 0s and
# the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.0, 0.0, 0.0, 0.0]]))
# Testing the masked 1D case where there are large elements in the
# padding.
vector_1d = torch.FloatTensor([[1.0, 1.0, 1e5]])
mask_1d = torch.tensor([[True, True, False]])
vector_1d_softmaxed = util.masked_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.5, 0.5, 0]]))
# Testing the general masked batched case.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.01798621, 0.0, 0.98201382], [0.090031, 0.244728, 0.665241]]),
)
# Testing the masked batch case where one of the inputs is all 0s but
# none of the masks are all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.5, 0.0, 0.5], [0.090031, 0.244728, 0.665241]])
)
# Testing the masked batch case where one of the inputs is all 0s and
# one of the masks are all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [False, False, False]])
masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.5, 0.0, 0.5], [0.0, 0.0, 0.0]])
)
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[False, False, False], [True, False, True]])
masked_matrix_softmaxed = util.masked_softmax(matrix, mask).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.0, 0.0, 0.0], [0.11920292, 0.0, 0.88079708]])
)
def test_masked_softmax_memory_efficient_masked(self):
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
mask_1d = torch.tensor([[True, False, True]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.01798621, 0.0, 0.98201382]]))
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[True, False, True, True]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
vector_1d_softmaxed, numpy.array([[0.01321289, 0.0, 0.26538793, 0.72139918]])
)
# Testing the masked 1D case where the input is all 0s and the mask
# is not all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, True]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0, 0, 0, 1]]))
# Testing the masked 1D case where the input is not all 0s
# and the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.25, 0.25, 0.25, 0.25]]))
# Testing the masked 1D case where the input is all 0s and
# the mask is all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.25, 0.25, 0.25, 0.25]]))
# Testing the masked 1D case where there are large elements in the
# padding.
vector_1d = torch.FloatTensor([[1.0, 1.0, 1e5]])
mask_1d = torch.tensor([[True, True, False]])
vector_1d_softmaxed = util.masked_softmax(
vector_1d, mask_1d, memory_efficient=True
).data.numpy()
assert_array_almost_equal(vector_1d_softmaxed, numpy.array([[0.5, 0.5, 0]]))
# Testing the general masked batched case.
matrix = torch.FloatTensor([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.01798621, 0.0, 0.98201382], [0.090031, 0.244728, 0.665241]]),
)
# Testing the masked batch case where one of the inputs is all 0s but
# none of the masks are all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, True]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed, numpy.array([[0.5, 0.0, 0.5], [0.090031, 0.244728, 0.665241]])
)
# Testing the masked batch case where one of the inputs is all 0s and
# one of the masks are all 0.
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[True, False, True], [False, False, False]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.5, 0.0, 0.5], [0.33333333, 0.33333333, 0.33333333]]),
)
matrix = torch.FloatTensor([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
mask = torch.tensor([[False, False, False], [True, False, True]])
masked_matrix_softmaxed = util.masked_softmax(
matrix, mask, memory_efficient=True
).data.numpy()
assert_array_almost_equal(
masked_matrix_softmaxed,
numpy.array([[0.33333333, 0.33333333, 0.33333333], [0.11920292, 0.0, 0.88079708]]),
)
def test_masked_log_softmax_masked(self):
# Tests replicated from test_softmax_masked - we test that exponentiated,
# the log softmax contains the correct elements (masked elements should be == 1).
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([[1.0, 2.0, 5.0]])
mask_1d = torch.tensor([[True, False, True]])
vector_1d_softmaxed = util.masked_log_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(
numpy.exp(vector_1d_softmaxed), numpy.array([[0.01798621, 0.0, 0.98201382]])
)
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[True, False, True, True]])
vector_1d_softmaxed = util.masked_log_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(
numpy.exp(vector_1d_softmaxed), numpy.array([[0.01321289, 0.0, 0.26538793, 0.72139918]])
)
# Testing the masked 1D case where the input is all 0s and the mask
# is not all 0s.
vector_1d = torch.FloatTensor([[0.0, 0.0, 0.0, 0.0]])
mask_1d = torch.tensor([[False, False, False, True]])
vector_1d_softmaxed = util.masked_log_softmax(vector_1d, mask_1d).data.numpy()
assert_array_almost_equal(
numpy.exp(vector_1d_softmaxed), numpy.array([[0.0, 0.0, 0.0, 1.0]])
)
# Testing the masked 1D case where the input is not all 0s
# and the mask is all 0s. The output here will be arbitrary, but it should not be nan.
vector_1d = torch.FloatTensor([[0.0, 2.0, 3.0, 4.0]])
mask_1d = torch.tensor([[False, False, False, False]])
vector_1d_softmaxed = util.masked_log_softmax(vector_1d, mask_1d).data.numpy()
assert not numpy.isnan(vector_1d_softmaxed).any()
def test_masked_max(self):
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([1.0, 12.0, 5.0])
mask_1d = torch.tensor([True, False, True])
vector_1d_maxed = util.masked_max(vector_1d, mask_1d, dim=0).data.numpy()
assert_array_almost_equal(vector_1d_maxed, 5.0)
# Testing that when the mask is all zeros, the output is arbitrary but should not be nan.
vector_1d = torch.FloatTensor([1.0, 12.0, 5.0])
mask_1d = torch.tensor([False, False, False])
vector_1d_maxed = util.masked_max(vector_1d, mask_1d, dim=0).data.numpy()
assert not numpy.isnan(vector_1d_maxed).any()
# Testing batch value and batch masks
matrix = torch.FloatTensor([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, False]])
matrix_maxed = util.masked_max(matrix, mask, dim=-1).data.numpy()
assert_array_almost_equal(matrix_maxed, numpy.array([5.0, -1.0]))
# Testing keepdim for batch value and batch masks
matrix = torch.FloatTensor([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, False]])
matrix_maxed = util.masked_max(matrix, mask, dim=-1, keepdim=True).data.numpy()
assert_array_almost_equal(matrix_maxed, numpy.array([[5.0], [-1.0]]))
# Testing broadcast
matrix = torch.FloatTensor(
[[[1.0, 2.0], [12.0, 3.0], [5.0, -1.0]], [[-1.0, -3.0], [-2.0, -0.5], [3.0, 8.0]]]
)
mask = torch.tensor([[True, False, True], [True, True, False]]).unsqueeze(-1)
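# Illustrative note (not part of the original test): the unsqueezed mask has shape
# (2, 3, 1) and broadcasts across the trailing feature dimension, so the max over
# dim 1 only considers unmasked rows; for the first batch element that is
# max(1, 5) = 5 in the first column and max(2, -1) = 2 in the second.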
matrix_maxed = util.masked_max(matrix, mask, dim=1).data.numpy()
assert_array_almost_equal(matrix_maxed, numpy.array([[5.0, 2.0], [-1.0, -0.5]]))
def test_masked_mean(self):
# Testing the general masked 1D case.
vector_1d = torch.FloatTensor([1.0, 12.0, 5.0])
mask_1d = torch.tensor([True, False, True])
vector_1d_mean = util.masked_mean(vector_1d, mask_1d, dim=0).data.numpy()
assert_array_almost_equal(vector_1d_mean, 3.0)
# Testing that when the mask is all zeros, the output is arbitrary but should not be nan.
vector_1d = torch.FloatTensor([1.0, 12.0, 5.0])
mask_1d = torch.tensor([False, False, False])
vector_1d_mean = util.masked_mean(vector_1d, mask_1d, dim=0).data.numpy()
assert not numpy.isnan(vector_1d_mean).any()
# Testing batch value and batch masks
matrix = torch.FloatTensor([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, False]])
matrix_mean = util.masked_mean(matrix, mask, dim=-1).data.numpy()
assert_array_almost_equal(matrix_mean, numpy.array([3.0, -1.5]))
# Testing keepdim for batch value and batch masks
matrix = torch.FloatTensor([[1.0, 12.0, 5.0], [-1.0, -2.0, 3.0]])
mask = torch.tensor([[True, False, True], [True, True, False]])
matrix_mean = util.masked_mean(matrix, mask, dim=-1, keepdim=True).data.numpy()
assert_array_almost_equal(matrix_mean, numpy.array([[3.0], [-1.5]]))
# Testing broadcast
matrix = torch.FloatTensor(
[[[1.0, 2.0], [12.0, 3.0], [5.0, -1.0]], [[-1.0, -3.0], [-2.0, -0.5], [3.0, 8.0]]]
)
mask = torch.tensor([[True, False, True], [True, True, False]]).unsqueeze(-1)
matrix_mean = util.masked_mean(matrix, mask, dim=1).data.numpy()
assert_array_almost_equal(matrix_mean, numpy.array([[3.0, 0.5], [-1.5, -1.75]]))
def test_masked_flip(self):
tensor = torch.FloatTensor(
[[[6, 6, 6], [1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4], [5, 5, 5]]]
)
solution = [[[6, 6, 6], [0, 0, 0]], [[4, 4, 4], [3, 3, 3]]]
response = util.masked_flip(tensor, [1, 2])
assert_almost_equal(response, solution)
tensor = torch.FloatTensor(
[
[[6, 6, 6], [1, 1, 1], [2, 2, 2], [0, 0, 0]],
[[3, 3, 3], [4, 4, 4], [5, 5, 5], [1, 2, 3]],
]
)
solution = [
[[2, 2, 2], [1, 1, 1], [6, 6, 6], [0, 0, 0]],
[[1, 2, 3], [5, 5, 5], [4, 4, 4], [3, 3, 3]],
]
response = util.masked_flip(tensor, [3, 4])
assert_almost_equal(response, solution)
tensor = torch.FloatTensor(
[
[[6, 6, 6], [1, 1, 1], [2, 2, 2], [0, 0, 0]],
[[3, 3, 3], [4, 4, 4], [5, 5, 5], [1, 2, 3]],
[[1, 1, 1], [2, 2, 2], [0, 0, 0], [0, 0, 0]],
]
)
solution = [
[[2, 2, 2], [1, 1, 1], [6, 6, 6], [0, 0, 0]],
[[1, 2, 3], [5, 5, 5], [4, 4, 4], [3, 3, 3]],
[[2, 2, 2], [1, 1, 1], [0, 0, 0], [0, 0, 0]],
]
response = util.masked_flip(tensor, [3, 4, 2])
assert_almost_equal(response, solution)
def test_get_text_field_mask_returns_a_correct_mask(self):
text_field_tensors = {
"indexer_name": {
"tokens": torch.LongTensor([[3, 4, 5, 0, 0], [1, 2, 0, 0, 0]]),
"token_characters": torch.LongTensor(
[
[[1, 2], [3, 0], [2, 0], [0, 0], [0, 0]],
[[5, 0], [4, 6], [0, 0], [0, 0], [0, 0]],
]
),
}
}
assert_almost_equal(
util.get_text_field_mask(text_field_tensors).long().numpy(),
[[1, 1, 1, 0, 0], [1, 1, 0, 0, 0]],
)
def test_get_text_field_mask_returns_a_correct_mask_custom_padding_id(self):
text_field_tensors = {
"indexer_name": {
"tokens": torch.LongTensor([[3, 4, 5, 9, 9], [1, 2, 9, 9, 9]]),
"token_characters": torch.LongTensor(
[
[[1, 2], [3, 9], [2, 9], [9, 9], [9, 9]],
[[5, 9], [4, 6], [9, 9], [9, 9], [9, 9]],
]
),
}
}
assert_almost_equal(
util.get_text_field_mask(text_field_tensors, padding_id=9).long().numpy(),
[[1, 1, 1, 0, 0], [1, 1, 0, 0, 0]],
)
def test_get_text_field_mask_returns_a_correct_mask_character_only_input(self):
text_field_tensors = {
"indexer_name": {
"token_characters": torch.LongTensor(
[
[[1, 2, 3], [3, 0, 1], [2, 1, 0], [0, 0, 0]],
[[5, 5, 5], [4, 6, 0], [0, 0, 0], [0, 0, 0]],
]
)
}
}
assert_almost_equal(
util.get_text_field_mask(text_field_tensors).long().numpy(),
[[1, 1, 1, 0], [1, 1, 0, 0]],
)
def test_get_text_field_mask_returns_a_correct_mask_character_only_input_custom_padding_id(
self,
):
text_field_tensors = {
"indexer_name": {
"token_characters": torch.LongTensor(
[
[[1, 2, 3], [3, 9, 1], [2, 1, 9], [9, 9, 9]],
[[5, 5, 5], [4, 6, 9], [9, 9, 9], [9, 9, 9]],
]
)
}
}
assert_almost_equal(
util.get_text_field_mask(text_field_tensors, padding_id=9).long().numpy(),
[[1, 1, 1, 0], [1, 1, 0, 0]],
)
def test_get_text_field_mask_returns_a_correct_mask_list_field(self):
text_field_tensors = {
"indexer_name": {
"list_tokens": torch.LongTensor(
[
[[1, 2], [3, 0], [2, 0], [0, 0], [0, 0]],
[[5, 0], [4, 6], [0, 0], [0, 0], [0, 0]],
]
)
}
}
actual_mask = (
util.get_text_field_mask(text_field_tensors, num_wrapping_dims=1).long().numpy()
)
expected_mask = (text_field_tensors["indexer_name"]["list_tokens"].numpy() > 0).astype(
"int32"
)
assert_almost_equal(actual_mask, expected_mask)
def test_get_text_field_mask_returns_mask_key(self):
text_field_tensors = {
"indexer_name": {
"tokens": torch.LongTensor([[3, 4, 5, 0, 0], [1, 2, 0, 0, 0]]),
"mask": torch.tensor([[False, False, True]]),
}
}
assert_almost_equal(
util.get_text_field_mask(text_field_tensors).long().numpy(), [[0, 0, 1]]
)
def test_weighted_sum_works_on_simple_input(self):
batch_size = 1
sentence_length = 5
embedding_dim = 4
sentence_array = numpy.random.rand(batch_size, sentence_length, embedding_dim)
sentence_tensor = torch.from_numpy(sentence_array).float()
attention_tensor = torch.FloatTensor([[0.3, 0.4, 0.1, 0, 1.2]])
aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
assert aggregated_array.shape == (batch_size, embedding_dim)
expected_array = (
0.3 * sentence_array[0, 0]
+ 0.4 * sentence_array[0, 1]
+ 0.1 * sentence_array[0, 2]
+ 0.0 * sentence_array[0, 3]
+ 1.2 * sentence_array[0, 4]
)
numpy.testing.assert_almost_equal(aggregated_array, [expected_array], decimal=5)
def test_weighted_sum_handles_higher_order_input(self):
batch_size = 1
length_1 = 5
length_2 = 6
length_3 = 2
embedding_dim = 4
sentence_array = numpy.random.rand(batch_size, length_1, length_2, length_3, embedding_dim)
attention_array = numpy.random.rand(batch_size, length_1, length_2, length_3)
sentence_tensor = torch.from_numpy(sentence_array).float()
attention_tensor = torch.from_numpy(attention_array).float()
aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
assert aggregated_array.shape == (batch_size, length_1, length_2, embedding_dim)
expected_array = (
attention_array[0, 3, 2, 0] * sentence_array[0, 3, 2, 0]
+ attention_array[0, 3, 2, 1] * sentence_array[0, 3, 2, 1]
)
numpy.testing.assert_almost_equal(aggregated_array[0, 3, 2], expected_array, decimal=5)
def test_weighted_sum_handles_uneven_higher_order_input(self):
batch_size = 1
length_1 = 5
length_2 = 6
length_3 = 2
embedding_dim = 4
sentence_array = numpy.random.rand(batch_size, length_3, embedding_dim)
attention_array = numpy.random.rand(batch_size, length_1, length_2, length_3)
sentence_tensor = torch.from_numpy(sentence_array).float()
attention_tensor = torch.from_numpy(attention_array).float()
aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
assert aggregated_array.shape == (batch_size, length_1, length_2, embedding_dim)
for i in range(length_1):
for j in range(length_2):
expected_array = (
attention_array[0, i, j, 0] * sentence_array[0, 0]
+ attention_array[0, i, j, 1] * sentence_array[0, 1]
)
numpy.testing.assert_almost_equal(
aggregated_array[0, i, j], expected_array, decimal=5
)
def test_weighted_sum_handles_3d_attention_with_3d_matrix(self):
batch_size = 1
length_1 = 5
length_2 = 2
embedding_dim = 4
sentence_array = numpy.random.rand(batch_size, length_2, embedding_dim)
attention_array = numpy.random.rand(batch_size, length_1, length_2)
sentence_tensor = torch.from_numpy(sentence_array).float()
attention_tensor = torch.from_numpy(attention_array).float()
aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
assert aggregated_array.shape == (batch_size, length_1, embedding_dim)
for i in range(length_1):
expected_array = (
attention_array[0, i, 0] * sentence_array[0, 0]
+ attention_array[0, i, 1] * sentence_array[0, 1]
)
numpy.testing.assert_almost_equal(aggregated_array[0, i], expected_array, decimal=5)
def test_viterbi_decode(self):
# Test Viterbi decoding is equal to greedy decoding with no pairwise potentials.
sequence_logits = torch.nn.functional.softmax(torch.rand([5, 9]), dim=-1)
transition_matrix = torch.zeros([9, 9])
indices, _ = util.viterbi_decode(sequence_logits.data, transition_matrix)
_, argmax_indices = torch.max(sequence_logits, 1)
assert indices == argmax_indices.data.squeeze().tolist()
# Test Viterbi decoding works with start and end transitions
sequence_logits = torch.nn.functional.softmax(torch.rand([5, 9]), dim=-1)
transition_matrix = torch.zeros([9, 9])
allowed_start_transitions = torch.zeros([9])
# Force start tag to be an 8
allowed_start_transitions[:8] = float("-inf")
allowed_end_transitions = torch.zeros([9])
# Force end tag to be a 0
allowed_end_transitions[1:] = float("-inf")
indices, _ = util.viterbi_decode(
sequence_logits.data,
transition_matrix,
allowed_end_transitions=allowed_end_transitions,
allowed_start_transitions=allowed_start_transitions,
)
assert indices[0] == 8
assert indices[-1] == 0
# Test that pairwise potentials affect the sequence correctly and that
# viterbi_decode can handle -inf values.
sequence_logits = torch.FloatTensor(
[
[0, 0, 0, 3, 5],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
]
)
# The same tags shouldn't appear sequentially.
transition_matrix = torch.zeros([5, 5])
for i in range(5):
transition_matrix[i, i] = float("-inf")
indices, _ = util.viterbi_decode(sequence_logits, transition_matrix)
assert indices == [4, 3, 4, 3, 4, 3]
# Test that unbalanced pairwise potentials break ties
# between paths with equal unary potentials.
sequence_logits = torch.FloatTensor(
[
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
]
)
# The 5th tag has a penalty for appearing sequentially
# or for transitioning to the 4th tag, so the unique best
# path takes the 4th tag at every step.
transition_matrix = torch.zeros([5, 5])
transition_matrix[4, 4] = -10
transition_matrix[4, 3] = -10
transition_matrix[3, 4] = -10
indices, _ = util.viterbi_decode(sequence_logits, transition_matrix)
assert indices == [3, 3, 3, 3, 3, 3]
sequence_logits = torch.FloatTensor([[1, 0, 0, 4], [1, 0, 6, 2], [0, 3, 0, 4]])
# Best path would normally be [3, 2, 3] but we add a
# potential from 2 -> 1, making [3, 2, 1] the best path.
transition_matrix = torch.zeros([4, 4])
transition_matrix[0, 0] = 1
transition_matrix[2, 1] = 5
indices, value = util.viterbi_decode(sequence_logits, transition_matrix)
assert indices == [3, 2, 1]
assert value.numpy() == 18
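# Worked arithmetic for the expected score (illustrative sketch, not from the
# original test): along the path [3, 2, 1] the emissions are 4 + 6 + 3 and the
# only non-zero transition on the path is the boosted 2 -> 1 edge worth 5,
# giving 4 + 6 + 3 + 5 = 18.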
# Test that providing evidence results in paths containing specified tags.
sequence_logits = torch.FloatTensor(
[
[0, 0, 0, 7, 7],
[0, 0, 0, 7, 7],
[0, 0, 0, 7, 7],
[0, 0, 0, 7, 7],
[0, 0, 0, 7, 7],
[0, 0, 0, 7, 7],
]
)
# The 5th tag has a penalty for appearing sequentially
# or for transitioning to the 4th tag, so the best path
# takes the 4th tag at every timestep.
transition_matrix = torch.zeros([5, 5])
transition_matrix[4, 4] = -10
transition_matrix[4, 3] = -2
transition_matrix[3, 4] = -2
# The 1st, 4th and 5th sequence elements are observed - they should be
# equal to 2, 0 and 4. The last tag should be equal to 3, because although
# the penalty for transitioning to the 4th tag is -2, the unary potential
# is 7, which is greater than the combination for any of the other labels.
observations = [2, -1, -1, 0, 4, -1]
indices, _ = util.viterbi_decode(sequence_logits, transition_matrix, observations)
assert indices == [2, 3, 3, 0, 4, 3]
def test_viterbi_decode_top_k(self):
# Test cases taken from: https://gist.github.com/PetrochukM/afaa3613a99a8e7213d2efdd02ae4762
# Test Viterbi decoding is equal to greedy decoding with no pairwise potentials.
sequence_logits = torch.autograd.Variable(torch.rand([5, 9]))
transition_matrix = torch.zeros([9, 9])
indices, _ = util.viterbi_decode(sequence_logits.data, transition_matrix, top_k=5)
_, argmax_indices = torch.max(sequence_logits, 1)
assert indices[0] == argmax_indices.data.squeeze().tolist()
# Test that pairwise potentials affect the sequence correctly and that
# viterbi_decode can handle -inf values.
sequence_logits = torch.FloatTensor(
[
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
[0, 0, 0, 3, 4],
]
)
# The same tags shouldn't appear sequentially.
transition_matrix = torch.zeros([5, 5])
for i in range(5):
transition_matrix[i, i] = float("-inf")
indices, _ = util.viterbi_decode(sequence_logits, transition_matrix, top_k=5)
assert indices[0] == [3, 4, 3, 4, 3, 4]
# Test that unbalanced pairwise potentials break ties
# between paths with equal unary potentials.
sequence_logits = torch.FloatTensor(
[
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 4],
[0, 0, 0, 4, 0],
]
)
# The 5th tag has a penalty for appearing sequentially
# or for transitioning to the 4th tag, so the unique best
# path takes the 4th tag at every step.
transition_matrix = torch.zeros([5, 5])
transition_matrix[4, 4] = -10
transition_matrix[4, 3] = -10
indices, _ = util.viterbi_decode(sequence_logits, transition_matrix, top_k=5)
assert indices[0] == [3, 3, 3, 3, 3, 3]
sequence_logits = torch.FloatTensor([[1, 0, 0, 4], [1, 0, 6, 2], [0, 3, 0, 4]])
# Best path would normally be [3, 2, 3] but we add a
# potential from 2 -> 1, making [3, 2, 1] the best path.
transition_matrix = torch.zeros([4, 4])
transition_matrix[0, 0] = 1
transition_matrix[2, 1] = 5
indices, value = util.viterbi_decode(sequence_logits, transition_matrix, top_k=5)
assert indices[0] == [3, 2, 1]
assert value[0] == 18
def _brute_decode(
tag_sequence: torch.Tensor, transition_matrix: torch.Tensor, top_k: int = 5
) -> Any:
"""
Top-k decoder that uses brute-force search instead of the Viterbi dynamic programming algorithm.
"""
# Create all possible sequences
sequences = [[]] # type: ignore
for i in range(len(tag_sequence)):
new_sequences = [] # type: ignore
for j in range(len(tag_sequence[i])):
for sequence in sequences:
new_sequences.append(sequence[:] + [j])
sequences = new_sequences
# Score
scored_sequences = [] # type: ignore
for sequence in sequences:
emission_score = sum(tag_sequence[i, j] for i, j in enumerate(sequence))
transition_score = sum(
transition_matrix[sequence[i - 1], sequence[i]] for i in range(1, len(sequence))
)
score = emission_score + transition_score
scored_sequences.append((score, sequence))
# Get the top k scores / paths
top_k_sequences = sorted(scored_sequences, key=lambda r: r[0], reverse=True)[:top_k]
scores, paths = zip(*top_k_sequences)
return paths, scores # type: ignore
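# A note on feasibility (illustrative, not from the original test): _brute_decode
# enumerates all num_tags ** seq_len candidate sequences, so with both capped at 5
# in the loop below the worst case is 5 ** 5 = 3125 sequences per random trial.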
# Run 100 randomly generated parameters and compare the outputs.
for i in range(100):
num_tags = random.randint(1, 5)
seq_len = random.randint(1, 5)
k = random.randint(1, 5)
sequence_logits = torch.rand([seq_len, num_tags])
transition_matrix = torch.rand([num_tags, num_tags])
viterbi_paths_v1, viterbi_scores_v1 = util.viterbi_decode(
sequence_logits, transition_matrix, top_k=k
)
viterbi_path_brute, viterbi_score_brute = _brute_decode(
sequence_logits, transition_matrix, top_k=k
)
numpy.testing.assert_almost_equal(
list(viterbi_score_brute), viterbi_scores_v1.tolist(), decimal=3
)
numpy.testing.assert_equal(sanitize(viterbi_paths_v1), viterbi_path_brute)
def test_sequence_cross_entropy_with_logits_masks_loss_correctly(self):
# test weight masking by checking that a tensor with non-zero values in
# masked positions returns the same loss as a tensor with zeros in those
# positions.
tensor = torch.rand([5, 7, 4])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 2:, :] = 0
tensor[3, :, :] = 0
weights = (tensor != 0.0)[:, :, 0].long().squeeze(-1)
tensor2 = tensor.clone()
tensor2[0, 3:, :] = 2
tensor2[1, 4:, :] = 13
tensor2[2, 2:, :] = 234
tensor2[3, :, :] = 65
targets = torch.LongTensor(numpy.random.randint(0, 3, [5, 7]))
targets *= weights
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights)
loss2 = util.sequence_cross_entropy_with_logits(tensor2, targets, weights)
assert loss.data.numpy() == loss2.data.numpy()
def test_sequence_cross_entropy_with_logits_smooths_labels_correctly(self):
tensor = torch.rand([1, 3, 4])
targets = torch.LongTensor(numpy.random.randint(0, 3, [1, 3]))
weights = torch.ones([2, 3])
loss = util.sequence_cross_entropy_with_logits(
tensor, targets, weights, label_smoothing=0.1
)
correct_loss = 0.0
for prediction, label in zip(tensor.squeeze(0), targets.squeeze(0)):
prediction = torch.nn.functional.log_softmax(prediction, dim=-1)
correct_loss += prediction[label] * 0.9
# The smoothing mass of 0.1 is spread uniformly over all 4 classes.
correct_loss += prediction.sum() * 0.1 / 4
# Average over sequence.
correct_loss = -correct_loss / 3
numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
def test_sequence_cross_entropy_with_logits_averages_batch_correctly(self):
# test that the default batch average equals the sum of the per-row losses
# divided by the number of rows containing any non-padded tokens.
tensor = torch.rand([5, 7, 4])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 2:, :] = 0
tensor[3, :, :] = 0
weights = (tensor != 0.0)[:, :, 0].long().squeeze(-1)
targets = torch.LongTensor(numpy.random.randint(0, 3, [5, 7]))
targets *= weights
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights)
vector_loss = util.sequence_cross_entropy_with_logits(
tensor, targets, weights, average=None
)
# Batch has one completely padded row, so divide by 4.
assert loss.data.numpy() == vector_loss.sum().item() / 4
@flaky(max_runs=3, min_passes=1)
def test_sequence_cross_entropy_with_logits_averages_token_correctly(self):
# test that the token average equals the per-row losses weighted by each
# row's token count and divided by the total number of tokens
tensor = torch.rand([5, 7, 4])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 2:, :] = 0
tensor[3, :, :] = 0
weights = (tensor != 0.0)[:, :, 0].long().squeeze(-1)
targets = torch.LongTensor(numpy.random.randint(0, 3, [5, 7]))
targets *= weights
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, average="token")
vector_loss = util.sequence_cross_entropy_with_logits(
tensor, targets, weights, average=None
)
total_token_loss = (vector_loss * weights.float().sum(dim=-1)).sum()
average_token_loss = (total_token_loss / weights.float().sum()).detach()
assert_almost_equal(loss.detach().item(), average_token_loss.item(), decimal=5)
def test_sequence_cross_entropy_with_logits_gamma_correctly(self):
batch = 1
length = 3
classes = 4
gamma = abs(numpy.random.randn()) # [0, +inf)
tensor = torch.rand([batch, length, classes])
targets = torch.LongTensor(numpy.random.randint(0, classes, [batch, length]))
weights = torch.ones([batch, length])
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, gamma=gamma)
correct_loss = 0.0
for logit, label in zip(tensor.squeeze(0), targets.squeeze(0)):
p = torch.nn.functional.softmax(logit, dim=-1)
pt = p[label]
ft = (1 - pt) ** gamma
correct_loss += -pt.log() * ft
# Average over sequence.
correct_loss = correct_loss / length
numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
def test_sequence_cross_entropy_with_logits_alpha_float_correctly(self):
batch = 1
length = 3
classes = 2 # alpha float for binary class only
alpha = (
numpy.random.rand() if numpy.random.rand() > 0.5 else (1.0 - numpy.random.rand())
) # [0, 1]
tensor = torch.rand([batch, length, classes])
targets = torch.LongTensor(numpy.random.randint(0, classes, [batch, length]))
weights = torch.ones([batch, length])
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, alpha=alpha)
correct_loss = 0.0
for logit, label in zip(tensor.squeeze(0), targets.squeeze(0)):
logp = torch.nn.functional.log_softmax(logit, dim=-1)
logpt = logp[label]
if label:
at = alpha
else:
at = 1 - alpha
correct_loss += -logpt * at
# Average over sequence.
correct_loss = correct_loss / length
numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
def test_sequence_cross_entropy_with_logits_alpha_single_float_correctly(self):
batch = 1
length = 3
classes = 2 # alpha float for binary class only
alpha = (
numpy.random.rand() if numpy.random.rand() > 0.5 else (1.0 - numpy.random.rand())
) # [0, 1]
alpha = torch.tensor(alpha)
tensor = torch.rand([batch, length, classes])
targets = torch.LongTensor(numpy.random.randint(0, classes, [batch, length]))
weights = torch.ones([batch, length])
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, alpha=alpha)
correct_loss = 0.0
for logit, label in zip(tensor.squeeze(0), targets.squeeze(0)):
logp = torch.nn.functional.log_softmax(logit, dim=-1)
logpt = logp[label]
if label:
at = alpha
else:
at = 1 - alpha
correct_loss += -logpt * at
# Average over sequence.
correct_loss = correct_loss / length
numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
def test_sequence_cross_entropy_with_logits_alpha_list_correctly(self):
batch = 1
length = 3
classes = 4 # alpha float for binary class only
alpha = abs(numpy.random.randn(classes)) # [0, +inf)
tensor = torch.rand([batch, length, classes])
targets = torch.LongTensor(numpy.random.randint(0, classes, [batch, length]))
weights = torch.ones([batch, length])
loss = util.sequence_cross_entropy_with_logits(tensor, targets, weights, alpha=alpha)
correct_loss = 0.0
for logit, label in zip(tensor.squeeze(0), targets.squeeze(0)):
logp = torch.nn.functional.log_softmax(logit, dim=-1)
logpt = logp[label]
at = alpha[label]
correct_loss += -logpt * at
# Average over sequence.
correct_loss = correct_loss / length
numpy.testing.assert_array_almost_equal(loss.data.numpy(), correct_loss.data.numpy())
def test_replace_masked_values_replaces_masked_values_with_finite_value(self):
tensor = torch.FloatTensor([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]])
mask = torch.tensor([[True, True, False]])
replaced = util.replace_masked_values(tensor, mask.unsqueeze(-1), 2).data.numpy()
assert_almost_equal(replaced, [[[1, 2, 3, 4], [5, 6, 7, 8], [2, 2, 2, 2]]])
def test_logsumexp(self):
# First a simple example where we add probabilities in log space.
tensor = torch.FloatTensor([[0.4, 0.1, 0.2]])
log_tensor = tensor.log()
log_summed = util.logsumexp(log_tensor, dim=-1, keepdim=False)
assert_almost_equal(log_summed.exp().data.numpy(), [0.7])
log_summed = util.logsumexp(log_tensor, dim=-1, keepdim=True)
assert_almost_equal(log_summed.exp().data.numpy(), [[0.7]])
# Then some more atypical examples, and making sure this will work with how we handle
# log masks.
tensor = torch.FloatTensor([[float("-inf"), 20.0]])
assert_almost_equal(util.logsumexp(tensor).data.numpy(), [20.0])
tensor = torch.FloatTensor([[-200.0, 20.0]])
assert_almost_equal(util.logsumexp(tensor).data.numpy(), [20.0])
tensor = torch.FloatTensor([[20.0, 20.0], [-200.0, 200.0]])
assert_almost_equal(util.logsumexp(tensor, dim=0).data.numpy(), [20.0, 200.0])
def test_flatten_and_batch_shift_indices(self):
indices = numpy.array(
[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 9, 9, 9]], [[2, 1, 0, 7], [7, 7, 2, 3], [0, 0, 4, 2]]]
)
indices = torch.tensor(indices, dtype=torch.long)
shifted_indices = util.flatten_and_batch_shift_indices(indices, 10)
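# Sketch of where the expected values come from (not in the original test): every
# index in batch element b is shifted by b * sequence_length (10 here), so the
# second batch element's [2, 1, 0, 7] becomes [12, 11, 10, 17] while the first
# batch element is left unchanged.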
numpy.testing.assert_array_equal(
shifted_indices.data.numpy(),
numpy.array(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 9, 12, 11, 10, 17, 17, 17, 12, 13, 10, 10, 14, 12]
),
)
def test_batched_index_select(self):
indices = numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# Each element is a vector of its index.
targets = torch.ones([2, 10, 3]).cumsum(1) - 1
# Make the second batch double its index so they're different.
targets[1, :, :] *= 2
indices = torch.tensor(indices, dtype=torch.long)
selected = util.batched_index_select(targets, indices)
assert list(selected.size()) == [2, 2, 2, 3]
ones = numpy.ones([3])
numpy.testing.assert_array_equal(selected[0, 0, 0, :].data.numpy(), ones)
numpy.testing.assert_array_equal(selected[0, 0, 1, :].data.numpy(), ones * 2)
numpy.testing.assert_array_equal(selected[0, 1, 0, :].data.numpy(), ones * 3)
numpy.testing.assert_array_equal(selected[0, 1, 1, :].data.numpy(), ones * 4)
numpy.testing.assert_array_equal(selected[1, 0, 0, :].data.numpy(), ones * 10)
numpy.testing.assert_array_equal(selected[1, 0, 1, :].data.numpy(), ones * 12)
numpy.testing.assert_array_equal(selected[1, 1, 0, :].data.numpy(), ones * 14)
numpy.testing.assert_array_equal(selected[1, 1, 1, :].data.numpy(), ones * 16)
indices = numpy.array([[[1, 11], [3, 4]], [[5, 6], [7, 8]]])
indices = torch.tensor(indices, dtype=torch.long)
with pytest.raises(ConfigurationError):
util.batched_index_select(targets, indices)
indices = numpy.array([[[1, -1], [3, 4]], [[5, 6], [7, 8]]])
indices = torch.tensor(indices, dtype=torch.long)
with pytest.raises(ConfigurationError):
util.batched_index_select(targets, indices)
def test_masked_index_fill(self):
targets = torch.zeros([3, 5])
indices = torch.tensor([[4, 2, 3, -1], [0, 1, -1, -1], [1, 3, -1, -1]])
mask = indices >= 0
filled = util.masked_index_fill(targets, indices, mask)
numpy.testing.assert_array_equal(
filled, [[0, 0, 1, 1, 1], [1, 1, 0, 0, 0], [0, 1, 0, 1, 0]]
)
def test_masked_index_replace(self):
targets = torch.zeros([3, 5, 2])
indices = torch.tensor([[4, 2, 3, -1], [0, 1, -1, -1], [3, 1, -1, -1]])
replace_with = (
torch.arange(indices.numel())
.float()
.reshape(indices.shape)
.unsqueeze(-1)
.expand(indices.shape + (2,))
)
mask = indices >= 0
replaced = util.masked_index_replace(targets, indices, mask, replace_with)
numpy.testing.assert_array_equal(
replaced,
[
[[0, 0], [0, 0], [1, 1], [2, 2], [0, 0]],
[[4, 4], [5, 5], [0, 0], [0, 0], [0, 0]],
[[0, 0], [9, 9], [0, 0], [8, 8], [0, 0]],
],
)
def test_batched_span_select(self):
# Each element is a vector of its index.
targets = torch.ones([3, 12, 2]).cumsum(1) - 1
spans = torch.LongTensor(
[
[[0, 0], [1, 2], [5, 8], [10, 10]],
[[i, i] for i in range(3, -1, -1)],
[[0, 3], [1, 4], [2, 5], [10, 11]],
]
)
selected, mask = util.batched_span_select(targets, spans)
selected = torch.where(mask.unsqueeze(-1), selected, torch.empty_like(selected).fill_(-1))
numpy.testing.assert_array_equal(
selected,
[
[
[[0, 0], [-1, -1], [-1, -1], [-1, -1]],
[[1, 1], [2, 2], [-1, -1], [-1, -1]],
[[5, 5], [6, 6], [7, 7], [8, 8]],
[[10, 10], [-1, -1], [-1, -1], [-1, -1]],
],
[[[i, i], [-1, -1], [-1, -1], [-1, -1]] for i in range(3, -1, -1)],
[
[[0, 0], [1, 1], [2, 2], [3, 3]],
[[1, 1], [2, 2], [3, 3], [4, 4]],
[[2, 2], [3, 3], [4, 4], [5, 5]],
[[10, 10], [11, 11], [-1, -1], [-1, -1]],
],
],
)
def test_flattened_index_select(self):
indices = numpy.array([[1, 2], [3, 4]])
targets = torch.ones([2, 6, 3]).cumsum(1) - 1
# Make the second batch double its index so they're different.
targets[1, :, :] *= 2
indices = torch.tensor(indices, dtype=torch.long)
selected = util.flattened_index_select(targets, indices)
assert list(selected.size()) == [2, 2, 2, 3]
ones = numpy.ones([3])
numpy.testing.assert_array_equal(selected[0, 0, 0, :].data.numpy(), ones)
numpy.testing.assert_array_equal(selected[0, 0, 1, :].data.numpy(), ones * 2)
numpy.testing.assert_array_equal(selected[0, 1, 0, :].data.numpy(), ones * 3)
numpy.testing.assert_array_equal(selected[0, 1, 1, :].data.numpy(), ones * 4)
numpy.testing.assert_array_equal(selected[1, 0, 0, :].data.numpy(), ones * 2)
numpy.testing.assert_array_equal(selected[1, 0, 1, :].data.numpy(), ones * 4)
numpy.testing.assert_array_equal(selected[1, 1, 0, :].data.numpy(), ones * 6)
numpy.testing.assert_array_equal(selected[1, 1, 1, :].data.numpy(), ones * 8)
# Check we only accept 2D indices.
with pytest.raises(ConfigurationError):
util.flattened_index_select(targets, torch.ones([3, 4, 5]))
def test_bucket_values(self):
indices = torch.LongTensor([1, 2, 7, 1, 56, 900])
bucketed_distances = util.bucket_values(indices)
numpy.testing.assert_array_equal(
bucketed_distances.numpy(), numpy.array([1, 2, 5, 1, 8, 9])
)
def test_add_sentence_boundary_token_ids_handles_2D_input(self):
tensor = torch.from_numpy(numpy.array([[1, 2, 3], [4, 5, 0]]))
mask = tensor > 0
bos = 9
eos = 10
new_tensor, new_mask = util.add_sentence_boundary_token_ids(tensor, mask, bos, eos)
expected_new_tensor = numpy.array([[9, 1, 2, 3, 10], [9, 4, 5, 10, 0]])
assert (new_tensor.data.numpy() == expected_new_tensor).all()
assert (new_mask.data.numpy() == (expected_new_tensor > 0)).all()
def test_add_sentence_boundary_token_ids_handles_3D_input(self):
tensor = torch.from_numpy(
numpy.array(
[
[[1, 2, 3, 4], [5, 5, 5, 5], [6, 8, 1, 2]],
[[4, 3, 2, 1], [8, 7, 6, 5], [0, 0, 0, 0]],
]
)
)
mask = (tensor > 0).sum(dim=-1) > 0
bos = torch.from_numpy(numpy.array([9, 9, 9, 9]))
eos = torch.from_numpy(numpy.array([10, 10, 10, 10]))
new_tensor, new_mask = util.add_sentence_boundary_token_ids(tensor, mask, bos, eos)
expected_new_tensor = numpy.array(
[
[[9, 9, 9, 9], [1, 2, 3, 4], [5, 5, 5, 5], [6, 8, 1, 2], [10, 10, 10, 10]],
[[9, 9, 9, 9], [4, 3, 2, 1], [8, 7, 6, 5], [10, 10, 10, 10], [0, 0, 0, 0]],
]
)
assert (new_tensor.data.numpy() == expected_new_tensor).all()
assert (new_mask.data.numpy() == ((expected_new_tensor > 0).sum(axis=-1) > 0)).all()
def test_remove_sentence_boundaries(self):
tensor = torch.from_numpy(numpy.random.rand(3, 5, 7))
mask = torch.from_numpy(
# The mask with two elements is to test the corner case
# of an empty sequence, so here we are removing boundaries
# from "<S> </S>"
numpy.array([[1, 1, 0, 0, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 0]])
).bool()
new_tensor, new_mask = util.remove_sentence_boundaries(tensor, mask)
expected_new_tensor = torch.zeros(3, 3, 7)
expected_new_tensor[1, 0:3, :] = tensor[1, 1:4, :]
expected_new_tensor[2, 0:2, :] = tensor[2, 1:3, :]
assert_array_almost_equal(new_tensor.data.numpy(), expected_new_tensor.data.numpy())
expected_new_mask = torch.from_numpy(numpy.array([[0, 0, 0], [1, 1, 1], [1, 1, 0]])).bool()
assert (new_mask.data.numpy() == expected_new_mask.data.numpy()).all()
def test_add_positional_features(self):
# This is hard to test, so we check that we get the same result as the
# original tensorflow implementation:
# https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py#L270
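# Sketch of where these numbers come from (added for illustration): each position
# gets sin(position / timescale) for every geometrically spaced timescale, followed
# by the matching cos terms, so with dim 4 and timescales {1, 1e4} the row for
# position 1 is [sin(1), sin(1e-4), cos(1), cos(1e-4)]
# ~= [0.8415, 1e-4, 0.5403, 1.0], matching the second row below.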
tensor2tensor_result = numpy.asarray(
[
[0.00000000e00, 0.00000000e00, 1.00000000e00, 1.00000000e00],
[8.41470957e-01, 9.99999902e-05, 5.40302277e-01, 1.00000000e00],
[9.09297407e-01, 1.99999980e-04, -4.16146845e-01, 1.00000000e00],
]
)
tensor = torch.zeros([2, 3, 4])
result = util.add_positional_features(tensor, min_timescale=1.0, max_timescale=1.0e4)
numpy.testing.assert_almost_equal(result[0].detach().cpu().numpy(), tensor2tensor_result)
numpy.testing.assert_almost_equal(result[1].detach().cpu().numpy(), tensor2tensor_result)
# Check case with odd number of dimensions.
tensor2tensor_result = numpy.asarray(
[
[
0.00000000e00,
0.00000000e00,
0.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
0.00000000e00,
],
[
8.41470957e-01,
9.99983307e-03,
9.99999902e-05,
5.40302277e-01,
9.99949992e-01,
1.00000000e00,
0.00000000e00,
],
[
9.09297407e-01,
1.99986659e-02,
1.99999980e-04,
-4.16146815e-01,
9.99800026e-01,
1.00000000e00,
0.00000000e00,
],
]
)
tensor = torch.zeros([2, 3, 7])
result = util.add_positional_features(tensor, min_timescale=1.0, max_timescale=1.0e4)
numpy.testing.assert_almost_equal(result[0].detach().cpu().numpy(), tensor2tensor_result)
numpy.testing.assert_almost_equal(result[1].detach().cpu().numpy(), tensor2tensor_result)
def test_combine_tensors_and_multiply(self):
tensors = [torch.Tensor([[[2, 3]]]), torch.Tensor([[[5, 5]]])]
weight = torch.Tensor([4, 5])
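# Note on the combination mini-language, inferred from the assertions below
# (illustrative, not from the original test): "x" and "y" name tensors[0] and
# tensors[1], binary operators such as "x*y" act elementwise, comma-separated
# pieces are concatenated (hence the length-4 weight2 used for "x,y"), and the
# combined vector is dotted with the weight, e.g. "x" gives [2, 3] . [4, 5] = 8 + 15.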
combination = "x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[8 + 15]]
)
combination = "y"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[20 + 25]]
)
combination = "x,y"
weight2 = torch.Tensor([4, 5, 4, 5])
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight2), [[8 + 20 + 15 + 25]]
)
combination = "x-y"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[-3 * 4 + -2 * 5]]
)
combination = "y-x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[3 * 4 + 2 * 5]]
)
combination = "y+x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[7 * 4 + 8 * 5]]
)
combination = "y*x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight), [[10 * 4 + 15 * 5]]
)
combination = "y/x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight),
[[(5 / 2) * 4 + (5 / 3) * 5]],
decimal=4,
)
combination = "x/y"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight),
[[(2 / 5) * 4 + (3 / 5) * 5]],
decimal=4,
)
with pytest.raises(ConfigurationError):
util.combine_tensors_and_multiply("x+y+y", tensors, weight)
with pytest.raises(ConfigurationError):
util.combine_tensors_and_multiply("x%y", tensors, weight)
def test_combine_tensors_and_multiply_with_same_batch_size_and_embedding_dim(self):
# This test just makes sure we handle some potential edge cases where the lengths of all
# dimensions are the same, making sure that the multiplication with the weight vector
# happens along the right dimension (it should be the last one).
tensors = [torch.Tensor([[[5, 5], [4, 4]], [[2, 3], [1, 1]]])] # (2, 2, 2)
weight = torch.Tensor([4, 5]) # (2,)
combination = "x"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight),
[[20 + 25, 16 + 20], [8 + 15, 4 + 5]],
)
tensors = [
torch.Tensor([[[5, 5], [2, 2]], [[4, 4], [3, 3]]]),
torch.Tensor([[[2, 3]], [[1, 1]]]),
]
weight = torch.Tensor([4, 5])
combination = "x*y"
assert_almost_equal(
util.combine_tensors_and_multiply(combination, tensors, weight),
[
[5 * 2 * 4 + 5 * 3 * 5, 2 * 2 * 4 + 2 * 3 * 5],
[4 * 1 * 4 + 4 * 1 * 5, 3 * 1 * 4 + 3 * 1 * 5],
],
)
def test_combine_tensors_and_multiply_with_batch_size_one(self):
seq_len_1 = 10
seq_len_2 = 5
embedding_dim = 8
combination = "x,y,x*y"
t1 = torch.randn(1, seq_len_1, embedding_dim)
t2 = torch.randn(1, seq_len_2, embedding_dim)
combined_dim = util.get_combined_dim(combination, [embedding_dim, embedding_dim])
weight = torch.Tensor(combined_dim)
result = util.combine_tensors_and_multiply(
combination, [t1.unsqueeze(2), t2.unsqueeze(1)], weight
)
assert_almost_equal(result.size(), [1, seq_len_1, seq_len_2])
def test_combine_tensors_and_multiply_with_batch_size_one_and_seq_len_one(self):
seq_len_1 = 10
seq_len_2 = 1
embedding_dim = 8
combination = "x,y,x*y"
t1 = torch.randn(1, seq_len_1, embedding_dim)
t2 = torch.randn(1, seq_len_2, embedding_dim)
combined_dim = util.get_combined_dim(combination, [embedding_dim, embedding_dim])
weight = torch.Tensor(combined_dim)
result = util.combine_tensors_and_multiply(
combination, [t1.unsqueeze(2), t2.unsqueeze(1)], weight
)
assert_almost_equal(result.size(), [1, seq_len_1, seq_len_2])
def test_has_tensor(self):
has_tensor = util.has_tensor
tensor = torch.tensor([1, 2, 3])
assert has_tensor(["a", 10, tensor])
assert not has_tensor(["a", 10])
assert has_tensor(("a", 10, tensor))
assert not has_tensor(("a", 10))
assert has_tensor({"a": tensor, "b": 1})
assert not has_tensor({"a": 10, "b": 1})
assert has_tensor(tensor)
assert not has_tensor(3)
assert has_tensor({"x": [0, {"inside": {"double_inside": [3, [10, tensor]]}}]})
def test_combine_initial_dims(self):
tensor = torch.randn(4, 10, 20, 17, 5)
tensor2d = util.combine_initial_dims(tensor)
assert list(tensor2d.size()) == [4 * 10 * 20 * 17, 5]
def test_uncombine_initial_dims(self):
embedding2d = torch.randn(4 * 10 * 20 * 17 * 5, 12)
embedding = util.uncombine_initial_dims(embedding2d, torch.Size((4, 10, 20, 17, 5)))
assert list(embedding.size()) == [4, 10, 20, 17, 5, 12]
def test_inspect_model_parameters(self):
model_archive = str(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
parameters_inspection = str(
self.FIXTURES_ROOT / "basic_classifier" / "parameters_inspection.json"
)
model = load_archive(model_archive).model
with open(parameters_inspection) as file:
parameters_inspection_dict = json.load(file)
assert parameters_inspection_dict == util.inspect_parameters(model)
def test_move_to_device(self):
# We're faking the tensor here so that we can test the calls to .cuda() without actually
# needing a GPU.
class FakeTensor(torch.Tensor):
def __init__(self):
self._device = None
def cuda(self, device):
self._device = device
return self
class A(NamedTuple):
a: int
b: torch.Tensor
structured_obj = {
"a": [A(1, FakeTensor()), A(2, FakeTensor())],
"b": FakeTensor(),
"c": (1, FakeTensor()),
}
new_device = torch.device(4)
moved_obj = util.move_to_device(structured_obj, new_device)
assert moved_obj["a"][0].a == 1
assert moved_obj["a"][0].b._device == new_device
assert moved_obj["a"][1].b._device == new_device
assert moved_obj["b"]._device == new_device
assert moved_obj["c"][0] == 1
assert moved_obj["c"][1]._device == new_device
def test_extend_layer(self):
lin_layer = torch.nn.Linear(10, 5)
new_dim = 8
old_weights = lin_layer.weight.data.clone()
old_bias = lin_layer.bias.data.clone()
util.extend_layer(lin_layer, new_dim)
assert lin_layer.weight.data.shape == (8, 10)
assert lin_layer.bias.data.shape == (8,)
assert (lin_layer.weight.data[:5] == old_weights).all()
assert (lin_layer.bias.data[:5] == old_bias).all()
assert lin_layer.out_features == new_dim
def test_masked_topk_selects_top_scored_items_and_respects_masking(self):
items = torch.randn([3, 4, 5]).clamp(min=0.0, max=1.0)
items[0, :2, :] = 1
items[1, 2:, :] = 1
items[2, 2:, :] = 1
scores = items.sum(-1)
mask = torch.ones([3, 4]).bool()
mask[1, 0] = 0
mask[1, 3] = 0
pruned_scores, pruned_mask, pruned_indices = util.masked_topk(scores, mask, 2)
# Second element in the batch would have indices 2, 3, but
# 3 and 0 are masked, so instead it has 1, 2.
numpy.testing.assert_array_equal(
pruned_indices.data.numpy(), numpy.array([[0, 1], [1, 2], [2, 3]])
)
numpy.testing.assert_array_equal(pruned_mask.data.numpy(), numpy.ones([3, 2]))
# scores should be the result of index_selecting the pruned_indices.
correct_scores = util.batched_index_select(scores.unsqueeze(-1), pruned_indices).squeeze(-1)
self.assert_array_equal_with_mask(correct_scores, pruned_scores, pruned_mask)
def test_masked_topk_works_for_completely_masked_rows(self):
items = torch.randn([3, 4, 5]).clamp(min=0.0, max=1.0)
items[0, :2, :] = 1
items[1, 2:, :] = 1
items[2, 2:, :] = 1
scores = items.sum(-1)
mask = torch.ones([3, 4]).bool()
mask[1, 0] = 0
mask[1, 3] = 0
mask[2, :] = 0 # fully masked last batch element.
pruned_scores, pruned_mask, pruned_indices = util.masked_topk(scores, mask, 2)
# We can't check the indices for the last row here, because it's completely
# masked, so we only compare the first two rows and then check the mask.
numpy.testing.assert_array_equal(
pruned_indices[:2].data.numpy(), numpy.array([[0, 1], [1, 2]])
)
numpy.testing.assert_array_equal(
pruned_mask.data.numpy(), numpy.array([[1, 1], [1, 1], [0, 0]])
)
# scores should be the result of index_selecting the pruned_indices.
correct_scores = util.batched_index_select(scores.unsqueeze(-1), pruned_indices).squeeze(-1)
self.assert_array_equal_with_mask(correct_scores, pruned_scores, pruned_mask)
def test_masked_topk_selects_top_scored_items_and_respects_masking_different_num_items(self):
items = torch.randn([3, 4, 5]).clamp(min=0.0, max=1.0)
items[0, 0, :] = 1.5
items[0, 1, :] = 2
items[0, 3, :] = 1
items[1, 1:3, :] = 1
items[2, 0, :] = 1
items[2, 1, :] = 2
items[2, 2, :] = 1.5
scores = items.sum(-1)
mask = torch.ones([3, 4]).bool()
mask[1, 3] = 0
k = torch.tensor([3, 2, 1], dtype=torch.long)
pruned_scores, pruned_mask, pruned_indices = util.masked_topk(scores, mask, k)
# For the second element, positions 1 and 2 carry the highest scores and
# position 3 is masked; only two items are requested, so the kept indices
# are 1 and 2, with the final slot left as padding.
numpy.testing.assert_array_equal(
pruned_indices.data.numpy(), numpy.array([[0, 1, 3], [1, 2, 2], [1, 2, 2]])
)
numpy.testing.assert_array_equal(
pruned_mask.data.numpy(), numpy.array([[1, 1, 1], [1, 1, 0], [1, 0, 0]])
)
# scores should be the result of index_selecting the pruned_indices.
correct_scores = util.batched_index_select(scores.unsqueeze(-1), pruned_indices).squeeze(-1)
self.assert_array_equal_with_mask(correct_scores, pruned_scores, pruned_mask)
def test_masked_topk_works_for_row_with_no_items_requested(self):
# Case where `num_items_to_keep` is a tensor rather than an int. Make sure it does the right
# thing when no items are requested for one of the rows.
items = torch.randn([3, 4, 5]).clamp(min=0.0, max=1.0)
items[0, :3, :] = 1
items[1, 2:, :] = 1
items[2, 2:, :] = 1
scores = items.sum(-1)
mask = torch.ones([3, 4]).bool()
mask[1, 0] = 0
mask[1, 3] = 0
k = torch.tensor([3, 2, 0], dtype=torch.long)
pruned_scores, pruned_mask, pruned_indices = util.masked_topk(scores, mask, k)
# First element just picks the top three entries. The second would pick entries 2 and 3,
# but 0 and 3 are masked, so it takes 1 and 2 (repeating the second index as padding).
# The third element requests no items (k = 0), so its row is all padding and simply
# repeats index 3.
numpy.testing.assert_array_equal(
pruned_indices.data.numpy(), numpy.array([[0, 1, 2], [1, 2, 2], [3, 3, 3]])
)
numpy.testing.assert_array_equal(
pruned_mask.data.numpy(), numpy.array([[1, 1, 1], [1, 1, 0], [0, 0, 0]])
)
# scores should be the result of index_selecting the pruned_indices.
correct_scores = util.batched_index_select(scores.unsqueeze(-1), pruned_indices).squeeze(-1)
self.assert_array_equal_with_mask(correct_scores, pruned_scores, pruned_mask)
def test_masked_topk_works_for_multiple_dimensions(self):
# fmt: off
items = torch.FloatTensor([ # (3, 2, 5)
[[4, 2, 9, 9, 7], [-4, -2, -9, -9, -7]],
[[5, 4, 1, 8, 8], [9, 1, 7, 4, 1]],
[[9, 8, 9, 6, 0], [2, 2, 2, 2, 2]],
]).unsqueeze(-1).expand(3, 2, 5, 4)
mask = torch.tensor([
[[False, False, False, False, False], [True, True, True, True, True]],
[[True, True, True, True, False], [False, True, True, True, True]],
[[True, False, True, True, True], [False, True, False, True, True]],
]).unsqueeze(-1).expand(3, 2, 5, 4)
# Starting k as all ones is the same as passing a scalar int of 1, but we want to exercise the tensor code path.
k = torch.ones(3, 5, 4, dtype=torch.long)
k[1, 3, :] = 2
target_items = torch.FloatTensor([
[[-4, -2, -9, -9, -7], [0, 0, 0, 0, 0]],
[[5, 4, 7, 8, 1], [0, 0, 0, 4, 0]],
[[9, 2, 9, 6, 2], [0, 0, 0, 0, 0]],
]).unsqueeze(-1).expand(3, 2, 5, 4)
target_mask = torch.ones(3, 2, 5, 4, dtype=torch.bool)
target_mask[:, 1, :, :] = 0
target_mask[1, 1, 3, :] = 1
target_indices = torch.LongTensor([
[[1, 1, 1, 1, 1], [0, 0, 0, 0, 0]],
[[0, 0, 1, 0, 1], [0, 0, 0, 1, 0]],
[[0, 1, 0, 0, 1], [0, 0, 0, 0, 0]],
]).unsqueeze(-1).expand(3, 2, 5, 4)
# fmt: on
pruned_items, pruned_mask, pruned_indices = util.masked_topk(items, mask, k, dim=1)
numpy.testing.assert_array_equal(pruned_mask.data.numpy(), target_mask.data.numpy())
self.assert_array_equal_with_mask(pruned_items, target_items, pruned_mask)
self.assert_array_equal_with_mask(pruned_indices, target_indices, pruned_mask)
def assert_array_equal_with_mask(self, a, b, mask):
numpy.testing.assert_array_equal((a * mask).data.numpy(), (b * mask).data.numpy())
def test_tensors_equal(self):
# Basic
assert util.tensors_equal(torch.tensor([1]), torch.tensor([1]))
assert not util.tensors_equal(torch.tensor([1]), torch.tensor([2]))
# Bool
assert util.tensors_equal(torch.tensor([True]), torch.tensor([True]))
# Cross dtype
assert util.tensors_equal(torch.tensor([1]), torch.tensor([1.0]))
assert util.tensors_equal(torch.tensor([1]), torch.tensor([True]))
# Containers
assert util.tensors_equal([torch.tensor([1])], [torch.tensor([1])])
assert not util.tensors_equal([torch.tensor([1])], [torch.tensor([2])])
assert util.tensors_equal({"key": torch.tensor([1])}, {"key": torch.tensor([1])})
def test_info_value_of_dtype(self):
with pytest.raises(TypeError):
util.info_value_of_dtype(torch.bool)
assert util.min_value_of_dtype(torch.half) == -65504.0
assert util.max_value_of_dtype(torch.half) == 65504.0
assert util.tiny_value_of_dtype(torch.half) == 1e-4
assert util.min_value_of_dtype(torch.float) == -3.4028234663852886e38
assert util.max_value_of_dtype(torch.float) == 3.4028234663852886e38
assert util.tiny_value_of_dtype(torch.float) == 1e-13
assert util.min_value_of_dtype(torch.uint8) == 0
assert util.max_value_of_dtype(torch.uint8) == 255
assert util.min_value_of_dtype(torch.long) == -9223372036854775808
assert util.max_value_of_dtype(torch.long) == 9223372036854775807
def test_get_token_ids_from_text_field_tensors(self):
# Setting up a number of different indexers that we can test against later.
string_tokens = ["This", "is", "a", "test"]
tokens = [Token(x) for x in string_tokens]
vocab = Vocabulary()
vocab.add_tokens_to_namespace(string_tokens, "tokens")
vocab.add_tokens_to_namespace(
set([char for token in string_tokens for char in token]), "token_characters"
)
elmo_indexer = ELMoTokenCharactersIndexer()
token_chars_indexer = TokenCharactersIndexer()
single_id_indexer = SingleIdTokenIndexer()
indexers = {"elmo": elmo_indexer, "chars": token_chars_indexer, "tokens": single_id_indexer}
# In all of the tests below, we'll want to recover the token ids that were produced by the
# single_id indexer, so we grab that output first.
text_field = TextField(tokens, {"tokens": single_id_indexer})
text_field.index(vocab)
tensors = text_field.as_tensor(text_field.get_padding_lengths())
expected_token_ids = tensors["tokens"]["tokens"]
# Now the actual tests.
text_field = TextField(tokens, indexers)
text_field.index(vocab)
tensors = text_field.as_tensor(text_field.get_padding_lengths())
token_ids = util.get_token_ids_from_text_field_tensors(tensors)
assert (token_ids == expected_token_ids).all()
| allennlp-master | tests/nn/util_test.py |
from typing import Dict, Tuple
import numpy as np
import pytest
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.nn.beam_search import (
MultinomialSampler,
BeamSearch,
TopKSampler,
TopPSampler,
GumbelSampler,
)
from allennlp.common.params import Params
transition_probabilities = torch.tensor(
[
[0.0, 0.4, 0.3, 0.2, 0.1, 0.0], # start token -> jth token
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0], # 1st token -> jth token
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0], # 2nd token -> jth token
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0], # ...
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0], # ...
[0.2, 0.1, 0.2, 0.2, 0.2, 0.3],
] # end token -> jth token
)
log_probabilities = torch.log(
torch.tensor([[0.1, 0.3, 0.3, 0.3, 0.0, 0.0], [0.0, 0.0, 0.4, 0.3, 0.2, 0.1]])
)
def take_step_no_timestep(
last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Take decoding step.
This is a simple function that defines how probabilities are computed for the
next time step during the beam search.
We use a simple target vocabulary of size 6. In this vocabulary, index 0 represents
the start token, and index 5 represents the end token. The transition probability
from a state where the last predicted token was token `j` to new token `i` is
given by the `(i, j)` element of the matrix `transition_probabilities`.
"""
log_probs_list = []
for last_token in last_predictions:
log_probs = torch.log(transition_probabilities[last_token.item()])
log_probs_list.append(log_probs)
return torch.stack(log_probs_list), state
def take_step_with_timestep(
last_predictions: torch.Tensor,
state: Dict[str, torch.Tensor],
timestep: int,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
return take_step_no_timestep(last_predictions, state)
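# A minimal, self-contained sketch added for illustration (the original module does
# not define or use this helper): greedily walking the toy transition matrix shows
# where the expected top beam [1, 2, 3, 4, 5] and its log probability log(0.4) in
# the tests below come from.
def greedy_rollout_example(max_steps: int = 10) -> Tuple[torch.Tensor, torch.Tensor]:
    end_index = transition_probabilities.size(0) - 1
    token = 0
    path = []
    log_prob = torch.tensor(0.0)
    for _ in range(max_steps):
        step_probs = transition_probabilities[token]
        # At each step, follow the single most probable transition.
        log_prob = log_prob + torch.log(step_probs.max())
        token = int(step_probs.argmax())
        path.append(token)
        if token == end_index:
            break
    return torch.tensor(path), log_prob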
class BeamSearchTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.end_index = transition_probabilities.size()[0] - 1
self.beam_search = BeamSearch(self.end_index, max_steps=10, beam_size=3)
# This is what the top k should look like for each item in the batch.
self.expected_top_k = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 5], [3, 4, 5, 5, 5]])
# This is what the log probs should look like for each item in the batch.
self.expected_log_probs = np.log(np.array([0.4, 0.3, 0.2]))
def _check_results(
self,
batch_size: int = 5,
expected_top_k: np.array = None,
expected_log_probs: np.array = None,
beam_search: BeamSearch = None,
state: Dict[str, torch.Tensor] = None,
take_step=take_step_with_timestep,
) -> None:
expected_top_k = expected_top_k if expected_top_k is not None else self.expected_top_k
expected_log_probs = (
expected_log_probs if expected_log_probs is not None else self.expected_log_probs
)
state = state or {}
beam_search = beam_search or self.beam_search
beam_size = beam_search.beam_size
initial_predictions = torch.tensor([0] * batch_size)
top_k, log_probs = beam_search.search(initial_predictions, state, take_step) # type: ignore
# top_k should be shape `(batch_size, beam_size, max_predicted_length)`.
assert list(top_k.size())[:-1] == [batch_size, beam_size]
np.testing.assert_array_equal(top_k[0].numpy(), expected_top_k)
# log_probs should be shape `(batch_size, beam_size)`.
assert list(log_probs.size()) == [batch_size, beam_size]
np.testing.assert_allclose(log_probs[0].numpy(), expected_log_probs)
@pytest.mark.parametrize("step_function", [take_step_with_timestep, take_step_no_timestep])
def test_search(self, step_function):
self._check_results(take_step=step_function)
def test_finished_state(self):
state = {}
state["foo"] = torch.tensor([[1, 0, 1], [2, 0, 1], [0, 0, 1], [1, 1, 1], [0, 0, 0]])
# shape: (batch_size, 3)
expected_finished_state = {}
expected_finished_state["foo"] = np.array(
[
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[2, 0, 1],
[2, 0, 1],
[2, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
]
)
# shape: (batch_size x beam_size, 3)
self._check_results(state=state)
# check finished state.
for key, array in expected_finished_state.items():
np.testing.assert_allclose(state[key].numpy(), array)
def test_diff_shape_state(self):
state = {}
state["decoder_hidden"] = torch.tensor(
[[1, 0, 1], [2, 0, 1], [0, 0, 1], [1, 1, 1], [0, 0, 0]]
)
state["decoder_hidden"] = state["decoder_hidden"].unsqueeze(0).repeat(2, 1, 1)
# shape: (2, batch_size, 3)
seq = [
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[2, 0, 1],
[2, 0, 1],
[2, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
]
seq = [seq] * 2
expected_finished_state = {}
expected_finished_state["decoder_hidden"] = np.array(seq)
# shape: (2, batch_size x beam_size, 3)
self._check_results(state=state)
# check finished state.
for key, array in expected_finished_state.items():
np.testing.assert_allclose(state[key].numpy(), array)
def test_batch_size_of_one(self):
self._check_results(batch_size=1)
def test_greedy_search(self):
beam_search = BeamSearch(self.end_index, beam_size=1)
expected_top_k = np.array([[1, 2, 3, 4, 5]])
expected_log_probs = np.log(np.array([0.4]))
self._check_results(
expected_top_k=expected_top_k,
expected_log_probs=expected_log_probs,
beam_search=beam_search,
)
def test_single_step(self):
self.beam_search.max_steps = 1
expected_top_k = np.array([[1], [2], [3]])
expected_log_probs = np.log(np.array([0.4, 0.3, 0.2]))
self._check_results(
expected_top_k=expected_top_k,
expected_log_probs=expected_log_probs,
)
def test_early_stopping(self):
"""
Checks the case where beam search reaches `max_steps` before finding end tokens.
"""
beam_search = BeamSearch(self.end_index, beam_size=3, max_steps=3)
expected_top_k = np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
expected_log_probs = np.log(np.array([0.4, 0.3, 0.2]))
self._check_results(
expected_top_k=expected_top_k,
expected_log_probs=expected_log_probs,
beam_search=beam_search,
)
def test_different_per_node_beam_size(self):
# per_node_beam_size = 1
beam_search = BeamSearch(self.end_index, beam_size=3, per_node_beam_size=1)
self._check_results(beam_search=beam_search)
# per_node_beam_size = 2
beam_search = BeamSearch(self.end_index, beam_size=3, per_node_beam_size=2)
self._check_results(beam_search=beam_search)
def test_catch_bad_config(self):
"""
If `per_node_beam_size` (which defaults to `beam_size`) is larger than
the size of the target vocabulary, `BeamSearch.search` should raise
a ConfigurationError.
"""
beam_search = BeamSearch(self.end_index, beam_size=20)
with pytest.raises(ConfigurationError):
self._check_results(beam_search=beam_search)
def test_warn_for_bad_log_probs(self):
# The only valid next step from the initial predictions is the end index.
# But with a beam size of 3, the call to `topk` to find the 3 most likely
# next beams will result in 2 new beams that are invalid, in that they have a probability of 0.
# The beam search should warn us of this.
initial_predictions = torch.LongTensor([self.end_index - 1, self.end_index - 1])
with pytest.warns(RuntimeWarning, match="Infinite log probabilities"):
self.beam_search.search(initial_predictions, {}, take_step_no_timestep)
def test_empty_sequences(self):
initial_predictions = torch.LongTensor([self.end_index - 1, self.end_index - 1])
beam_search = BeamSearch(self.end_index, beam_size=1)
with pytest.warns(RuntimeWarning, match="Empty sequences predicted"):
predictions, log_probs = beam_search.search(
initial_predictions, {}, take_step_with_timestep
)
# predictions should have shape `(batch_size, beam_size, max_predicted_length)`.
assert list(predictions.size()) == [2, 1, 1]
# log probs should have shape `(batch_size, beam_size)`.
assert list(log_probs.size()) == [2, 1]
assert (predictions == self.end_index).all()
assert (log_probs == 0).all()
def test_default_from_params_params(self):
beam_search = BeamSearch.from_params(Params({"beam_size": 2, "end_index": 7}))
assert beam_search.beam_size == 2
assert beam_search._end_index == 7
def test_top_p_search(self):
initial_predictions = torch.tensor([0] * 5)
beam_size = 3
take_step = take_step_with_timestep
p_sampler = TopPSampler(p=0.8)
top_p, log_probs = BeamSearch(
self.end_index, beam_size=beam_size, max_steps=10, sampler=p_sampler
).search(initial_predictions, {}, take_step)
beam_size = beam_size or 1
batch_size = 5
# top_p should be shape `(batch_size, beam_size, max_predicted_length)`.
assert list(top_p.size())[:-1] == [batch_size, beam_size]
assert ((0 <= top_p) & (top_p <= 5)).all()
# log_probs should be shape `(batch_size, beam_size)`.
assert list(log_probs.size()) == [batch_size, beam_size]
@pytest.mark.parametrize("p_val", [-1.0, 1.2, 1.1, float("inf")])
def test_p_val(self, p_val):
with pytest.raises(ValueError):
initial_predictions = torch.tensor([0] * 5)
take_step = take_step_with_timestep
beam_size = 3
p_sampler = TopPSampler(p=p_val, with_replacement=True)
top_k, log_probs = BeamSearch(
self.end_index, beam_size=beam_size, max_steps=10, sampler=p_sampler
).search(initial_predictions, {}, take_step)
def test_top_k_search(self):
initial_predictions = torch.tensor([0] * 5)
beam_size = 3
take_step = take_step_with_timestep
k_sampler = TopKSampler(k=5, with_replacement=True)
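        # TopKSampler samples each candidate from the k most likely next tokens;
        # with_replacement=True lets the same token be drawn for more than one beam.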
top_k, log_probs = BeamSearch(
self.end_index, beam_size=beam_size, max_steps=10, sampler=k_sampler
).search(initial_predictions, {}, take_step)
beam_size = beam_size or 1
batch_size = 5
        # top_k should be shape `(batch_size, beam_size, max_predicted_length)`.
assert list(top_k.size())[:-1] == [batch_size, beam_size]
assert ((0 <= top_k) & (top_k <= 5)).all()
        # log_probs should be shape `(batch_size, beam_size)`.
assert list(log_probs.size()) == [batch_size, beam_size]
@pytest.mark.parametrize("k_val", [-1, 0])
def test_k_val(self, k_val):
with pytest.raises(ValueError):
initial_predictions = torch.tensor([0] * 5)
take_step = take_step_with_timestep
beam_size = 3
k_sampler = TopKSampler(k=k_val, with_replacement=True)
top_k, log_probs = BeamSearch(
self.end_index, beam_size=beam_size, max_steps=10, sampler=k_sampler
).search(initial_predictions, {}, take_step)
def test_stochastic_beam_search(self):
initial_predictions = torch.tensor([0] * 5)
batch_size = 5
beam_size = 3
take_step = take_step_with_timestep
gumbel_sampler = GumbelSampler()
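        # GumbelSampler implements stochastic beam search (Kool et al., 2019): log-probabilities
        # are perturbed with Gumbel noise and the top perturbed scores are kept, which amounts to
        # sampling complete sequences without replacement.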
top_k, log_probs = BeamSearch(
self.end_index, beam_size=beam_size, max_steps=10, sampler=gumbel_sampler
).search(initial_predictions, {}, take_step)
        # top_k should be shape `(batch_size, beam_size, max_predicted_length)`.
assert list(top_k.size())[:-1] == [batch_size, beam_size]
assert ((0 <= top_k) & (top_k <= 5)).all()
        # log_probs should be shape `(batch_size, beam_size)`.
assert list(log_probs.size()) == [batch_size, beam_size]
        # Check to make sure that once the end index is predicted, all subsequent tokens
        # must also be the end index.
for batch in top_k:
for beam in batch:
reached_end = False
for token in beam:
if token == self.end_index:
reached_end = True
if reached_end:
assert token == self.end_index
def test_params_sampling(self):
beam_search = BeamSearch.from_params(
Params(
{
"sampler": {
"type": "top-k",
"k": 4,
},
"beam_size": 2,
"end_index": 7,
}
)
)
assert beam_search.beam_size == 2
assert beam_search._end_index == 7
assert beam_search.sampler is not None
def test_params_p_sampling(self):
beam_search = BeamSearch.from_params(
Params(
{
"sampler": {
"type": "top-p",
"p": 0.8,
},
"beam_size": 2,
"end_index": 7,
}
)
)
assert beam_search.beam_size == 2
assert beam_search._end_index == 7
assert beam_search.sampler is not None
def test_multinomial_sampler(self):
sampler = MultinomialSampler(temperature=0.9)
probabilities, classes, state = sampler.sample_nodes(log_probabilities, 3, {"foo": "bar"})
assert probabilities.size() == classes.size()
assert classes.size() == (2, 3)
assert all([x < 4 for x in classes[0]])
assert all([x > 1 for x in classes[1]])
def test_top_k_sampler(self):
sampler = TopKSampler(k=3, temperature=0.9)
probabilities, classes, state = sampler.sample_nodes(log_probabilities, 3, {"foo": "bar"})
assert probabilities.size() == classes.size()
assert classes.size() == (2, 3)
assert all([x > 0 and x < 4 for x in classes[0]])
assert all([x > 1 and x < 5 for x in classes[1]])
def test_top_p_sampler(self):
sampler = TopPSampler(p=0.8, temperature=0.9)
probabilities, classes, state = sampler.sample_nodes(log_probabilities, 3, {"foo": "bar"})
assert probabilities.size() == classes.size()
assert classes.size() == (2, 3)
assert all([x > 0 and x < 4 for x in classes[0]])
assert all([x > 1 and x < 5 for x in classes[1]])
# Make sure the filtered classes include the first class that exceeds p
sampler = TopPSampler(p=0.7, temperature=1.0)
probabilities, classes, state = sampler.sample_nodes(log_probabilities, 2, {"foo": "bar"})
assert all([x == 2 or x == 3 or x == 1 for x in classes[0]])
assert all([x == 2 or x == 3 for x in classes[1]])
def test_gumbel_sampler(self):
sampler = GumbelSampler()
num_classes = len(log_probabilities[0])
sampler_state = sampler.init_state(log_probabilities, batch_size=2, num_classes=num_classes)
log_probs, indices, state = sampler.sample_beams(log_probabilities, 3, sampler_state)
assert log_probs.size() == indices.size()
assert indices.size() == (2, 3)
# Make sure the probabilities are sorted.
_, sorted_indices = log_probs.sort(dim=-1, descending=True)
assert (sorted_indices == torch.arange(3).unsqueeze(0)).all()
assert all([x >= 0 and x < 4 for x in indices[0]])
assert all([x > 1 and x <= 5 for x in indices[1]])
| allennlp-master | tests/nn/beam_search_test.py |
import os
from typing import Dict
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.data.dataset_readers import SequenceTaggingDatasetReader
from allennlp.models.model import Model
from allennlp.training import NoOpTrainer
class ConstantModel(Model):
def forward(self, *inputs) -> Dict[str, torch.Tensor]:
return {"class": torch.tensor(98)}
class TestNoOpTrainer(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.instances = SequenceTaggingDatasetReader().read(
self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"
)
vocab = Vocabulary.from_instances(self.instances)
self.vocab = vocab
self.model = ConstantModel(vocab)
def test_trainer_serializes(self):
serialization_dir = self.TEST_DIR / "serialization_dir"
trainer = NoOpTrainer(serialization_dir=serialization_dir, model=self.model)
metrics = trainer.train()
assert metrics == {}
assert os.path.exists(serialization_dir / "best.th")
assert os.path.exists(serialization_dir / "vocabulary")
| allennlp-master | tests/training/no_op_trainer_test.py |
from allennlp.common.params import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.data.dataset_readers import SequenceTaggingDatasetReader
from allennlp.data import PyTorchDataLoader
from allennlp.models.simple_tagger import SimpleTagger
from allennlp.training import GradientDescentTrainer
from allennlp.training.optimizers import Optimizer
class TestOptimizer(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.instances = SequenceTaggingDatasetReader().read(
self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"
)
vocab = Vocabulary.from_instances(self.instances)
self.model_params = Params(
{
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
}
)
self.model = SimpleTagger.from_params(vocab=vocab, params=self.model_params)
def test_optimizer_basic(self):
optimizer_params = Params({"type": "sgd", "lr": 1})
parameters = [[n, p] for n, p in self.model.named_parameters() if p.requires_grad]
optimizer = Optimizer.from_params(model_parameters=parameters, params=optimizer_params)
param_groups = optimizer.param_groups
assert len(param_groups) == 1
assert param_groups[0]["lr"] == 1
def test_optimizer_parameter_groups(self):
optimizer_params = Params(
{
"type": "sgd",
"lr": 1,
"momentum": 5,
"parameter_groups": [
# the repeated "bias_" checks a corner case
# NOT_A_VARIABLE_NAME displays a warning but does not raise an exception
[["weight_i", "bias_", "bias_", "NOT_A_VARIABLE_NAME"], {"lr": 2}],
[["tag_projection_layer"], {"lr": 3}],
[["^text_field_embedder.*$"], {"requires_grad": False}],
],
}
)
# Before initializing the optimizer all params in this module will still require grad.
assert all([param.requires_grad for param in self.model.text_field_embedder.parameters()])
parameters = [[n, p] for n, p in self.model.named_parameters() if p.requires_grad]
optimizer = Optimizer.from_params(model_parameters=parameters, params=optimizer_params)
param_groups = optimizer.param_groups
# After initializing the optimizer, requires_grad should be false for all params in this module.
assert not any(
[param.requires_grad for param in self.model.text_field_embedder.parameters()]
)
assert len(param_groups) == 3
assert param_groups[0]["lr"] == 2
assert param_groups[1]["lr"] == 3
# base case uses default lr
assert param_groups[2]["lr"] == 1
for k in range(3):
assert param_groups[k]["momentum"] == 5
# all LSTM parameters except recurrent connections (those with weight_h in name)
assert len(param_groups[0]["params"]) == 6
# just the projection weight and bias
assert len(param_groups[1]["params"]) == 2
# the recurrent connections left in the default group
assert len(param_groups[2]["params"]) == 2
class TestDenseSparseAdam(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.instances = SequenceTaggingDatasetReader().read(
self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"
)
self.vocab = Vocabulary.from_instances(self.instances)
self.model_params = Params(
{
"text_field_embedder": {
"token_embedders": {
"tokens": {"type": "embedding", "embedding_dim": 5, "sparse": True}
}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
}
)
self.model = SimpleTagger.from_params(vocab=self.vocab, params=self.model_params)
def test_can_optimise_model_with_dense_and_sparse_params(self):
optimizer_params = Params({"type": "dense_sparse_adam"})
parameters = [[n, p] for n, p in self.model.named_parameters() if p.requires_grad]
optimizer = Optimizer.from_params(model_parameters=parameters, params=optimizer_params)
self.instances.index_with(self.vocab)
GradientDescentTrainer(self.model, optimizer, PyTorchDataLoader(self.instances, 2)).train()
| allennlp-master | tests/training/optimizer_test.py |
import copy
import glob
import json
import os
import re
import time
from typing import Any, Dict, List
import math
import pytest
import torch
from torch.utils.data import DataLoader
from torch.nn.utils import clip_grad_norm_
from allennlp.data.dataloader import PyTorchDataLoader
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.common.testing import AllenNlpTestCase, requires_gpu, requires_multi_gpu
from allennlp.data import Vocabulary
from allennlp.data.dataloader import TensorDict
from allennlp.data.dataset_readers import SequenceTaggingDatasetReader
from allennlp.models.model import Model
from allennlp.models.simple_tagger import SimpleTagger
from allennlp.training import (
GradientDescentTrainer,
Checkpointer,
TensorboardWriter,
BatchCallback,
EpochCallback,
TrainerCallback,
TrackEpochCallback,
)
from allennlp.training.learning_rate_schedulers import CosineWithRestarts
from allennlp.training.learning_rate_schedulers import ExponentialLearningRateScheduler
from allennlp.training.momentum_schedulers import MomentumScheduler
from allennlp.training.moving_average import ExponentialMovingAverage
from allennlp.data import allennlp_collate
class TrainerTestBase(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.instances = SequenceTaggingDatasetReader().read(
self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"
)
self.instances_lazy = SequenceTaggingDatasetReader(lazy=True).read(
self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"
)
vocab = Vocabulary.from_instances(self.instances)
self.vocab = vocab
self.model_params = Params(
{
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
}
)
self.model = SimpleTagger.from_params(vocab=self.vocab, params=self.model_params)
self.optimizer = torch.optim.SGD(self.model.parameters(), 0.01, momentum=0.9)
self.data_loader = DataLoader(self.instances, batch_size=2, collate_fn=allennlp_collate)
self.data_loader_lazy = DataLoader(
self.instances_lazy, batch_size=2, collate_fn=allennlp_collate
)
self.validation_data_loader = DataLoader(
self.instances, batch_size=2, collate_fn=allennlp_collate
)
self.instances.index_with(vocab)
self.instances_lazy.index_with(vocab)
class TestTrainer(TrainerTestBase):
def test_trainer_can_run(self):
trainer = GradientDescentTrainer(
model=self.model,
optimizer=self.optimizer,
data_loader=self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=2,
)
metrics = trainer.train()
assert "best_validation_loss" in metrics
assert isinstance(metrics["best_validation_loss"], float)
assert "best_validation_accuracy" in metrics
assert isinstance(metrics["best_validation_accuracy"], float)
assert "best_validation_accuracy3" in metrics
assert isinstance(metrics["best_validation_accuracy3"], float)
assert "best_epoch" in metrics
assert isinstance(metrics["best_epoch"], int)
# Making sure that both increasing and decreasing validation metrics work.
trainer = GradientDescentTrainer(
model=self.model,
optimizer=self.optimizer,
data_loader=self.data_loader,
validation_data_loader=self.validation_data_loader,
validation_metric="+loss",
num_epochs=2,
)
metrics = trainer.train()
assert "best_validation_loss" in metrics
assert isinstance(metrics["best_validation_loss"], float)
assert "best_validation_accuracy" in metrics
assert isinstance(metrics["best_validation_accuracy"], float)
assert "best_validation_accuracy3" in metrics
assert isinstance(metrics["best_validation_accuracy3"], float)
assert "best_epoch" in metrics
assert isinstance(metrics["best_epoch"], int)
assert "peak_worker_0_memory_MB" in metrics
assert isinstance(metrics["peak_worker_0_memory_MB"], float)
assert metrics["peak_worker_0_memory_MB"] > 0
def test_trainer_can_run_exponential_moving_average(self):
moving_average = ExponentialMovingAverage(self.model.named_parameters(), decay=0.9999)
trainer = GradientDescentTrainer(
model=self.model,
optimizer=self.optimizer,
data_loader=self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=2,
moving_average=moving_average,
)
trainer.train()
@requires_gpu
def test_trainer_can_run_cuda(self):
self.model.cuda()
trainer = GradientDescentTrainer(
self.model, self.optimizer, self.data_loader, num_epochs=2, cuda_device=0
)
metrics = trainer.train()
assert "peak_worker_0_memory_MB" in metrics
assert isinstance(metrics["peak_worker_0_memory_MB"], float)
assert metrics["peak_worker_0_memory_MB"] > 0
assert "peak_gpu_0_memory_MB" in metrics
assert isinstance(metrics["peak_gpu_0_memory_MB"], float)
@requires_multi_gpu
def test_passing_trainer_multiple_gpus_raises_error(self):
self.model.cuda()
with pytest.raises(ConfigurationError):
GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
num_epochs=2,
cuda_device=[0, 1],
)
def test_data_loader_lazy_epoch_size_correct(self):
num_epochs = 3
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader_lazy,
validation_data_loader=self.validation_data_loader,
num_epochs=num_epochs,
serialization_dir=self.TEST_DIR,
)
assert trainer._batch_num_total == 0
metrics = trainer.train()
epoch = metrics["epoch"]
assert epoch == num_epochs - 1
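        # With batch_size=2, the 4-instance sequence tagging fixture yields 2 batches per epoch,
        # hence the expected count below.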
assert trainer._batch_num_total == num_epochs * 2
def test_data_loader_lazy_epoch_size_correct_custom_epoch_size(self):
batches_per_epoch = 3
num_epochs = 3
data_loader_custom_epoch_lazy = PyTorchDataLoader(
self.instances_lazy,
batch_size=2,
collate_fn=allennlp_collate,
batches_per_epoch=batches_per_epoch,
)
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
data_loader_custom_epoch_lazy,
validation_data_loader=self.validation_data_loader,
num_epochs=num_epochs,
serialization_dir=self.TEST_DIR,
)
assert trainer._batch_num_total == 0
metrics = trainer.train()
epoch = metrics["epoch"]
assert epoch == num_epochs - 1
assert trainer._batch_num_total == num_epochs * batches_per_epoch
def test_trainer_respects_epoch_size_equals_total(self):
batches_per_epoch = 4
num_epochs = 3
data_loader_equal_epoch = PyTorchDataLoader(
self.instances,
batch_size=2,
collate_fn=allennlp_collate,
batches_per_epoch=batches_per_epoch,
)
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
data_loader_equal_epoch,
validation_data_loader=self.validation_data_loader,
num_epochs=num_epochs,
serialization_dir=self.TEST_DIR,
)
assert trainer._batch_num_total == 0
metrics = trainer.train()
epoch = metrics["epoch"]
assert epoch == num_epochs - 1
assert trainer._batch_num_total == num_epochs * batches_per_epoch
    def test_trainer_respects_epoch_size_larger_than_total(self):
batches_per_epoch = 7
num_epochs = 3
data_loader_larger_epoch = PyTorchDataLoader(
self.instances,
batch_size=2,
collate_fn=allennlp_collate,
batches_per_epoch=batches_per_epoch,
)
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
data_loader_larger_epoch,
validation_data_loader=self.validation_data_loader,
num_epochs=num_epochs,
serialization_dir=self.TEST_DIR,
)
assert trainer._batch_num_total == 0
metrics = trainer.train()
epoch = metrics["epoch"]
assert epoch == num_epochs - 1
assert trainer._batch_num_total == num_epochs * batches_per_epoch
    def test_trainer_respects_epoch_size_smaller_than_total(self):
batches_per_epoch = 1
num_epochs = 2
data_loader_smaller_epoch = PyTorchDataLoader(
self.instances,
batch_size=2,
collate_fn=allennlp_collate,
batches_per_epoch=batches_per_epoch,
)
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
data_loader_smaller_epoch,
validation_data_loader=self.validation_data_loader,
num_epochs=num_epochs,
serialization_dir=self.TEST_DIR,
)
assert trainer._batch_num_total == 0
metrics = trainer.train()
epoch = metrics["epoch"]
assert epoch == num_epochs - 1
assert trainer._batch_num_total == num_epochs * batches_per_epoch
def test_trainer_can_resume_training(self):
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=1,
serialization_dir=self.TEST_DIR,
)
trainer.train()
new_trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=3,
serialization_dir=self.TEST_DIR,
)
epoch = new_trainer._restore_checkpoint()
assert epoch == 1
tracker = trainer._metric_tracker
assert tracker.is_best_so_far()
assert tracker._best_so_far is not None
new_trainer.train()
def test_trainer_can_resume_training_for_exponential_moving_average(self):
moving_average = ExponentialMovingAverage(self.model.named_parameters())
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=1,
serialization_dir=self.TEST_DIR,
moving_average=moving_average,
)
trainer.train()
new_moving_average = ExponentialMovingAverage(self.model.named_parameters())
new_trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=3,
serialization_dir=self.TEST_DIR,
moving_average=new_moving_average,
)
epoch = new_trainer._restore_checkpoint()
assert epoch == 1
tracker = trainer._metric_tracker
assert tracker.is_best_so_far()
assert tracker._best_so_far is not None
new_trainer.train()
def test_metric_only_considered_best_so_far_when_strictly_better_than_those_before_it_increasing_metric(
self,
):
new_trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=3,
serialization_dir=self.TEST_DIR,
patience=5,
validation_metric="+test",
)
tracker = new_trainer._metric_tracker
# when it is the only metric it should be considered the best
new_tracker = copy.deepcopy(tracker)
new_tracker.add_metric(1)
assert new_tracker.is_best_so_far()
# when it is the same as one before it it is not considered the best
new_tracker = copy.deepcopy(tracker)
new_tracker.add_metrics([0.3, 0.3, 0.3, 0.2, 0.5, 0.1, 0.3])
assert not new_tracker.is_best_so_far()
# when it is the best it is considered the best
new_tracker = copy.deepcopy(tracker)
new_tracker.add_metrics([0.3, 0.3, 0.3, 0.2, 0.5, 0.1, 13])
assert new_tracker.is_best_so_far()
        # when it is not the best it is not considered the best
new_tracker = copy.deepcopy(tracker)
new_tracker.add_metrics([0.3, 0.3, 0.3, 0.2, 0.5, 0.1, 0.0013])
assert not new_tracker.is_best_so_far()
def test_metric_only_considered_best_so_far_when_strictly_better_than_those_before_it_decreasing_metric(
self,
):
new_trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=3,
serialization_dir=self.TEST_DIR,
patience=5,
validation_metric="-test",
)
tracker = new_trainer._metric_tracker
# when it is the only metric it should be considered the best
new_tracker = copy.deepcopy(tracker)
new_tracker.add_metric(1)
assert new_tracker.is_best_so_far()
# when it is the same as one before it it is not considered the best
new_tracker = copy.deepcopy(tracker)
new_tracker.add_metrics([0.3, 0.3, 0.3, 0.2, 0.5, 0.1, 0.3])
assert not new_tracker.is_best_so_far()
# when it is the best it is considered the best
new_tracker = copy.deepcopy(tracker)
new_tracker.add_metrics([0.3, 0.3, 0.3, 0.2, 0.5, 0.1, 0.0013])
assert new_tracker.is_best_so_far()
        # when it is not the best it is not considered the best
        new_tracker = copy.deepcopy(tracker)
        new_tracker.add_metrics([0.3, 0.3, 0.3, 0.2, 0.5, 0.1, 13])
        assert not new_tracker.is_best_so_far()
def test_should_stop_early_with_increasing_metric(self):
new_trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=3,
serialization_dir=self.TEST_DIR,
patience=5,
validation_metric="+test",
)
tracker = new_trainer._metric_tracker
new_tracker = copy.deepcopy(tracker)
new_tracker.add_metrics([0.5, 0.3, 0.2, 0.1, 0.4, 0.4])
assert new_tracker.should_stop_early()
new_tracker = copy.deepcopy(tracker)
new_tracker.add_metrics([0.3, 0.3, 0.3, 0.2, 0.5, 0.1])
assert not new_tracker.should_stop_early()
def test_should_stop_early_with_flat_lining_metric(self):
flatline = [0.2] * 6
tracker = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=3,
serialization_dir=self.TEST_DIR,
patience=5,
validation_metric="+test",
)._metric_tracker
tracker.add_metrics(flatline)
        assert tracker.should_stop_early()
tracker = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=3,
serialization_dir=self.TEST_DIR,
patience=5,
validation_metric="-test",
)._metric_tracker
tracker.add_metrics(flatline)
        assert tracker.should_stop_early()
def test_should_stop_early_with_decreasing_metric(self):
new_trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=3,
serialization_dir=self.TEST_DIR,
patience=5,
validation_metric="-test",
)
tracker = new_trainer._metric_tracker
new_tracker = copy.deepcopy(tracker)
new_tracker.add_metrics([0.02, 0.3, 0.2, 0.1, 0.4, 0.4])
assert new_tracker.should_stop_early()
new_tracker = copy.deepcopy(tracker)
new_tracker.add_metrics([0.3, 0.3, 0.2, 0.1, 0.4, 0.5])
assert not new_tracker.should_stop_early()
new_tracker = copy.deepcopy(tracker)
new_tracker.add_metrics([0.1, 0.3, 0.2, 0.1, 0.4, 0.5])
assert new_tracker.should_stop_early()
def test_should_stop_early_with_early_stopping_disabled(self):
# Increasing metric
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=100,
patience=None,
validation_metric="+test",
)
tracker = trainer._metric_tracker
tracker.add_metrics([float(i) for i in reversed(range(20))])
assert not tracker.should_stop_early()
# Decreasing metric
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=100,
patience=None,
validation_metric="-test",
)
tracker = trainer._metric_tracker
tracker.add_metrics([float(i) for i in range(20)])
assert not tracker.should_stop_early()
def test_should_stop_early_with_invalid_patience(self):
for patience in [0, -1, -2, 1.5, "None"]:
with pytest.raises(
ConfigurationError,
match='.* is an invalid value for "patience": '
"it must be a positive integer or None "
"\\(if you want to disable early stopping\\)",
):
GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=100,
patience=patience,
validation_metric="+test",
)
def test_trainer_can_run_and_resume_with_momentum_scheduler(self):
scheduler = MomentumScheduler.from_params(
optimizer=self.optimizer,
params=Params({"type": "inverted_triangular", "cool_down": 2, "warm_up": 2}),
)
trainer = GradientDescentTrainer(
model=self.model,
optimizer=self.optimizer,
data_loader=self.data_loader,
momentum_scheduler=scheduler,
validation_metric="-loss",
validation_data_loader=self.validation_data_loader,
num_epochs=4,
serialization_dir=self.TEST_DIR,
)
trainer.train()
new_scheduler = MomentumScheduler.from_params(
optimizer=self.optimizer,
params=Params({"type": "inverted_triangular", "cool_down": 2, "warm_up": 2}),
)
new_trainer = GradientDescentTrainer(
model=self.model,
optimizer=self.optimizer,
data_loader=self.data_loader,
momentum_scheduler=new_scheduler,
validation_metric="-loss",
validation_data_loader=self.validation_data_loader,
num_epochs=6,
serialization_dir=self.TEST_DIR,
)
epoch = new_trainer._restore_checkpoint()
assert epoch == 4
assert new_trainer._momentum_scheduler.last_epoch == 3
new_trainer.train()
def test_trainer_can_run_with_lr_scheduler(self):
lr_scheduler = ExponentialLearningRateScheduler(self.optimizer, gamma=0.5)
trainer = GradientDescentTrainer(
model=self.model,
optimizer=self.optimizer,
data_loader=self.data_loader,
learning_rate_scheduler=lr_scheduler,
validation_metric="-loss",
validation_data_loader=self.validation_data_loader,
num_epochs=2,
)
trainer.train()
def test_trainer_can_resume_with_lr_scheduler(self):
lr_scheduler = CosineWithRestarts(self.optimizer, t_initial=5)
trainer = GradientDescentTrainer(
model=self.model,
optimizer=self.optimizer,
data_loader=self.data_loader,
learning_rate_scheduler=lr_scheduler,
validation_data_loader=self.validation_data_loader,
num_epochs=2,
serialization_dir=self.TEST_DIR,
)
trainer.train()
new_lr_scheduler = CosineWithRestarts(self.optimizer, t_initial=5)
new_trainer = GradientDescentTrainer(
model=self.model,
optimizer=self.optimizer,
data_loader=self.data_loader,
learning_rate_scheduler=new_lr_scheduler,
validation_data_loader=self.validation_data_loader,
num_epochs=4,
serialization_dir=self.TEST_DIR,
)
epoch = new_trainer._restore_checkpoint()
assert epoch == 2
assert new_trainer._learning_rate_scheduler.last_epoch == 1
new_trainer.train()
def test_trainer_raises_on_model_with_no_loss_key(self):
class FakeModel(Model):
def forward(self, **kwargs):
return {}
with pytest.raises(RuntimeError):
trainer = GradientDescentTrainer(
FakeModel(None),
self.optimizer,
self.data_loader,
num_epochs=2,
serialization_dir=self.TEST_DIR,
)
trainer.train()
def test_trainer_can_log_histograms(self):
# enable activation logging
for module in self.model.modules():
module.should_log_activations = True
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
num_epochs=3,
serialization_dir=self.TEST_DIR,
tensorboard_writer=TensorboardWriter(
serialization_dir=self.TEST_DIR, histogram_interval=2
),
)
trainer.train()
def test_trainer_respects_num_serialized_models_to_keep(self):
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
num_epochs=5,
serialization_dir=self.TEST_DIR,
checkpointer=Checkpointer(
serialization_dir=self.TEST_DIR, num_serialized_models_to_keep=3
),
)
trainer.train()
# Now check the serialized files
for prefix in ["model_state_epoch_*", "training_state_epoch_*"]:
file_names = glob.glob(os.path.join(self.TEST_DIR, prefix))
epochs = [int(re.search(r"_([0-9])\.th", fname).group(1)) for fname in file_names]
assert sorted(epochs) == [2, 3, 4]
def test_trainer_saves_metrics_every_epoch(self):
trainer = GradientDescentTrainer(
model=self.model,
optimizer=self.optimizer,
data_loader=self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=5,
serialization_dir=self.TEST_DIR,
checkpointer=Checkpointer(
serialization_dir=self.TEST_DIR, num_serialized_models_to_keep=3
),
)
trainer.train()
for epoch in range(5):
epoch_file = self.TEST_DIR / f"metrics_epoch_{epoch}.json"
assert epoch_file.exists()
metrics = json.load(open(epoch_file))
assert "validation_loss" in metrics
assert "best_validation_loss" in metrics
assert metrics.get("epoch") == epoch
def test_trainer_respects_keep_serialized_model_every_num_seconds(self):
# To test:
        # Create a fake data loader that sleeps for 2.5 seconds per epoch, so the total
        # training time for one epoch is slightly greater than 2.5 seconds.
        # Run for 6 epochs, keeping the last 2 models and also keeping a model every 5 seconds.
        # Check the resulting checkpoints: we should then have models at epochs
        # 2 and 4, plus the last two at 5 and 6.
class SlowDataLoader:
data_loader = DataLoader(self.instances, batch_size=2, collate_fn=allennlp_collate)
def __iter__(self):
time.sleep(2.5)
return iter(self.data_loader)
def __len__(self):
return len(self.data_loader)
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
SlowDataLoader(),
num_epochs=6,
serialization_dir=self.TEST_DIR,
checkpointer=Checkpointer(
serialization_dir=self.TEST_DIR,
num_serialized_models_to_keep=2,
keep_serialized_model_every_num_seconds=5,
),
)
trainer.train()
# Now check the serialized files
for prefix in ["model_state_epoch_*", "training_state_epoch_*"]:
file_names = glob.glob(os.path.join(self.TEST_DIR, prefix))
epochs = [int(re.search(r"_([0-9])\.th", fname).group(1)) for fname in file_names]
# epoch N has N-1 in file name
assert sorted(epochs) == [1, 3, 4, 5]
def test_trainer_can_log_learning_rates_tensorboard(self):
data_loader = DataLoader(self.instances, batch_size=4, collate_fn=allennlp_collate)
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
data_loader,
num_epochs=2,
serialization_dir=self.TEST_DIR,
tensorboard_writer=TensorboardWriter(
serialization_dir=self.TEST_DIR,
should_log_learning_rate=True,
summary_interval=2,
),
)
trainer.train()
def test_trainer_saves_models_at_specified_interval(self):
data_loader = DataLoader(self.instances, batch_size=4, collate_fn=allennlp_collate)
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
data_loader,
num_epochs=2,
serialization_dir=self.TEST_DIR,
checkpointer=Checkpointer(
serialization_dir=self.TEST_DIR,
model_save_interval=0.0001,
num_serialized_models_to_keep=10,
),
)
trainer.train()
# Now check the serialized files for models saved during the epoch.
prefix = "model_state_epoch_*"
file_names = sorted(glob.glob(os.path.join(self.TEST_DIR, prefix)))
epochs = [re.search(r"_([0-9\.\-]+)\.th", fname).group(1) for fname in file_names]
# We should have checkpoints at the end of each epoch and during each, e.g.
# [0.timestamp, 0, 1.timestamp, 1]
assert len(epochs) == 4
assert epochs[3] == "1"
assert "." in epochs[0]
# Now make certain we can restore from timestamped checkpoint.
        # To do so, remove the checkpoints from the end of epochs 1 and 2, so
# that we are forced to restore from the timestamped checkpoints.
for k in range(2):
os.remove(os.path.join(self.TEST_DIR, "model_state_epoch_{}.th".format(k)))
os.remove(os.path.join(self.TEST_DIR, "training_state_epoch_{}.th".format(k)))
os.remove(os.path.join(self.TEST_DIR, "best.th"))
restore_trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
num_epochs=2,
serialization_dir=self.TEST_DIR,
checkpointer=Checkpointer(serialization_dir=self.TEST_DIR, model_save_interval=0.0001),
)
epoch = restore_trainer._restore_checkpoint()
assert epoch == 2
# One batch per epoch.
assert restore_trainer._batch_num_total == 2
def test_trainer_saves_and_loads_best_validation_metrics_correctly_1(self):
# Use -loss and run 1 epoch of original-training, and one of restored-training
# Run 1 epoch of original training.
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
validation_metric="-loss",
num_epochs=1,
serialization_dir=self.TEST_DIR,
)
trainer.train()
_ = trainer._restore_checkpoint()
best_epoch_1 = trainer._metric_tracker.best_epoch
best_validation_metrics_epoch_1 = trainer._metric_tracker.best_epoch_metrics
# best_validation_metrics_epoch_1: {'accuracy': 0.75, 'accuracy3': 1.0, 'loss': 0.6243013441562653}
assert isinstance(best_validation_metrics_epoch_1, dict)
assert "loss" in best_validation_metrics_epoch_1
# Run 1 epoch of restored training.
restore_trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
validation_metric="-loss",
num_epochs=2,
serialization_dir=self.TEST_DIR,
)
restore_trainer.train()
_ = restore_trainer._restore_checkpoint()
best_epoch_2 = restore_trainer._metric_tracker.best_epoch
best_validation_metrics_epoch_2 = restore_trainer._metric_tracker.best_epoch_metrics
        # Because of using -loss, the 2nd epoch should be better than the 1st, so the best validation metrics should differ.
assert best_epoch_1 == 0 and best_epoch_2 == 1
assert best_validation_metrics_epoch_2 != best_validation_metrics_epoch_1
def test_trainer_saves_and_loads_best_validation_metrics_correctly_2(self):
        # Use +loss and run 1 epoch of original training, and one more epoch of restored training.
# Run 1 epoch of original training.
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
validation_metric="+loss",
num_epochs=1,
serialization_dir=self.TEST_DIR,
)
trainer.train()
_ = trainer._restore_checkpoint()
best_epoch_1 = trainer._metric_tracker.best_epoch
best_validation_metrics_epoch_1 = trainer._metric_tracker.best_epoch_metrics
# best_validation_metrics_epoch_1: {'accuracy': 0.75, 'accuracy3': 1.0, 'loss': 0.6243013441562653}
assert isinstance(best_validation_metrics_epoch_1, dict)
assert "loss" in best_validation_metrics_epoch_1
# Run 1 more epoch of restored training.
restore_trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
validation_metric="+loss",
num_epochs=2,
serialization_dir=self.TEST_DIR,
)
restore_trainer.train()
_ = restore_trainer._restore_checkpoint()
best_epoch_2 = restore_trainer._metric_tracker.best_epoch
best_validation_metrics_epoch_2 = restore_trainer._metric_tracker.best_epoch_metrics
        # Because of using +loss, the 2nd epoch won't be better than the 1st, so the best validation metrics should be the same.
assert best_epoch_1 == best_epoch_2 == 0
assert best_validation_metrics_epoch_2 == best_validation_metrics_epoch_1
def test_restored_training_returns_best_epoch_metrics_even_if_no_better_epoch_is_found_after_restoring(
self,
):
        # Instead of -loss, use +loss to ensure the 2nd epoch is considered worse.
# Run 1 epoch of original training.
original_trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
validation_metric="+loss",
num_epochs=1,
serialization_dir=self.TEST_DIR,
)
training_metrics = original_trainer.train()
# Run 1 epoch of restored training.
restored_trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
validation_metric="+loss",
num_epochs=2,
serialization_dir=self.TEST_DIR,
)
restored_metrics = restored_trainer.train()
assert "best_validation_loss" in restored_metrics
assert "best_validation_accuracy" in restored_metrics
assert "best_validation_accuracy3" in restored_metrics
assert "best_epoch" in restored_metrics
        # Epoch 2 validation loss should be lower than that of Epoch 1
assert training_metrics["best_validation_loss"] == restored_metrics["best_validation_loss"]
assert training_metrics["best_epoch"] == 0
assert training_metrics["validation_loss"] > restored_metrics["validation_loss"]
def test_restoring_works_with_older_checkpointing(self):
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=3,
serialization_dir=self.TEST_DIR,
checkpointer=Checkpointer(
serialization_dir=self.TEST_DIR, num_serialized_models_to_keep=4
),
)
trainer.train()
for index in range(3):
path = str(self.TEST_DIR / "training_state_epoch_{}.th".format(index))
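            # Simulate the older checkpoint format, which stored a plain list of per-epoch
            # validation metrics instead of a serialized metric tracker and batch count.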
state = torch.load(path)
state.pop("metric_tracker")
state.pop("batch_num_total")
state["val_metric_per_epoch"] = [0.4, 0.1, 0.8]
torch.save(state, path)
next_epoch = trainer._restore_checkpoint()
best_epoch = trainer._metric_tracker.best_epoch
        # Loss decreases over the 3 epochs, but because we hard-coded the validation metrics above:
assert next_epoch == 3
assert best_epoch == 1
assert trainer._metric_tracker._best_so_far == 0.1
assert trainer._metric_tracker._epochs_with_no_improvement == 1
def test_trainer_can_run_gradient_accumulation(self):
instances = list(self.instances)
steps_to_accumulate = 2
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
validation_data_loader=self.validation_data_loader,
num_epochs=2,
num_gradient_accumulation_steps=steps_to_accumulate,
)
assert trainer._num_gradient_accumulation_steps == steps_to_accumulate
metrics = trainer.train()
num_batches_trained_per_epoch = trainer._batch_num_total // (metrics["training_epochs"] + 1)
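        # Each optimizer step consumes `steps_to_accumulate` data-loader batches, so the number of
        # counted batches per epoch is the raw batch count divided by the accumulation steps
        # (rounded up), which is what the expected value below computes.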
num_batches_expected = math.ceil(
math.ceil(len(instances) / self.data_loader.batch_size) / steps_to_accumulate
)
assert num_batches_trained_per_epoch == num_batches_expected
def test_batch_callback_is_called_at_every_batch(self):
class FakeBatchCallback(BatchCallback):
def __call__(
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[List[TensorDict]],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
if not hasattr(trainer, "batch_callback_calls"):
trainer.batch_callback_calls = [] # type: ignore
trainer.batch_callback_calls.append((epoch, batch_number, is_training)) # type: ignore
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
num_epochs=2,
validation_data_loader=self.validation_data_loader,
batch_callbacks=[FakeBatchCallback()],
)
trainer.train()
expected_calls = [
(epoch, batch_number + 1, is_train)
for epoch in range(2)
for is_train in (True, False)
for batch_number in range(len(self.instances) // 2)
]
assert trainer.batch_callback_calls == expected_calls
def test_epoch_callback_is_called_at_every_epoch(self):
class FakeEpochCallback(EpochCallback):
def __call__(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
if not hasattr(trainer, "epoch_callback_calls"):
trainer.epoch_callback_calls = [] # type: ignore
trainer.epoch_callback_calls.append(epoch) # type: ignore
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
num_epochs=4,
validation_data_loader=self.validation_data_loader,
epoch_callbacks=[FakeEpochCallback()],
)
trainer.train()
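        # The epoch callback also fires once before training starts, with epoch == -1,
        # which is why the expected list below begins at -1.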
expected_calls = [epoch for epoch in range(-1, 4)]
assert trainer.epoch_callback_calls == expected_calls
def test_track_epoch_callback(self):
num_epochs = 4
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
num_epochs=num_epochs,
validation_data_loader=self.validation_data_loader,
epoch_callbacks=[TrackEpochCallback()],
)
trainer.train()
assert trainer.model.epoch == num_epochs
def test_end_callback_is_called_at_end(self):
class FakeEndCallback(EpochCallback):
def __call__(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
if not hasattr(trainer, "end_callback_calls"):
trainer.end_callback_calls = [] # type: ignore
trainer.end_callback_calls.append(epoch) # type: ignore
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
num_epochs=4,
validation_data_loader=self.validation_data_loader,
end_callbacks=[FakeEndCallback()],
)
trainer.train()
expected_calls = [3]
assert trainer.end_callback_calls == expected_calls
def test_trainer_callback_is_called_everywhere(self):
class FakeTrainerCallback(TrainerCallback):
def on_batch(
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[List[TensorDict]],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
if not hasattr(trainer, "batch_callback_calls"):
trainer.batch_callback_calls = [] # type: ignore
trainer.batch_callback_calls.append((epoch, batch_number, is_training)) # type: ignore
def on_epoch(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
if not hasattr(trainer, "epoch_callback_calls"):
trainer.epoch_callback_calls = [] # type: ignore
trainer.epoch_callback_calls.append(epoch) # type: ignore
def on_end(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
if not hasattr(trainer, "end_callback_calls"):
trainer.end_callback_calls = [] # type: ignore
trainer.end_callback_calls.append(epoch) # type: ignore
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
num_epochs=2,
validation_data_loader=self.validation_data_loader,
trainer_callbacks=[FakeTrainerCallback()],
)
trainer.train()
expected_batch_calls = [
(epoch, batch_number + 1, is_train)
for epoch in range(2)
for is_train in (True, False)
for batch_number in range(len(self.instances) // 2)
]
expected_epoch_calls = [epoch for epoch in range(-1, 2)]
expected_end_calls = [1]
assert trainer.batch_callback_calls == expected_batch_calls
assert trainer.epoch_callback_calls == expected_epoch_calls
assert trainer.end_callback_calls == expected_end_calls
def test_total_loss_is_average_of_batch_loss(self):
batches_per_epoch = 3
data_loader_custom_epoch_lazy = PyTorchDataLoader(
self.instances_lazy,
batch_size=2,
collate_fn=allennlp_collate,
batches_per_epoch=batches_per_epoch,
)
class FakeBatchCallback(BatchCallback):
def __call__(
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[List[TensorDict]],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
if not hasattr(trainer, "batch_losses"):
trainer.batch_losses = [] # type: ignore
trainer.batch_losses.append(batch_outputs[0]["loss"].item()) # type: ignore
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
data_loader_custom_epoch_lazy,
num_epochs=1,
batch_callbacks=[FakeBatchCallback()],
)
metrics = trainer.train()
assert metrics["training_loss"] == float(sum(trainer.batch_losses) / batches_per_epoch)
@requires_gpu
class TestAmpTrainer(TrainerTestBase):
@pytest.mark.parametrize(
"grad_norm, num_gradient_accumulation_steps", [(None, 1), (1.0, 1), (1.0, 2)]
)
def test_trainer_can_run_amp(self, grad_norm, num_gradient_accumulation_steps):
self.model.cuda()
trainer = GradientDescentTrainer(
self.model,
self.optimizer,
self.data_loader,
num_epochs=2,
cuda_device=0,
use_amp=True,
            grad_norm=grad_norm,
num_gradient_accumulation_steps=num_gradient_accumulation_steps,
)
_ = trainer.train()
class TestSparseClipGrad(AllenNlpTestCase):
def test_sparse_clip_grad(self):
# create a sparse embedding layer, then take gradient
embedding = torch.nn.Embedding(100, 16, sparse=True)
embedding.zero_grad()
ids = (torch.rand(17) * 100).long()
# Set some of the ids to the same value so that the sparse gradient
# has repeated indices. This tests some additional logic.
ids[:5] = 5
loss = embedding(ids).sum()
loss.backward()
assert embedding.weight.grad.is_sparse
# Now try to clip the gradients.
_ = clip_grad_norm_([embedding.weight], 1.5)
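        # When the pre-clip norm exceeds max_norm, clip_grad_norm_ rescales the gradient by
        # roughly max_norm / total_norm, so the coalesced sparse gradient should end up with norm 1.5.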
# Final norm should be 1.5
grad = embedding.weight.grad.coalesce()
assert grad._values().norm(2.0).item() == pytest.approx(1.5, rel=1e-4)
| allennlp-master | tests/training/trainer_test.py |
allennlp-master | tests/training/__init__.py |
|
from typing import Dict
import torch
import numpy as np
from allennlp.common.params import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.model import Model
from allennlp.training.moving_average import MovingAverage, ExponentialMovingAverage
class MovingAverageTest(AllenNlpTestCase):
def test_from_params(self):
params = Params({"type": "exponential", "decay": 0.99})
_ = MovingAverage.from_params(params, parameters=[])
def test_exponential_moving_average_without_steps(self):
param1 = torch.ones(5, 3)
param2 = torch.ones(2)
moving_average = ExponentialMovingAverage(
[("param1", param1), ("param2", param2)], decay=0.9999
)
param1.data *= 5 # now all 5s
param2.data *= 10 # now all 10s
moving_average.apply()
param1.data *= 5 # now all 25s
param2.data *= 10 # now all 100s
moving_average.apply()
# Get shadow variables
moving_average.assign_average_value()
np.testing.assert_array_almost_equal(
param1, 1 * 0.9999 ** 2 + 5 * 0.9999 * 0.0001 + 25 * 0.0001
)
np.testing.assert_array_almost_equal(
param2, 1 * 0.9999 ** 2 + 10 * 0.9999 * 0.0001 + 100 * 0.0001
)
# Restore original variables
moving_average.restore()
np.testing.assert_array_almost_equal(param1, 25)
np.testing.assert_array_almost_equal(param2, 100)
def test_exponential_moving_average_num_updates(self):
param1 = torch.ones(5, 3)
param2 = torch.ones(2)
moving_average = ExponentialMovingAverage(
[("param1", param1), ("param2", param2)], decay=0.9999
)
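        # When apply() is given num_updates, the effective decay is
        # min(decay, (1 + num_updates) / (10 + num_updates)), which is where the
        # ratios in the comments below come from.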
param1.data *= 5 # now all 5s
param2.data *= 10 # now all 10s
moving_average.apply(num_updates=100) # 101 / 110 ~ 0.92 < 0.9999
param1.data *= 5 # now all 25s
param2.data *= 10 # now all 100s
moving_average.apply(num_updates=1_000_000) # 1_000_001 / 1_000_010 ~ .999991 > .9999
# Get shadow variables
moving_average.assign_average_value()
np.testing.assert_array_almost_equal(
param1, 1 * (101 / 110) * 0.9999 + 5 * (9 / 110) * 0.9999 + 25 * 0.0001
)
np.testing.assert_array_almost_equal(
param2, 1 * (101 / 110) * 0.9999 + 10 * (9 / 110) * 0.9999 + 100 * 0.0001
)
# Restore original variables
moving_average.restore()
np.testing.assert_array_almost_equal(param1, 25)
np.testing.assert_array_almost_equal(param2, 100)
def test_works_with_model(self):
class FakeModel(Model):
def __init__(self) -> None:
super().__init__(None)
self.w = torch.nn.Parameter(torch.randn(1))
def forward(self, t: torch.Tensor) -> Dict[str, torch.Tensor]: # type: ignore
return {"loss": (t * self.w).sum()}
model = FakeModel()
moving_average = ExponentialMovingAverage(model.named_parameters())
optimizer = torch.optim.SGD(list(model.parameters()), lr=0.1)
for _ in range(10):
optimizer.zero_grad()
t = torch.randn(10)
loss = model.forward(t)["loss"]
loss.backward()
optimizer.step()
moving_average.apply()
w_value = model.w.item()
shadow_value = moving_average._shadows["w"].item()
assert w_value != shadow_value
moving_average.assign_average_value()
assert model.w.item() == shadow_value
moving_average.restore()
assert model.w.item() == w_value
# Now keep training:
for _ in range(10):
optimizer.zero_grad()
t = torch.randn(10)
loss = model.forward(t)["loss"]
loss.backward()
optimizer.step()
moving_average.apply()
| allennlp-master | tests/training/moving_average_test.py |
import logging
import os
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.fields import LabelField
from allennlp.models import Model
from allennlp.training.util import make_vocab_from_params, get_metrics
logger = logging.getLogger(__name__)
@pytest.fixture(scope="module", autouse=True)
def train_util_test_reader():
@DatasetReader.register("train-util-test-reader")
class TrainUtilTestReader(DatasetReader):
def _read(self, data_path):
logger.info("...train-util-test-reader reading from %s", data_path)
for i in range(10):
yield self.text_to_instance(i)
def text_to_instance(self, index: int) -> Instance: # type: ignore
return Instance({"index": LabelField(index, skip_indexing=True)})
yield TrainUtilTestReader
del DatasetReader._registry[DatasetReader]["train-util-test-reader"]
class TestMakeVocabFromParams(AllenNlpTestCase):
@pytest.mark.parametrize(
"params",
[
Params(
{
"dataset_reader": {"type": "train-util-test-reader"},
"train_data_path": "path-to-training-file",
"validation_data_path": "path-to-validation-file",
"test_data_path": "path-to-validation-file",
"datasets_for_vocab_creation": [],
}
),
Params(
{
"dataset_reader": {"type": "train-util-test-reader"},
"train_data_path": "path-to-training-file",
"datasets_for_vocab_creation": [],
}
),
Params(
{
"dataset_reader": {"type": "train-util-test-reader"},
"train_data_path": "path-to-training-file",
"validation_data_path": "path-to-validation-file",
"test_data_path": "path-to-validation-file",
"vocabulary": {"type": "empty"},
}
),
],
)
def test_no_instances_read_for_vocab(self, caplog, params):
_ = make_vocab_from_params(params, str(self.TEST_DIR))
log_messages = "\n".join([rec.message for rec in caplog.records])
assert "...train-util-test-reader reading from" not in log_messages
assert "Reading training data" not in log_messages
assert "Reading validation data" not in log_messages
assert "Reading test data" not in log_messages
def test_only_train_read_for_vocab(self, caplog):
params = Params(
{
"dataset_reader": {"type": "train-util-test-reader"},
"train_data_path": "path-to-training-file",
}
)
_ = make_vocab_from_params(params, str(self.TEST_DIR))
log_messages = "\n".join([rec.message for rec in caplog.records])
assert "...train-util-test-reader reading from path-to-training-file" in log_messages
assert "...train-util-test-reader reading from path-to-validation-file" not in log_messages
assert "...train-util-test-reader reading from path-to-test-file" not in log_messages
assert "Reading training data" in log_messages
assert "Reading validation data" not in log_messages
assert "Reading test data" not in log_messages
def test_all_datasets_read_for_vocab(self, caplog):
params = Params(
{
"dataset_reader": {"type": "train-util-test-reader"},
"train_data_path": "path-to-training-file",
"validation_data_path": "path-to-validation-file",
"test_data_path": "path-to-test-file",
}
)
_ = make_vocab_from_params(params, str(self.TEST_DIR))
log_messages = "\n".join([rec.message for rec in caplog.records])
assert "...train-util-test-reader reading from path-to-training-file" in log_messages
assert "...train-util-test-reader reading from path-to-validation-file" in log_messages
assert "...train-util-test-reader reading from path-to-test-file" in log_messages
assert "Reading training data" in log_messages
assert "Reading validation data" in log_messages
assert "Reading test data" in log_messages
def test_only_specified_datasets_read_for_vocab(self, caplog):
params = Params(
{
"dataset_reader": {"type": "train-util-test-reader"},
"train_data_path": "path-to-training-file",
"validation_data_path": "path-to-validation-file",
"test_data_path": "path-to-test-file",
"datasets_for_vocab_creation": ["train", "validation"],
}
)
_ = make_vocab_from_params(params, str(self.TEST_DIR))
log_messages = "\n".join([rec.message for rec in caplog.records])
assert "...train-util-test-reader reading from path-to-training-file" in log_messages
assert "...train-util-test-reader reading from path-to-validation-file" in log_messages
assert "...train-util-test-reader reading from path-to-test-file" not in log_messages
assert "Reading training data" in log_messages
assert "Reading validation data" in log_messages
assert "Reading test data" not in log_messages
    def test_using_separate_validation_reader(self, caplog):
params = Params(
{
"dataset_reader": {"type": "train-util-test-reader"},
"validation_dataset_reader": {"type": "train-util-test-reader"},
"train_data_path": "path-to-training-file",
"validation_data_path": "path-to-validation-file",
}
)
_ = make_vocab_from_params(params, str(self.TEST_DIR))
log_messages = "\n".join([rec.message for rec in caplog.records])
assert "Using a separate dataset reader to load validation and test data" in log_messages
def test_invalid_datasets_for_vocab_creation(self):
params = Params(
{
"dataset_reader": {"type": "train-util-test-reader"},
"train_data_path": "path-to-training-file",
"validation_data_path": "path-to-validation-file",
"datasets_for_vocab_creation": ["train", "validation", "test"],
}
)
with pytest.raises(ConfigurationError, match="invalid 'datasets_for_vocab_creation' test"):
make_vocab_from_params(params, str(self.TEST_DIR))
def test_raise_error_if_directory_non_empty(self):
params = Params(
{
"dataset_reader": {"type": "train-util-test-reader"},
"train_data_path": "path-to-training-file",
"validation_data_path": "path-to-validation-file",
}
)
os.makedirs(self.TEST_DIR / "vocabulary")
with open(self.TEST_DIR / "vocabulary" / "blah", "w") as random_file:
random_file.write("BLAH!")
with pytest.raises(ConfigurationError, match="The 'vocabulary' directory in the provided"):
make_vocab_from_params(params, str(self.TEST_DIR))
def test_get_metrics(self):
class FakeModel(Model):
def forward(self, **kwargs):
return {}
model = FakeModel(None)
total_loss = 100.0
batch_loss = 10.0
num_batches = 2
metrics = get_metrics(model, total_loss, None, batch_loss, None, num_batches)
assert metrics["loss"] == float(total_loss / num_batches)
assert metrics["batch_loss"] == batch_loss
metrics = get_metrics(model, total_loss, None, None, None, num_batches)
assert metrics["loss"] == float(total_loss / num_batches)
assert "batch_loss" not in metrics
| allennlp-master | tests/training/util_test.py |
import os
import re
import time
from contextlib import contextmanager
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.params import Params
from allennlp.training import Checkpointer, Trainer
class FakeTrainer(Trainer):
def __init__(self, model_state, training_states):
self._model_state = model_state
self._training_states = training_states
@contextmanager
def get_checkpoint_state(self):
yield self._model_state, self._training_states
class TestCheckpointer(AllenNlpTestCase):
def retrieve_and_delete_saved(self):
"""
Helper function for the tests below. Finds the weight and training state files in
self.TEST_DIR, parses their names for the epochs that were saved, deletes them,
and returns the saved epochs as two lists of integers.
"""
serialization_files = os.listdir(self.TEST_DIR)
model_checkpoints = [x for x in serialization_files if "model_state_epoch" in x]
found_model_epochs = [
int(re.search(r"model_state_epoch_([0-9\.\-]+)\.th", x).group(1))
for x in model_checkpoints
]
for f in model_checkpoints:
os.remove(os.path.join(self.TEST_DIR, f))
training_checkpoints = [x for x in serialization_files if "training_state_epoch" in x]
found_training_epochs = [
int(re.search(r"training_state_epoch_([0-9\.\-]+)\.th", x).group(1))
for x in training_checkpoints
]
for f in training_checkpoints:
os.remove(os.path.join(self.TEST_DIR, f))
return sorted(found_model_epochs), sorted(found_training_epochs)
def test_default(self):
"""
Tests that the default behavior keeps just the last 2 checkpoints.
"""
default_num_to_keep = 2
num_epochs = 30
target = list(range(num_epochs - default_num_to_keep, num_epochs))
checkpointer = Checkpointer(serialization_dir=self.TEST_DIR)
for e in range(num_epochs):
checkpointer.save_checkpoint(
epoch=e,
trainer=FakeTrainer(model_state={"epoch": e}, training_states={"epoch": e}),
is_best_so_far=False,
)
models, training = self.retrieve_and_delete_saved()
assert models == training == target
def test_keep_zero(self):
checkpointer = Checkpointer(
serialization_dir=self.TEST_DIR, num_serialized_models_to_keep=0
)
for e in range(10):
checkpointer.save_checkpoint(
epoch=e,
trainer=FakeTrainer(model_state={"epoch": e}, training_states={"epoch": e}),
is_best_so_far=True,
)
files = os.listdir(self.TEST_DIR)
assert "model_state_epoch_1.th" not in files
assert "training_state_epoch_1.th" not in files
def test_with_time(self):
"""
        Tests that the keep_serialized_model_every_num_seconds parameter causes additional
        checkpoints to be kept once enough time has elapsed between epochs.
"""
num_to_keep = 10
num_epochs = 30
target = list(range(num_epochs - num_to_keep, num_epochs))
pauses = [5, 18, 26]
target = sorted(set(target + pauses))
checkpointer = Checkpointer(
serialization_dir=self.TEST_DIR,
num_serialized_models_to_keep=num_to_keep,
keep_serialized_model_every_num_seconds=1,
)
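        # Roughly: a checkpoint is retained permanently whenever at least
        # keep_serialized_model_every_num_seconds (1s here) has passed since the last permanently
        # kept one, so the epochs around each 2-second pause survive in addition to the newest 10.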
for e in range(num_epochs):
if e in pauses:
time.sleep(2)
checkpointer.save_checkpoint(
epoch=e,
trainer=FakeTrainer(model_state={"epoch": e}, training_states={"epoch": e}),
is_best_so_far=False,
)
models, training = self.retrieve_and_delete_saved()
assert models == training == target
def test_registered_subclass(self):
"""
Tests that registering Checkpointer subclasses works correctly.
"""
serialization_dir = str(self.TEST_DIR)
@Checkpointer.register("checkpointer_subclass")
class CheckpointerSubclass(Checkpointer):
def __init__(self, x: int, y: int) -> None:
super().__init__(serialization_dir)
self.x = x
self.y = y
sub_inst = Checkpointer.from_params(
Params({"type": "checkpointer_subclass", "x": 1, "y": 3})
)
assert sub_inst.__class__ == CheckpointerSubclass
assert sub_inst.x == 1 and sub_inst.y == 3
def test_base_class_from_params(self):
Checkpointer.from_params(Params({}), serialization_dir=self.TEST_DIR)
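# A minimal, module-level usage sketch mirroring the Checkpointer calls exercised above.
# `_checkpointer_usage_sketch` is a hypothetical helper added for illustration; it is not
# part of the Checkpointer API, and the serialization directory is supplied by the caller.
def _checkpointer_usage_sketch(serialization_dir: str, num_epochs: int = 5) -> None:
    checkpointer = Checkpointer(
        serialization_dir=serialization_dir, num_serialized_models_to_keep=2
    )
    for epoch in range(num_epochs):
        # A real Trainer would expose genuine model/optimizer state here; the FakeTrainer
        # defined above stands in for it with a tiny dictionary.
        fake_trainer = FakeTrainer(model_state={"epoch": epoch}, training_states={"epoch": epoch})
        checkpointer.save_checkpoint(epoch=epoch, trainer=fake_trainer, is_best_so_far=False)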
| allennlp-master | tests/training/checkpointer_test.py |
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
run_distributed_test,
global_distributed_metric,
)
from allennlp.training.metrics import Average
class AverageTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.metric = Average()
@multi_device
def test_distributed_average(self, device: str):
device_ids = [-1, -1] if device == "cpu" else [0, 1]
metric_kwargs = {
"value": [1.0, 2.0],
}
run_distributed_test(
device_ids,
global_distributed_metric,
self.metric,
metric_kwargs,
1.5,
exact=True,
)
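# A small single-process sketch of the same metric the distributed test above drives with
# `value=[1.0, 2.0]`. `_average_usage_sketch` is a hypothetical helper added for illustration.
def _average_usage_sketch() -> float:
    metric = Average()
    for value in (1.0, 2.0):
        metric(value=value)  # accumulate one scalar at a time
    return metric.get_metric(reset=True)  # expected to be 1.5, matching the distributed test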
| allennlp-master | tests/training/metrics/average_test.py |
from typing import Any, Dict, List, Tuple, Union
import pytest
import torch
from torch.testing import assert_allclose
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp.training.metrics import CategoricalAccuracy
class CategoricalAccuracyTest(AllenNlpTestCase):
@multi_device
def test_categorical_accuracy(self, device: str):
accuracy = CategoricalAccuracy()
predictions = torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0]], device=device
)
targets = torch.tensor([0, 3], device=device)
accuracy(predictions, targets)
actual_accuracy = accuracy.get_metric()
assert actual_accuracy == 0.50
@multi_device
def test_top_k_categorical_accuracy(self, device: str):
accuracy = CategoricalAccuracy(top_k=2)
predictions = torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0]], device=device
)
targets = torch.tensor([0, 3], device=device)
accuracy(predictions, targets)
actual_accuracy = accuracy.get_metric()
assert actual_accuracy == 1.0
@multi_device
def test_top_k_categorical_accuracy_accumulates_and_resets_correctly(self, device: str):
accuracy = CategoricalAccuracy(top_k=2)
predictions = torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0]], device=device
)
targets = torch.tensor([0, 3], device=device)
accuracy(predictions, targets)
accuracy(predictions, targets)
accuracy(predictions, torch.tensor([4, 4], device=device))
accuracy(predictions, torch.tensor([4, 4], device=device))
actual_accuracy = accuracy.get_metric(reset=True)
assert actual_accuracy == 0.50
assert accuracy.correct_count == 0.0
assert accuracy.total_count == 0.0
@multi_device
def test_top_k_categorical_accuracy_respects_mask(self, device: str):
accuracy = CategoricalAccuracy(top_k=2)
predictions = torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0], [0.1, 0.2, 0.5, 0.2, 0.0]],
device=device,
)
targets = torch.tensor([0, 3, 0], device=device)
mask = torch.tensor([False, True, True], device=device)
accuracy(predictions, targets, mask)
actual_accuracy = accuracy.get_metric()
assert_allclose(actual_accuracy, 0.50)
@multi_device
def test_top_k_categorical_accuracy_works_for_sequences(self, device: str):
accuracy = CategoricalAccuracy(top_k=2)
predictions = torch.tensor(
[
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0], [0.1, 0.6, 0.1, 0.2, 0.0]],
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0], [0.1, 0.6, 0.1, 0.2, 0.0]],
],
device=device,
)
targets = torch.tensor([[0, 3, 4], [0, 1, 4]], device=device)
accuracy(predictions, targets)
actual_accuracy = accuracy.get_metric(reset=True)
assert_allclose(actual_accuracy, 0.6666666)
# Test the same thing but with a mask:
mask = torch.tensor([[False, True, True], [True, False, True]], device=device)
accuracy(predictions, targets, mask)
actual_accuracy = accuracy.get_metric(reset=True)
assert_allclose(actual_accuracy, 0.50)
@multi_device
def test_top_k_categorical_accuracy_catches_exceptions(self, device: str):
accuracy = CategoricalAccuracy()
predictions = torch.rand([5, 7], device=device)
out_of_range_labels = torch.tensor([10, 3, 4, 0, 1], device=device)
with pytest.raises(ConfigurationError):
accuracy(predictions, out_of_range_labels)
@multi_device
def test_tie_break_categorical_accuracy(self, device: str):
accuracy = CategoricalAccuracy(tie_break=True)
predictions = torch.tensor(
[[0.35, 0.25, 0.35, 0.35, 0.35], [0.1, 0.6, 0.1, 0.2, 0.2], [0.1, 0.0, 0.1, 0.2, 0.2]],
device=device,
)
# Test without mask:
targets = torch.tensor([2, 1, 4], device=device)
accuracy(predictions, targets)
assert accuracy.get_metric(reset=True) == (0.25 + 1 + 0.5) / 3.0
        # Test with mask:
mask = torch.tensor([True, False, True], device=device)
targets = torch.tensor([2, 1, 4], device=device)
accuracy(predictions, targets, mask)
assert accuracy.get_metric(reset=True) == (0.25 + 0.5) / 2.0
        # Test tie-break with sequence:
predictions = torch.tensor(
[
[
[0.35, 0.25, 0.35, 0.35, 0.35],
[0.1, 0.6, 0.1, 0.2, 0.2],
[0.1, 0.0, 0.1, 0.2, 0.2],
],
[
[0.35, 0.25, 0.35, 0.35, 0.35],
[0.1, 0.6, 0.1, 0.2, 0.2],
[0.1, 0.0, 0.1, 0.2, 0.2],
],
],
device=device,
)
targets = torch.tensor(
            # Per-row tie-break credit: 0.25 + 1 + 0.5 and 0.25 + 0 + 0.5, i.e. 2.5 in total.
            [[0, 1, 3], [0, 3, 4]], device=device
)
accuracy(predictions, targets)
actual_accuracy = accuracy.get_metric(reset=True)
assert_allclose(actual_accuracy, 2.5 / 6.0)
@multi_device
def test_top_k_and_tie_break_together_catches_exceptions(self, device: str):
with pytest.raises(ConfigurationError):
CategoricalAccuracy(top_k=2, tie_break=True)
@multi_device
def test_incorrect_top_k_catches_exceptions(self, device: str):
with pytest.raises(ConfigurationError):
CategoricalAccuracy(top_k=0)
@multi_device
def test_does_not_divide_by_zero_with_no_count(self, device: str):
accuracy = CategoricalAccuracy()
assert accuracy.get_metric() == pytest.approx(0.0)
def test_distributed_accuracy(self):
predictions = [
torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2]]),
torch.tensor([[0.1, 0.6, 0.1, 0.2, 0.0]]),
]
targets = [torch.tensor([0]), torch.tensor([3])]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_accuracy = 0.5
run_distributed_test(
[-1, -1],
global_distributed_metric,
CategoricalAccuracy(),
metric_kwargs,
desired_accuracy,
exact=False,
)
def test_distributed_accuracy_unequal_batches(self):
predictions = [
torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0]]),
torch.tensor([[0.1, 0.2, 0.5, 0.2, 0.0]]),
]
targets = [torch.tensor([0, 3]), torch.tensor([0])]
mask = [torch.tensor([False, True]), torch.tensor([True])]
metric_kwargs = {"predictions": predictions, "gold_labels": targets, "mask": mask}
desired_accuracy = 0.5
run_distributed_test(
[-1, -1],
global_distributed_metric,
CategoricalAccuracy(top_k=2),
metric_kwargs,
desired_accuracy,
exact=False,
)
def test_multiple_distributed_runs(self):
predictions = [
torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2]]),
torch.tensor([[0.1, 0.6, 0.1, 0.2, 0.0]]),
]
targets = [torch.tensor([0]), torch.tensor([3])]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_accuracy = 0.5
run_distributed_test(
[-1, -1],
multiple_runs,
CategoricalAccuracy(),
metric_kwargs,
desired_accuracy,
exact=True,
)
def multiple_runs(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: CategoricalAccuracy,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
for i in range(200):
metric(**kwargs)
assert desired_values == metric.get_metric()
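# A compact single-process sketch of the top-k behaviour tested above: with top_k=2, both
# gold labels fall inside the two highest-scoring classes, so accuracy is 1.0.
# `_top_k_accuracy_sketch` is a hypothetical helper added for illustration.
def _top_k_accuracy_sketch() -> float:
    metric = CategoricalAccuracy(top_k=2)
    predictions = torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0]])
    targets = torch.tensor([0, 3])
    metric(predictions, targets)
    return metric.get_metric(reset=True)  # 1.0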
| allennlp-master | tests/training/metrics/categorical_accuracy_test.py |
from typing import Any, Dict, Union
import torch
from torch.testing import assert_allclose
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
run_distributed_test,
)
from allennlp.training.metrics import ROUGE
class RougeTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.metric = ROUGE(exclude_indices={0})
def f1(self, r, p):
if r == p == 0:
return 0
return 2 * r * p / (r + p)
@multi_device
def test_rouge(self, device: str):
self.metric.reset()
predictions = torch.tensor([[1, 0, 1, 2], [1, 0, 3, 0], [1, 2, 3, 0]], device=device)
targets = torch.tensor([[2, 0, 1, 2], [1, 2, 1, 0], [1, 0, 2, 3]], device=device)
self.metric(predictions, targets)
metrics = self.metric.get_metric()
assert self.metric._total_sequence_count == 3
# ROUGE-N
# Unigram
unigram_recall = self.metric._total_rouge_n_recalls[1]
assert unigram_recall == 2 / 3 + 1 / 3 + 3 / 3
unigram_precision = self.metric._total_rouge_n_precisions[1]
assert unigram_precision == 2 / 3 + 1 / 2 + 3 / 3
unigram_f1 = self.metric._total_rouge_n_f1s[1]
assert unigram_f1 == self.f1(2 / 3, 2 / 3) + self.f1(1 / 2, 1 / 3) + self.f1(3 / 3, 3 / 3)
assert metrics["ROUGE-1_R"] == unigram_recall / self.metric._total_sequence_count
assert metrics["ROUGE-1_P"] == unigram_precision / self.metric._total_sequence_count
assert metrics["ROUGE-1_F1"] == unigram_f1 / self.metric._total_sequence_count
# Bigram
bigram_recall = self.metric._total_rouge_n_recalls[2]
assert bigram_recall == 1 / 1 + 0 / 2 + 1 / 1
bigram_precision = self.metric._total_rouge_n_precisions[2]
assert bigram_precision == 1 / 1 + 0 + 1 / 2
bigram_f1 = self.metric._total_rouge_n_f1s[2]
assert bigram_f1 == self.f1(1 / 1, 1 / 1) + self.f1(0, 0 / 2) + self.f1(1 / 2, 1 / 1)
assert metrics["ROUGE-2_R"] == bigram_recall / self.metric._total_sequence_count
assert metrics["ROUGE-2_P"] == bigram_precision / self.metric._total_sequence_count
assert metrics["ROUGE-2_F1"] == bigram_f1 / self.metric._total_sequence_count
# ROUGE-L
assert self.metric._total_rouge_l_f1 == self.f1(2 / 3, 2 / 3) + self.f1(
1 / 3, 1 / 2
) + self.f1(3 / 3, 3 / 3)
assert (
metrics["ROUGE-L"] == self.metric._total_rouge_l_f1 / self.metric._total_sequence_count
)
def test_rouge_with_zero_counts(self):
self.metric.reset()
metrics = self.metric.get_metric()
for score in metrics.values():
assert score == 0.0
def test_distributed_rouge(self):
predictions = [torch.tensor([[1, 0, 1, 2], [1, 0, 3, 0]]), torch.tensor([[1, 2, 3, 0]])]
targets = [torch.tensor([[2, 0, 1, 2], [1, 2, 1, 0]]), torch.tensor([[1, 0, 2, 3]])]
metric_kwargs = {"predictions": predictions, "gold_targets": targets}
desired_values = {}
desired_values["unigram_recall"] = 2 / 3 + 1 / 3 + 3 / 3
desired_values["unigram_precision"] = 2 / 3 + 1 / 2 + 3 / 3
desired_values["unigram_f1"] = (
self.f1(2 / 3, 2 / 3) + self.f1(1 / 2, 1 / 3) + self.f1(3 / 3, 3 / 3)
)
desired_values["bigram_recall"] = 1 / 1 + 0 / 2 + 1 / 1
desired_values["bigram_precision"] = 1 / 1 + 0 + 1 / 2
desired_values["bigram_f1"] = (
self.f1(1 / 1, 1 / 1) + self.f1(0, 0 / 2) + self.f1(1 / 2, 1 / 1)
)
desired_values["total_rouge_l_f1"] = (
self.f1(2 / 3, 2 / 3) + self.f1(1 / 3, 1 / 2) + self.f1(3 / 3, 3 / 3)
)
run_distributed_test(
[-1, -1],
global_distributed_rouge,
ROUGE(exclude_indices={0}),
metric_kwargs,
desired_values,
)
def global_distributed_rouge(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: ROUGE,
metric_kwargs: Dict[str, Any],
desired_values: Dict[str, Any],
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
metric(**kwargs)
metrics = metric.get_metric()
# Unigram
unigram_recall = metric._total_rouge_n_recalls[1]
assert_allclose(unigram_recall, desired_values["unigram_recall"])
unigram_precision = metric._total_rouge_n_precisions[1]
assert_allclose(unigram_precision, desired_values["unigram_precision"])
unigram_f1 = metric._total_rouge_n_f1s[1]
assert_allclose(unigram_f1, desired_values["unigram_f1"])
assert metrics["ROUGE-1_R"] == unigram_recall / metric._total_sequence_count
assert metrics["ROUGE-1_P"] == unigram_precision / metric._total_sequence_count
assert metrics["ROUGE-1_F1"] == unigram_f1 / metric._total_sequence_count
# Bigram
bigram_recall = metric._total_rouge_n_recalls[2]
assert_allclose(bigram_recall, desired_values["bigram_recall"])
bigram_precision = metric._total_rouge_n_precisions[2]
assert_allclose(bigram_precision, desired_values["bigram_precision"])
bigram_f1 = metric._total_rouge_n_f1s[2]
assert_allclose(bigram_f1, desired_values["bigram_f1"])
assert metrics["ROUGE-2_R"] == bigram_recall / metric._total_sequence_count
assert metrics["ROUGE-2_P"] == bigram_precision / metric._total_sequence_count
assert metrics["ROUGE-2_F1"] == bigram_f1 / metric._total_sequence_count
# ROUGE-L
assert_allclose(metric._total_rouge_l_f1, desired_values["total_rouge_l_f1"])
assert metrics["ROUGE-L"] == metric._total_rouge_l_f1 / metric._total_sequence_count
| allennlp-master | tests/training/metrics/rouge_test.py |
from typing import Any, Dict, List, Tuple, Union
import torch
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
run_distributed_test,
global_distributed_metric,
)
from allennlp.training.metrics import AttachmentScores
class AttachmentScoresTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.scorer = AttachmentScores()
self.predictions = torch.Tensor([[0, 1, 3, 5, 2, 4], [0, 3, 2, 1, 0, 0]])
self.gold_indices = torch.Tensor([[0, 1, 3, 5, 2, 4], [0, 3, 2, 1, 0, 0]])
self.label_predictions = torch.Tensor([[0, 5, 2, 1, 4, 2], [0, 4, 8, 2, 0, 0]])
self.gold_labels = torch.Tensor([[0, 5, 2, 1, 4, 2], [0, 4, 8, 2, 0, 0]])
self.mask = torch.tensor(
[[True, True, True, True, True, True], [True, True, True, True, False, False]]
)
def _send_tensors_to_device(self, device: str):
self.predictions = self.predictions.to(device)
self.gold_indices = self.gold_indices.to(device)
self.label_predictions = self.label_predictions.to(device)
self.gold_labels = self.gold_labels.to(device)
self.mask = self.mask.to(device)
@multi_device
def test_perfect_scores(self, device: str):
self._send_tensors_to_device(device)
self.scorer(
self.predictions, self.label_predictions, self.gold_indices, self.gold_labels, self.mask
)
for value in self.scorer.get_metric().values():
assert value == 1.0
@multi_device
def test_unlabeled_accuracy_ignores_incorrect_labels(self, device: str):
self._send_tensors_to_device(device)
label_predictions = self.label_predictions
        # Change some values so that 4 of our label predictions are wrong.
label_predictions[0, 3:] = 3
label_predictions[1, 0] = 7
self.scorer(
self.predictions, label_predictions, self.gold_indices, self.gold_labels, self.mask
)
metrics = self.scorer.get_metric()
assert metrics["UAS"] == 1.0
assert metrics["UEM"] == 1.0
# 4 / 12 labels were wrong and 2 positions
# are masked, so 6/10 = 0.6 LAS.
assert metrics["LAS"] == 0.6
# Neither should have labeled exact match.
assert metrics["LEM"] == 0.0
@multi_device
def test_labeled_accuracy_is_affected_by_incorrect_heads(self, device: str):
self._send_tensors_to_device(device)
predictions = self.predictions
        # Change some values so that 4 of our head predictions are wrong.
predictions[0, 3:] = 3
predictions[1, 0] = 7
# This one is in the padded part, so it shouldn't affect anything.
predictions[1, 5] = 7
self.scorer(
predictions, self.label_predictions, self.gold_indices, self.gold_labels, self.mask
)
metrics = self.scorer.get_metric()
        # 4 unmasked heads are incorrect, so the unlabeled attachment
        # score should be 6/10 = 0.6 UAS.
assert metrics["UAS"] == 0.6
# All the labels were correct, but some heads
# were wrong, so the LAS should equal the UAS.
assert metrics["LAS"] == 0.6
# Neither batch element had a perfect labeled or unlabeled EM.
assert metrics["LEM"] == 0.0
assert metrics["UEM"] == 0.0
@multi_device
def test_attachment_scores_can_ignore_labels(self, device: str):
self._send_tensors_to_device(device)
scorer = AttachmentScores(ignore_classes=[1])
label_predictions = self.label_predictions
# Change the predictions where the gold label is 1;
# as we are ignoring 1, we should still get a perfect score.
label_predictions[0, 3] = 2
scorer(self.predictions, label_predictions, self.gold_indices, self.gold_labels, self.mask)
for value in scorer.get_metric().values():
assert value == 1.0
def test_distributed_attachment_scores(self):
predictions = [torch.Tensor([[0, 1, 3, 5, 2, 4]]), torch.Tensor([[0, 3, 2, 1, 0, 0]])]
gold_indices = [torch.Tensor([[0, 1, 3, 5, 2, 4]]), torch.Tensor([[0, 3, 2, 1, 0, 0]])]
label_predictions = [
torch.Tensor([[0, 5, 2, 3, 3, 3]]),
torch.Tensor([[7, 4, 8, 2, 0, 0]]),
]
gold_labels = [torch.Tensor([[0, 5, 2, 1, 4, 2]]), torch.Tensor([[0, 4, 8, 2, 0, 0]])]
mask = [
torch.tensor([[True, True, True, True, True, True]]),
torch.tensor([[True, True, True, True, False, False]]),
]
metric_kwargs = {
"predicted_indices": predictions,
"gold_indices": gold_indices,
"predicted_labels": label_predictions,
"gold_labels": gold_labels,
"mask": mask,
}
desired_metrics = {
"UAS": 1.0,
"LAS": 0.6,
"UEM": 1.0,
"LEM": 0.0,
}
run_distributed_test(
[-1, -1],
global_distributed_metric,
AttachmentScores(),
metric_kwargs,
desired_metrics,
exact=True,
)
def test_multiple_distributed_runs(self):
predictions = [torch.Tensor([[0, 1, 3, 5, 2, 4]]), torch.Tensor([[0, 3, 2, 1, 0, 0]])]
gold_indices = [torch.Tensor([[0, 1, 3, 5, 2, 4]]), torch.Tensor([[0, 3, 2, 1, 0, 0]])]
label_predictions = [
torch.Tensor([[0, 5, 2, 3, 3, 3]]),
torch.Tensor([[7, 4, 8, 2, 0, 0]]),
]
gold_labels = [torch.Tensor([[0, 5, 2, 1, 4, 2]]), torch.Tensor([[0, 4, 8, 2, 0, 0]])]
mask = [
torch.tensor([[True, True, True, True, True, True]]),
torch.tensor([[True, True, True, True, False, False]]),
]
metric_kwargs = {
"predicted_indices": predictions,
"gold_indices": gold_indices,
"predicted_labels": label_predictions,
"gold_labels": gold_labels,
"mask": mask,
}
desired_metrics = {
"UAS": 1.0,
"LAS": 0.6,
"UEM": 1.0,
"LEM": 0.0,
}
run_distributed_test(
[-1, -1],
multiple_runs,
AttachmentScores(),
metric_kwargs,
desired_metrics,
exact=True,
)
def multiple_runs(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: AttachmentScores,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
for i in range(200):
metric(**kwargs)
metrics = metric.get_metric()
for key in metrics:
assert desired_values[key] == metrics[key]
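# A minimal single-process sketch of the call signature used throughout this module:
# predicted heads, predicted labels, gold heads, gold labels, and an optional mask.
# `_attachment_scores_sketch` is a hypothetical helper added for illustration.
def _attachment_scores_sketch() -> None:
    scorer = AttachmentScores()
    predicted_indices = torch.Tensor([[0, 1, 3, 5, 2, 4]])
    predicted_labels = torch.Tensor([[0, 5, 2, 1, 4, 2]])
    gold_indices = torch.Tensor([[0, 1, 3, 5, 2, 4]])
    gold_labels = torch.Tensor([[0, 5, 2, 1, 4, 2]])
    mask = torch.tensor([[True, True, True, True, True, True]])
    scorer(predicted_indices, predicted_labels, gold_indices, gold_labels, mask)
    # Predictions match gold exactly, so every score (UAS, LAS, UEM, LEM) is 1.0.
    assert all(value == 1.0 for value in scorer.get_metric().values())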
| allennlp-master | tests/training/metrics/attachment_scores_test.py |
import numpy as np
import torch
from torch.testing import assert_allclose
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
# global_distributed_metric,
# run_distributed_test,
)
from allennlp.training.metrics import Covariance
class CovarianceTest(AllenNlpTestCase):
@multi_device
def test_covariance_unmasked_computation(self, device: str):
covariance = Covariance()
batch_size = 100
num_labels = 10
predictions = torch.randn(batch_size, num_labels, device=device)
labels = 0.5 * predictions + torch.randn(batch_size, num_labels, device=device)
stride = 10
for i in range(batch_size // stride):
timestep_predictions = predictions[stride * i : stride * (i + 1), :]
timestep_labels = labels[stride * i : stride * (i + 1), :]
# Flatten the predictions and labels thus far, so numpy treats them as
# independent observations.
expected_covariance = np.cov(
predictions[: stride * (i + 1), :].view(-1).cpu().numpy(),
labels[: stride * (i + 1), :].view(-1).cpu().numpy(),
)[0, 1]
covariance(timestep_predictions, timestep_labels)
assert_allclose(expected_covariance, covariance.get_metric())
# Test reset
covariance.reset()
covariance(predictions, labels)
assert_allclose(
np.cov(predictions.view(-1).cpu().numpy(), labels.view(-1).cpu().numpy())[0, 1],
covariance.get_metric(),
)
@multi_device
def test_covariance_masked_computation(self, device: str):
covariance = Covariance()
batch_size = 100
num_labels = 10
predictions = torch.randn(batch_size, num_labels, device=device)
labels = 0.5 * predictions + torch.randn(batch_size, num_labels, device=device)
# Random binary mask
mask = torch.randint(0, 2, size=(batch_size, num_labels), device=device).bool()
stride = 10
for i in range(batch_size // stride):
timestep_predictions = predictions[stride * i : stride * (i + 1), :]
timestep_labels = labels[stride * i : stride * (i + 1), :]
timestep_mask = mask[stride * i : stride * (i + 1), :]
# Flatten the predictions, labels, and mask thus far, so numpy treats them as
# independent observations.
expected_covariance = np.cov(
predictions[: stride * (i + 1), :].view(-1).cpu().numpy(),
labels[: stride * (i + 1), :].view(-1).cpu().numpy(),
fweights=mask[: stride * (i + 1), :].view(-1).cpu().numpy(),
)[0, 1]
covariance(timestep_predictions, timestep_labels, timestep_mask)
assert_allclose(expected_covariance, covariance.get_metric())
# Test reset
covariance.reset()
covariance(predictions, labels, mask)
assert_allclose(
np.cov(
predictions.view(-1).cpu().numpy(),
labels.view(-1).cpu().numpy(),
fweights=mask.view(-1).cpu().numpy(),
)[0, 1],
covariance.get_metric(),
)
    # Commented out in order to revisit distributed covariance later.
# def test_distributed_covariance(self):
# batch_size = 10
# num_labels = 10
# predictions = torch.randn(batch_size, num_labels)
# labels = 0.5 * predictions + torch.randn(batch_size, num_labels)
# # Random binary mask
# mask = torch.randint(0, 2, size=(batch_size, num_labels)).bool()
# expected_covariance = np.cov(
# predictions.view(-1).cpu().numpy(),
# labels.view(-1).cpu().numpy(),
# fweights=mask.view(-1).cpu().numpy(),
# )[0, 1]
# predictions = [predictions[:5], predictions[5:]]
# labels = [labels[:5], labels[5:]]
# mask = [mask[:5], mask[5:]]
# metric_kwargs = {"predictions": predictions, "gold_labels": labels, "mask": mask}
# run_distributed_test(
# [-1, -1],
# global_distributed_metric,
# Covariance(),
# metric_kwargs,
# expected_covariance,
# exact=(0.0001, 1e-01),
# )
# def test_distributed_covariance_unequal_batches(self):
# batch_size = 10
# num_labels = 10
# predictions = torch.randn(batch_size, num_labels)
# labels = 0.5 * predictions + torch.randn(batch_size, num_labels)
# # Random binary mask
# mask = torch.randint(0, 2, size=(batch_size, num_labels)).bool()
# expected_covariance = np.cov(
# predictions.view(-1).cpu().numpy(),
# labels.view(-1).cpu().numpy(),
# fweights=mask.view(-1).cpu().numpy(),
# )[0, 1]
# predictions = [predictions[:6], predictions[6:]]
# labels = [labels[:6], labels[6:]]
# mask = [mask[:6], mask[6:]]
# metric_kwargs = {"predictions": predictions, "gold_labels": labels, "mask": mask}
# run_distributed_test(
# [-1, -1],
# global_distributed_metric,
# Covariance(),
# metric_kwargs,
# expected_covariance,
# exact=(0.0001, 1e-01),
# )
# def test_multiple_runs(self):
# batch_size = 12
# num_labels = 10
# predictions = torch.randn(batch_size, num_labels)
# labels = 0.5 * predictions + torch.randn(batch_size, num_labels)
# stride = 1
# expected_covariances = []
# for i in range(batch_size // stride):
# timestep_predictions = predictions[stride * i : stride * (i + 1), :]
# timestep_labels = labels[stride * i : stride * (i + 1), :]
# # Flatten the predictions and labels thus far, so numpy treats them as
# # independent observations.
# expected_covariance = np.cov(
# predictions[: stride * (i + 1), :].view(-1).cpu().numpy(),
# labels[: stride * (i + 1), :].view(-1).cpu().numpy(),
# )[0, 1]
# expected_covariances.append(expected_covariance)
# predictions = [predictions[:6], predictions[6:]]
# labels = [labels[:6], labels[6:]]
# metric_kwargs = {"predictions": predictions, "gold_labels": labels}
# run_distributed_test(
# [-1, -1],
# multiple_runs,
# Covariance(),
# batch_size,
# stride,
# metric_kwargs,
# expected_covariances,
# exact=(0.0001, 1e-01),
# )
# def multiple_runs(
# global_rank: int,
# world_size: int,
# gpu_id: Union[int, torch.device],
# covariance: Covariance,
# batch_size: int,
# stride: int,
# metric_kwargs: Dict[str, List[Any]],
# expected_covariances: List[float],
# exact: Union[bool, Tuple[float, float]] = True,
# ):
# kwargs = {}
# # Use the arguments meant for the process with rank `global_rank`.
# for argname in metric_kwargs:
# kwargs[argname] = metric_kwargs[argname][global_rank]
# predictions = kwargs["predictions"]
# labels = kwargs["gold_labels"]
# batch_size = predictions.shape[0]
# stride = stride // world_size
# for i in range(batch_size // stride):
# timestep_predictions = predictions[stride * i : stride * (i + 1), :]
# timestep_labels = labels[stride * i : stride * (i + 1), :]
# # Flatten the predictions and labels thus far, so numpy treats them as
# # independent observations.
# covariance(timestep_predictions, timestep_labels)
# assert_allclose(expected_covariances[i], covariance.get_metric(), rtol=exact[0], atol=exact[1])
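# A short unmasked sketch of the comparison the tests above perform in a loop: the metric's
# running estimate should agree with numpy's covariance over the flattened tensors.
# `_covariance_sketch` is a hypothetical helper added for illustration.
def _covariance_sketch() -> None:
    torch.manual_seed(0)
    predictions = torch.randn(10, 10)
    labels = 0.5 * predictions + torch.randn(10, 10)
    covariance = Covariance()
    covariance(predictions, labels)
    expected = np.cov(predictions.view(-1).numpy(), labels.view(-1).numpy())[0, 1]
    assert_allclose(expected, covariance.get_metric())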
| allennlp-master | tests/training/metrics/covariance_test.py |
import torch
from torch.testing import assert_allclose
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.common.testing import AllenNlpTestCase, multi_device
from allennlp.data import Vocabulary
from allennlp.training.metrics import SpanBasedF1Measure, Metric
class SpanBasedF1Test(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
vocab = Vocabulary()
vocab.add_token_to_namespace("O", "tags")
vocab.add_token_to_namespace("B-ARG1", "tags")
vocab.add_token_to_namespace("I-ARG1", "tags")
vocab.add_token_to_namespace("B-ARG2", "tags")
vocab.add_token_to_namespace("I-ARG2", "tags")
vocab.add_token_to_namespace("B-V", "tags")
vocab.add_token_to_namespace("I-V", "tags")
vocab.add_token_to_namespace("U-ARG1", "tags")
vocab.add_token_to_namespace("U-ARG2", "tags")
vocab.add_token_to_namespace("B-C-ARG1", "tags")
vocab.add_token_to_namespace("I-C-ARG1", "tags")
vocab.add_token_to_namespace("B-ARGM-ADJ", "tags")
vocab.add_token_to_namespace("I-ARGM-ADJ", "tags")
# BMES.
vocab.add_token_to_namespace("B", "bmes_tags")
vocab.add_token_to_namespace("M", "bmes_tags")
vocab.add_token_to_namespace("E", "bmes_tags")
vocab.add_token_to_namespace("S", "bmes_tags")
self.vocab = vocab
@multi_device
def test_span_metrics_are_computed_correcly_with_prediction_map(self, device: str):
# In this example, datapoint1 only has access to ARG1 and V labels,
# whereas datapoint2 only has access to ARG2 and V labels.
# gold_labels = [["O", "B-ARG1", "I-ARG1", "O", "B-V", "O"],
# ["B-ARG2", "I-ARG2", "O", "B-V", "I-V", "O"]]
gold_indices = [[0, 1, 2, 0, 3, 0], [1, 2, 0, 3, 4, 0]]
prediction_map_indices = [[0, 1, 2, 5, 6], [0, 3, 4, 5, 6]]
gold_tensor = torch.tensor(gold_indices, device=device)
prediction_map_tensor = torch.tensor(prediction_map_indices, device=device)
prediction_tensor = torch.rand([2, 6, 5], device=device)
prediction_tensor[0, 0, 0] = 1
prediction_tensor[0, 1, 1] = 1 # (True Positive - ARG1
prediction_tensor[0, 2, 2] = 1 # *)
prediction_tensor[0, 3, 0] = 1
prediction_tensor[0, 4, 3] = 1 # (True Positive - V)
prediction_tensor[0, 5, 1] = 1 # (False Positive - ARG1)
prediction_tensor[1, 0, 0] = 1 # (False Negative - ARG2
prediction_tensor[1, 1, 0] = 1 # *)
prediction_tensor[1, 2, 0] = 1
prediction_tensor[1, 3, 3] = 1 # (True Positive - V
prediction_tensor[1, 4, 4] = 1 # *)
prediction_tensor[1, 5, 1] = 1 # (False Positive - ARG2)
metric = SpanBasedF1Measure(self.vocab, "tags")
metric(prediction_tensor, gold_tensor, prediction_map=prediction_map_tensor)
assert metric._true_positives["ARG1"] == 1
assert metric._true_positives["ARG2"] == 0
assert metric._true_positives["V"] == 2
assert "O" not in metric._true_positives.keys()
assert metric._false_negatives["ARG1"] == 0
assert metric._false_negatives["ARG2"] == 1
assert metric._false_negatives["V"] == 0
assert "O" not in metric._false_negatives.keys()
assert metric._false_positives["ARG1"] == 1
assert metric._false_positives["ARG2"] == 1
assert metric._false_positives["V"] == 0
assert "O" not in metric._false_positives.keys()
# Check things are accumulating correctly.
metric(prediction_tensor, gold_tensor, prediction_map=prediction_map_tensor)
assert metric._true_positives["ARG1"] == 2
assert metric._true_positives["ARG2"] == 0
assert metric._true_positives["V"] == 4
assert "O" not in metric._true_positives.keys()
assert metric._false_negatives["ARG1"] == 0
assert metric._false_negatives["ARG2"] == 2
assert metric._false_negatives["V"] == 0
assert "O" not in metric._false_negatives.keys()
assert metric._false_positives["ARG1"] == 2
assert metric._false_positives["ARG2"] == 2
assert metric._false_positives["V"] == 0
assert "O" not in metric._false_positives.keys()
metric_dict = metric.get_metric()
assert_allclose(metric_dict["recall-ARG2"], 0.0)
assert_allclose(metric_dict["precision-ARG2"], 0.0)
assert_allclose(metric_dict["f1-measure-ARG2"], 0.0)
assert_allclose(metric_dict["recall-ARG1"], 1.0)
assert_allclose(metric_dict["precision-ARG1"], 0.5)
assert_allclose(metric_dict["f1-measure-ARG1"], 0.666666666)
assert_allclose(metric_dict["recall-V"], 1.0)
assert_allclose(metric_dict["precision-V"], 1.0)
assert_allclose(metric_dict["f1-measure-V"], 1.0)
assert_allclose(metric_dict["recall-overall"], 0.75)
assert_allclose(metric_dict["precision-overall"], 0.6)
assert_allclose(metric_dict["f1-measure-overall"], 0.666666666)
@multi_device
def test_span_metrics_are_computed_correctly(self, device: str):
gold_labels = ["O", "B-ARG1", "I-ARG1", "O", "B-ARG2", "I-ARG2", "O", "O", "O"]
gold_indices = [self.vocab.get_token_index(x, "tags") for x in gold_labels]
gold_tensor = torch.tensor([gold_indices], device=device)
prediction_tensor = torch.rand([2, 9, self.vocab.get_vocab_size("tags")], device=device)
# Test that the span measure ignores completely masked sequences by
# passing a mask with a fully masked row.
mask = torch.tensor(
[
[True, True, True, True, True, True, True, True, True],
[False, False, False, False, False, False, False, False, False],
],
device=device,
)
prediction_tensor[:, 0, 0] = 1
prediction_tensor[:, 1, 1] = 1 # (True positive - ARG1
prediction_tensor[:, 2, 2] = 1 # *)
prediction_tensor[:, 3, 0] = 1
prediction_tensor[:, 4, 0] = 1 # (False Negative - ARG2
prediction_tensor[:, 5, 0] = 1 # *)
prediction_tensor[:, 6, 0] = 1
prediction_tensor[:, 7, 1] = 1 # (False Positive - ARG1
prediction_tensor[:, 8, 2] = 1 # *)
metric = SpanBasedF1Measure(self.vocab, "tags")
metric(prediction_tensor, gold_tensor, mask)
assert metric._true_positives["ARG1"] == 1
assert metric._true_positives["ARG2"] == 0
assert "O" not in metric._true_positives.keys()
assert metric._false_negatives["ARG1"] == 0
assert metric._false_negatives["ARG2"] == 1
assert "O" not in metric._false_negatives.keys()
assert metric._false_positives["ARG1"] == 1
assert metric._false_positives["ARG2"] == 0
assert "O" not in metric._false_positives.keys()
# Check things are accumulating correctly.
metric(prediction_tensor, gold_tensor, mask)
assert metric._true_positives["ARG1"] == 2
assert metric._true_positives["ARG2"] == 0
assert "O" not in metric._true_positives.keys()
assert metric._false_negatives["ARG1"] == 0
assert metric._false_negatives["ARG2"] == 2
assert "O" not in metric._false_negatives.keys()
assert metric._false_positives["ARG1"] == 2
assert metric._false_positives["ARG2"] == 0
assert "O" not in metric._false_positives.keys()
metric_dict = metric.get_metric()
assert_allclose(metric_dict["recall-ARG2"], 0.0)
assert_allclose(metric_dict["precision-ARG2"], 0.0)
assert_allclose(metric_dict["f1-measure-ARG2"], 0.0)
assert_allclose(metric_dict["recall-ARG1"], 1.0)
assert_allclose(metric_dict["precision-ARG1"], 0.5)
assert_allclose(metric_dict["f1-measure-ARG1"], 0.666666666)
assert_allclose(metric_dict["recall-overall"], 0.5)
assert_allclose(metric_dict["precision-overall"], 0.5)
assert_allclose(metric_dict["f1-measure-overall"], 0.5)
@multi_device
def test_bmes_span_metrics_are_computed_correctly(self, device: str):
# (bmes_tags) B:0, M:1, E:2, S:3.
# [S, B, M, E, S]
# [S, S, S, S, S]
gold_indices = [[3, 0, 1, 2, 3], [3, 3, 3, 3, 3]]
gold_tensor = torch.tensor(gold_indices, device=device)
prediction_tensor = torch.rand([2, 5, 4], device=device)
# [S, B, E, S, S]
# TP: 2, FP: 2, FN: 1.
prediction_tensor[0, 0, 3] = 1 # (True positive)
prediction_tensor[0, 1, 0] = 1 # (False positive
prediction_tensor[0, 2, 2] = 1 # *)
prediction_tensor[0, 3, 3] = 1 # (False positive)
prediction_tensor[0, 4, 3] = 1 # (True positive)
# [B, E, S, B, E]
# TP: 1, FP: 2, FN: 4.
prediction_tensor[1, 0, 0] = 1 # (False positive
prediction_tensor[1, 1, 2] = 1 # *)
prediction_tensor[1, 2, 3] = 1 # (True positive)
prediction_tensor[1, 3, 0] = 1 # (False positive
prediction_tensor[1, 4, 2] = 1 # *)
metric = SpanBasedF1Measure(self.vocab, "bmes_tags", label_encoding="BMES")
metric(prediction_tensor, gold_tensor)
# TP: 3, FP: 4, FN: 5.
metric_dict = metric.get_metric()
assert_allclose(metric_dict["recall-overall"], 0.375, rtol=0.001, atol=1e-03)
assert_allclose(metric_dict["precision-overall"], 0.428, rtol=0.001, atol=1e-03)
assert_allclose(metric_dict["f1-measure-overall"], 0.4, rtol=0.001, atol=1e-03)
@multi_device
def test_span_f1_can_build_from_params(self, device: str):
params = Params({"type": "span_f1", "tag_namespace": "tags", "ignore_classes": ["V"]})
metric = Metric.from_params(params=params, vocabulary=self.vocab)
assert metric._ignore_classes == ["V"] # type: ignore
assert metric._label_vocabulary == self.vocab.get_index_to_token_vocabulary( # type: ignore
"tags"
)
@multi_device
def test_span_f1_accepts_tags_to_spans_function_argument(self, device: str):
def mock_tags_to_spans_function(tag_sequence, classes_to_ignore=None):
return [("mock", (42, 42))]
        # These tags should effectively be ignored, since the mock function above returns a fixed span.
bio_tags = ["B-ARG1", "O", "B-C-ARG1", "B-V", "B-ARGM-ADJ", "O"]
gold_indices = [self.vocab.get_token_index(x, "tags") for x in bio_tags]
gold_tensor = torch.tensor([gold_indices], device=device)
prediction_tensor = torch.rand([1, 6, self.vocab.get_vocab_size("tags")], device=device)
metric = SpanBasedF1Measure(
self.vocab,
"tags",
label_encoding=None,
tags_to_spans_function=mock_tags_to_spans_function,
)
metric(prediction_tensor, gold_tensor)
metric_dict = metric.get_metric()
assert_allclose(metric_dict["recall-overall"], 1.0)
assert_allclose(metric_dict["precision-overall"], 1.0)
assert_allclose(metric_dict["f1-measure-overall"], 1.0)
with pytest.raises(ConfigurationError):
SpanBasedF1Measure(self.vocab, label_encoding="INVALID")
with pytest.raises(ConfigurationError):
SpanBasedF1Measure(self.vocab, tags_to_spans_function=mock_tags_to_spans_function)
with pytest.raises(ConfigurationError):
SpanBasedF1Measure(self.vocab, label_encoding=None, tags_to_spans_function=None)
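# A hand recomputation of the overall BMES numbers asserted above from the tallied counts
# (TP: 3, FP: 4, FN: 5). This mirrors the arithmetic only, not the metric's implementation.
def _bmes_overall_scores_sketch() -> None:
    true_positives, false_positives, false_negatives = 3, 4, 5
    precision = true_positives / (true_positives + false_positives)  # 3 / 7, about 0.428
    recall = true_positives / (true_positives + false_negatives)  # 3 / 8 = 0.375
    f1 = 2 * precision * recall / (precision + recall)  # 0.4
    assert abs(precision - 0.428) < 1e-3
    assert recall == 0.375
    assert abs(f1 - 0.4) < 1e-9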
| allennlp-master | tests/training/metrics/span_based_f1_measure_test.py |
from typing import Optional
import numpy as np
import torch
from torch.testing import assert_allclose
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
# global_distributed_metric,
# run_distributed_test,
)
from allennlp.training.metrics import PearsonCorrelation
def pearson_corrcoef(
predictions: np.ndarray, labels: np.ndarray, fweights: Optional[np.ndarray] = None
):
covariance_matrices = np.cov(predictions, labels, fweights=fweights)
denominator = np.sqrt(covariance_matrices[0, 0] * covariance_matrices[1, 1])
if np.around(denominator, decimals=5) == 0:
expected_pearson_correlation = 0
else:
expected_pearson_correlation = covariance_matrices[0, 1] / denominator
return expected_pearson_correlation
class PearsonCorrelationTest(AllenNlpTestCase):
@multi_device
def test_pearson_correlation_unmasked_computation(self, device: str):
pearson_correlation = PearsonCorrelation()
batch_size = 100
num_labels = 10
predictions_1 = torch.randn(batch_size, num_labels, device=device)
labels_1 = 0.5 * predictions_1 + torch.randn(batch_size, num_labels, device=device)
predictions_2 = torch.randn(1, device=device).expand(num_labels)
predictions_2 = predictions_2.unsqueeze(0).expand(batch_size, -1)
labels_2 = torch.randn(1, device=device).expand(num_labels)
labels_2 = 0.5 * predictions_2 + labels_2.unsqueeze(0).expand(batch_size, -1)
        # In most cases the data is constructed like predictions_1, where values differ within a batch,
        # but in a few cases, such as predictions_2, every value within the batch is exactly the same.
predictions_labels = [(predictions_1, labels_1), (predictions_2, labels_2)]
stride = 10
for predictions, labels in predictions_labels:
pearson_correlation.reset()
for i in range(batch_size // stride):
timestep_predictions = predictions[stride * i : stride * (i + 1), :]
timestep_labels = labels[stride * i : stride * (i + 1), :]
expected_pearson_correlation = pearson_corrcoef(
predictions[: stride * (i + 1), :].view(-1).cpu().numpy(),
labels[: stride * (i + 1), :].view(-1).cpu().numpy(),
)
pearson_correlation(timestep_predictions, timestep_labels)
assert_allclose(expected_pearson_correlation, pearson_correlation.get_metric())
# Test reset
pearson_correlation.reset()
pearson_correlation(predictions, labels)
assert_allclose(
pearson_corrcoef(predictions.view(-1).cpu().numpy(), labels.view(-1).cpu().numpy()),
pearson_correlation.get_metric(),
)
@multi_device
def test_pearson_correlation_masked_computation(self, device: str):
pearson_correlation = PearsonCorrelation()
batch_size = 100
num_labels = 10
predictions_1 = torch.randn(batch_size, num_labels, device=device)
labels_1 = 0.5 * predictions_1 + torch.randn(batch_size, num_labels, device=device)
predictions_2 = torch.randn(1, device=device).expand(num_labels)
predictions_2 = predictions_2.unsqueeze(0).expand(batch_size, -1)
labels_2 = torch.randn(1, device=device).expand(num_labels)
labels_2 = 0.5 * predictions_2 + labels_2.unsqueeze(0).expand(batch_size, -1)
predictions_labels = [(predictions_1, labels_1), (predictions_2, labels_2)]
# Random binary mask
mask = torch.randint(0, 2, size=(batch_size, num_labels), device=device).bool()
stride = 10
for predictions, labels in predictions_labels:
pearson_correlation.reset()
for i in range(batch_size // stride):
timestep_predictions = predictions[stride * i : stride * (i + 1), :]
timestep_labels = labels[stride * i : stride * (i + 1), :]
timestep_mask = mask[stride * i : stride * (i + 1), :]
expected_pearson_correlation = pearson_corrcoef(
predictions[: stride * (i + 1), :].view(-1).cpu().numpy(),
labels[: stride * (i + 1), :].view(-1).cpu().numpy(),
fweights=mask[: stride * (i + 1), :].view(-1).cpu().numpy(),
)
pearson_correlation(timestep_predictions, timestep_labels, timestep_mask)
assert_allclose(expected_pearson_correlation, pearson_correlation.get_metric())
# Test reset
pearson_correlation.reset()
pearson_correlation(predictions, labels, mask)
expected_pearson_correlation = pearson_corrcoef(
predictions.view(-1).cpu().numpy(),
labels.view(-1).cpu().numpy(),
fweights=mask.view(-1).cpu().numpy(),
)
assert_allclose(expected_pearson_correlation, pearson_correlation.get_metric())
    # Commented out in order to revisit distributed covariance (on which PearsonCorrelation depends) later.
# def test_distributed_pearson(self):
# batch_size = 10
# num_labels = 10
# predictions = torch.randn(batch_size, num_labels)
# labels = 0.5 * predictions + torch.randn(batch_size, num_labels)
# expected_pearson_correlation = pearson_corrcoef(
# predictions.view(-1).cpu().numpy(), labels.view(-1).cpu().numpy(),
# )
# predictions = [predictions[:5], predictions[5:]]
# labels = [labels[:5], labels[5:]]
# metric_kwargs = {"predictions": predictions, "gold_labels": labels}
# run_distributed_test(
# [-1, -1],
# global_distributed_metric,
# PearsonCorrelation(),
# metric_kwargs,
# expected_pearson_correlation,
# exact=(0.0001, 1e-01),
# )
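# A short unmasked sketch of the comparison performed above: the running metric should agree
# with the module-level pearson_corrcoef helper applied to the flattened tensors.
# `_pearson_sketch` is a hypothetical helper added for illustration.
def _pearson_sketch() -> None:
    torch.manual_seed(0)
    predictions = torch.randn(10, 10)
    labels = 0.5 * predictions + torch.randn(10, 10)
    metric = PearsonCorrelation()
    metric(predictions, labels)
    expected = pearson_corrcoef(predictions.view(-1).numpy(), labels.view(-1).numpy())
    assert_allclose(expected, metric.get_metric())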
| allennlp-master | tests/training/metrics/pearson_correlation_test.py |
from typing import Dict, List, Tuple, Union, Any
import pytest
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import (
AllenNlpTestCase,
global_distributed_metric,
multi_device,
run_distributed_test,
)
from sklearn.metrics import precision_recall_fscore_support
from torch.testing import assert_allclose
from allennlp.training.metrics import FBetaMultiLabelMeasure
class FBetaMultiLabelMeasureTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.predictions = torch.tensor(
[
[0.55, 0.25, 0.10, 0.10, 0.20],
[0.10, 0.60, 0.10, 0.95, 0.00],
[0.90, 0.80, 0.75, 0.80, 0.00],
[0.49, 0.50, 0.95, 0.55, 0.00],
[0.60, 0.49, 0.60, 0.65, 0.85],
[0.85, 0.40, 0.10, 0.20, 0.00],
]
)
self.targets = torch.tensor(
[
[1, 1, 0, 0, 0],
[0, 1, 0, 1, 0],
[1, 1, 0, 1, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[0, 0, 0, 0, 0],
]
)
        # Detailed expected internal state of the metric after one call.
self.pred_sum = [4, 3, 3, 4, 1]
self.true_sum = [4, 5, 2, 4, 0]
self.true_positive_sum = [3, 3, 2, 4, 0]
self.true_negative_sum = [1, 1, 3, 2, 5]
self.total_sum = [30, 30, 30, 30, 30]
# true_positive_sum / pred_sum
desired_precisions = [3 / 4, 3 / 3, 2 / 3, 4 / 4, 0 / 1]
# true_positive_sum / true_sum
desired_recalls = [3 / 4, 3 / 5, 2 / 2, 4 / 4, 0.00]
desired_fscores = [
(2 * p * r) / (p + r) if p + r != 0.0 else 0.0
for p, r in zip(desired_precisions, desired_recalls)
]
self.desired_precisions = desired_precisions
self.desired_recalls = desired_recalls
self.desired_fscores = desired_fscores
@multi_device
def test_config_errors(self, device: str):
# Bad beta
pytest.raises(ConfigurationError, FBetaMultiLabelMeasure, beta=0.0)
# Bad average option
pytest.raises(ConfigurationError, FBetaMultiLabelMeasure, average="mega")
# Empty input labels
pytest.raises(ConfigurationError, FBetaMultiLabelMeasure, labels=[])
@multi_device
def test_runtime_errors(self, device: str):
fbeta = FBetaMultiLabelMeasure()
# Metric was never called.
pytest.raises(RuntimeError, fbeta.get_metric)
@multi_device
def test_fbeta_multilabel_state(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
fbeta = FBetaMultiLabelMeasure()
fbeta(self.predictions, self.targets)
# check state
assert_allclose(fbeta._pred_sum.tolist(), self.pred_sum)
assert_allclose(fbeta._true_sum.tolist(), self.true_sum)
assert_allclose(fbeta._true_positive_sum.tolist(), self.true_positive_sum)
assert_allclose(fbeta._true_negative_sum.tolist(), self.true_negative_sum)
assert_allclose(fbeta._total_sum.tolist(), self.total_sum)
@multi_device
def test_fbeta_multilabel_metric(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
fbeta = FBetaMultiLabelMeasure()
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
# check value
assert_allclose(precisions, self.desired_precisions)
assert_allclose(recalls, self.desired_recalls)
assert_allclose(fscores, self.desired_fscores)
# check type
assert isinstance(precisions, List)
assert isinstance(recalls, List)
assert isinstance(fscores, List)
@multi_device
def test_fbeta_multilabel_with_mask(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
mask = torch.tensor([True, True, True, True, True, False], device=device).unsqueeze(-1)
fbeta = FBetaMultiLabelMeasure()
fbeta(self.predictions, self.targets, mask)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
assert_allclose(fbeta._pred_sum.tolist(), [3, 3, 3, 4, 1])
assert_allclose(fbeta._true_sum.tolist(), [4, 5, 2, 4, 0])
assert_allclose(fbeta._true_positive_sum.tolist(), [3, 3, 2, 4, 0])
desired_precisions = [3 / 3, 3 / 3, 2 / 3, 4 / 4, 0 / 1]
desired_recalls = [3 / 4, 3 / 5, 2 / 2, 4 / 4, 0.00]
desired_fscores = [
(2 * p * r) / (p + r) if p + r != 0.0 else 0.0
for p, r in zip(desired_precisions, desired_recalls)
]
assert_allclose(precisions, desired_precisions)
assert_allclose(recalls, desired_recalls)
assert_allclose(fscores, desired_fscores)
@multi_device
def test_fbeta_multilabel_macro_average_metric(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
fbeta = FBetaMultiLabelMeasure(average="macro")
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
        # We keep the expected values on the CPU because FBetaMeasure returns them on the CPU.
macro_precision = torch.tensor(self.desired_precisions).mean()
macro_recall = torch.tensor(self.desired_recalls).mean()
macro_fscore = torch.tensor(self.desired_fscores).mean()
# check value
assert_allclose(precisions, macro_precision)
assert_allclose(recalls, macro_recall)
assert_allclose(fscores, macro_fscore)
# check type
assert isinstance(precisions, float)
assert isinstance(recalls, float)
assert isinstance(fscores, float)
@multi_device
def test_fbeta_multilabel_micro_average_metric(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
fbeta = FBetaMultiLabelMeasure(average="micro")
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
        # We keep the expected values on the CPU because FBetaMeasure returns them on the CPU.
true_positives = torch.tensor([3, 3, 2, 4, 0], dtype=torch.float32)
false_positives = torch.tensor([1, 0, 1, 0, 1], dtype=torch.float32)
false_negatives = torch.tensor([1, 2, 0, 0, 0], dtype=torch.float32)
mean_true_positive = true_positives.mean()
mean_false_positive = false_positives.mean()
mean_false_negative = false_negatives.mean()
micro_precision = mean_true_positive / (mean_true_positive + mean_false_positive)
micro_recall = mean_true_positive / (mean_true_positive + mean_false_negative)
micro_fscore = (2 * micro_precision * micro_recall) / (micro_precision + micro_recall)
# check value
assert_allclose(precisions, micro_precision)
assert_allclose(recalls, micro_recall)
assert_allclose(fscores, micro_fscore)
@multi_device
def test_fbeta_multilabel_with_explicit_labels(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
        # Same predictions, but with an explicit label ordering.
fbeta = FBetaMultiLabelMeasure(labels=[4, 3, 2, 1, 0])
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
desired_precisions = self.desired_precisions[::-1]
desired_recalls = self.desired_recalls[::-1]
desired_fscores = self.desired_fscores[::-1]
# check value
assert_allclose(precisions, desired_precisions)
assert_allclose(recalls, desired_recalls)
assert_allclose(fscores, desired_fscores)
@multi_device
def test_fbeta_multilabel_with_macro_average(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
labels = [0, 1]
fbeta = FBetaMultiLabelMeasure(average="macro", labels=labels)
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
        # We keep the expected values on the CPU because FBetaMeasure returns them on the CPU.
macro_precision = torch.tensor(self.desired_precisions)[labels].mean()
macro_recall = torch.tensor(self.desired_recalls)[labels].mean()
macro_fscore = torch.tensor(self.desired_fscores)[labels].mean()
# check value
assert_allclose(precisions, macro_precision)
assert_allclose(recalls, macro_recall)
assert_allclose(fscores, macro_fscore)
@multi_device
def test_fbeta_multilabel_with_micro_average(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
labels = [1, 3]
fbeta = FBetaMultiLabelMeasure(average="micro", labels=labels)
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
        # We keep the expected values on the CPU because FBetaMeasure returns them on the CPU.
true_positives = torch.tensor([3, 4], dtype=torch.float32)
false_positives = torch.tensor([0, 0], dtype=torch.float32)
false_negatives = torch.tensor([2, 0], dtype=torch.float32)
mean_true_positive = true_positives.mean()
mean_false_positive = false_positives.mean()
mean_false_negative = false_negatives.mean()
micro_precision = mean_true_positive / (mean_true_positive + mean_false_positive)
micro_recall = mean_true_positive / (mean_true_positive + mean_false_negative)
micro_fscore = (2 * micro_precision * micro_recall) / (micro_precision + micro_recall)
# check value
assert_allclose(precisions, micro_precision)
assert_allclose(recalls, micro_recall)
assert_allclose(fscores, micro_fscore)
@multi_device
def test_fbeta_multilabel_with_weighted_average(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
labels = [0, 1]
fbeta = FBetaMultiLabelMeasure(average="weighted", labels=labels)
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
weighted_precision, weighted_recall, weighted_fscore, _ = precision_recall_fscore_support(
self.targets.cpu().numpy(),
torch.where(
self.predictions >= fbeta._threshold,
torch.ones_like(self.predictions),
torch.zeros_like(self.predictions),
)
.cpu()
.numpy(),
labels=labels,
average="weighted",
)
# check value
assert_allclose(precisions, weighted_precision)
assert_allclose(recalls, weighted_recall)
assert_allclose(fscores, weighted_fscore)
@multi_device
def test_fbeta_multilabel_handles_batch_size_of_one(self, device: str):
predictions = torch.tensor([[0.2862, 0.5479, 0.1627, 0.2033]], device=device)
targets = torch.tensor([[0, 1, 0, 0]], device=device)
mask = torch.tensor([[True]], device=device)
fbeta = FBetaMultiLabelMeasure()
fbeta(predictions, targets, mask)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
assert_allclose(precisions, [0.0, 1.0, 0.0, 0.0])
assert_allclose(recalls, [0.0, 1.0, 0.0, 0.0])
@multi_device
def test_fbeta_multilabel_handles_no_prediction_false_last_class(self, device: str):
predictions = torch.tensor([[0.65, 0.35], [0.0, 0.0]], device=device)
# preds = [0, NA]
targets = torch.tensor([[1, 0], [1, 0]], device=device)
fbeta = FBetaMultiLabelMeasure()
fbeta(predictions, targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
assert_allclose(precisions, [1.0, 0.0])
assert_allclose(recalls, [0.5, 0.0])
assert_allclose(fscores, [0.6667, 0.0])
@multi_device
def test_fbeta_multilabel_handles_no_prediction_true_last_class(self, device: str):
predictions = torch.tensor([[0.65, 0.35], [0.0, 0.0]], device=device)
# preds = [0, NA]
targets = torch.tensor([[1, 0], [0, 1]], device=device)
fbeta = FBetaMultiLabelMeasure()
fbeta(predictions, targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
assert_allclose(precisions, [1.0, 0.0])
assert_allclose(recalls, [1.0, 0.0])
assert_allclose(fscores, [1.0, 0.0])
@multi_device
def test_fbeta_multilabel_handles_no_prediction_true_other_class(self, device: str):
predictions = torch.tensor([[0.65, 0.35], [0.0, 0.0]], device=device)
# preds = [0, NA]
targets = torch.tensor([[0, 1], [1, 0]], device=device)
fbeta = FBetaMultiLabelMeasure()
fbeta(predictions, targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
assert_allclose(precisions, [0.0, 0.0])
assert_allclose(recalls, [0.0, 0.0])
assert_allclose(fscores, [0.0, 0.0])
@multi_device
def test_fbeta_multilabel_handles_no_prediction_true_all_class(self, device: str):
predictions = torch.tensor([[0.65, 0.35], [0.0, 0.0]], device=device)
# preds = [0, NA]
targets = torch.tensor([[0, 1], [0, 1]], device=device)
fbeta = FBetaMultiLabelMeasure()
fbeta(predictions, targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
assert_allclose(precisions, [0.0, 0.0])
assert_allclose(recalls, [0.0, 0.0])
assert_allclose(fscores, [0.0, 0.0])
def test_distributed_fbeta_multilabel_measure(self):
predictions = [
torch.tensor(
[
[0.55, 0.25, 0.10, 0.10, 0.20],
[0.10, 0.60, 0.10, 0.95, 0.00],
[0.90, 0.80, 0.75, 0.80, 0.00],
]
),
torch.tensor(
[
[0.49, 0.50, 0.95, 0.55, 0.00],
[0.60, 0.49, 0.60, 0.65, 0.85],
[0.85, 0.40, 0.10, 0.20, 0.00],
]
),
]
targets = [
torch.tensor([[1, 1, 0, 0, 0], [0, 1, 0, 1, 0], [1, 1, 0, 1, 0]]),
torch.tensor([[1, 1, 1, 1, 0], [1, 1, 1, 1, 0], [0, 0, 0, 0, 0]]),
]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_metrics = {
"precision": self.desired_precisions,
"recall": self.desired_recalls,
"fscore": self.desired_fscores,
}
run_distributed_test(
[-1, -1],
global_distributed_metric,
FBetaMultiLabelMeasure(),
metric_kwargs,
desired_metrics,
exact=False,
)
def test_multiple_distributed_runs(self):
predictions = [
torch.tensor(
[
[0.55, 0.25, 0.10, 0.10, 0.20],
[0.10, 0.60, 0.10, 0.95, 0.00],
[0.90, 0.80, 0.75, 0.80, 0.00],
]
),
torch.tensor(
[
[0.49, 0.50, 0.95, 0.55, 0.00],
[0.60, 0.49, 0.60, 0.65, 0.85],
[0.85, 0.40, 0.10, 0.20, 0.00],
]
),
]
targets = [
torch.tensor([[1, 1, 0, 0, 0], [0, 1, 0, 1, 0], [1, 1, 0, 1, 0]]),
torch.tensor([[1, 1, 1, 1, 0], [1, 1, 1, 1, 0], [0, 0, 0, 0, 0]]),
]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_metrics = {
"precision": self.desired_precisions,
"recall": self.desired_recalls,
"fscore": self.desired_fscores,
}
run_distributed_test(
[-1, -1],
multiple_runs,
FBetaMultiLabelMeasure(),
metric_kwargs,
desired_metrics,
exact=False,
)
def multiple_runs(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: FBetaMultiLabelMeasure,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
for i in range(200):
metric(**kwargs)
metric_values = metric.get_metric()
for key in desired_values:
assert_allclose(desired_values[key], metric_values[key])
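# A hand recomputation of the micro-averaged scores used in the micro-average test above,
# from the per-class true/false positive and false negative counts listed there.
# This mirrors the arithmetic only, not the metric's implementation.
def _micro_average_sketch() -> None:
    true_positives = [3, 3, 2, 4, 0]
    false_positives = [1, 0, 1, 0, 1]
    false_negatives = [1, 2, 0, 0, 0]
    tp = sum(true_positives) / len(true_positives)  # 2.4
    fp = sum(false_positives) / len(false_positives)  # 0.6
    fn = sum(false_negatives) / len(false_negatives)  # 0.6
    micro_precision = tp / (tp + fp)  # 0.8
    micro_recall = tp / (tp + fn)  # 0.8
    micro_fscore = 2 * micro_precision * micro_recall / (micro_precision + micro_recall)  # 0.8
    assert all(abs(x - 0.8) < 1e-9 for x in (micro_precision, micro_recall, micro_fscore))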
| allennlp-master | tests/training/metrics/fbeta_multi_label_measure_test.py |
import pytest
import torch
from sklearn import metrics
from torch.testing import assert_allclose
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp.training.metrics import Auc
class AucTest(AllenNlpTestCase):
@multi_device
def test_auc_computation(self, device: str):
auc = Auc()
all_predictions = []
all_labels = []
for _ in range(5):
predictions = torch.randn(8, device=device)
labels = torch.randint(0, 2, (8,), dtype=torch.long, device=device)
auc(predictions, labels)
all_predictions.append(predictions)
all_labels.append(labels)
computed_auc_value = auc.get_metric(reset=True)
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
torch.cat(all_labels, dim=0).cpu().numpy(),
torch.cat(all_predictions, dim=0).cpu().numpy(),
)
real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
assert_allclose(real_auc_value, computed_auc_value)
# One more computation to assure reset works.
predictions = torch.randn(8, device=device)
labels = torch.randint(0, 2, (8,), dtype=torch.long, device=device)
auc(predictions, labels)
computed_auc_value = auc.get_metric(reset=True)
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
labels.cpu().numpy(), predictions.cpu().numpy()
)
real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
assert_allclose(real_auc_value, computed_auc_value)
@multi_device
def test_auc_gold_labels_behaviour(self, device: str):
# Check that it works with different pos_label
auc = Auc(positive_label=4)
predictions = torch.randn(8, device=device)
labels = torch.randint(3, 5, (8,), dtype=torch.long, device=device)
# We make sure that the positive label is always present.
labels[0] = 4
auc(predictions, labels)
computed_auc_value = auc.get_metric(reset=True)
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
labels.cpu().numpy(), predictions.cpu().numpy(), pos_label=4
)
real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
assert_allclose(real_auc_value, computed_auc_value)
        # Check that it raises an error when given more than two distinct labels.
with pytest.raises(ConfigurationError) as _:
labels = torch.tensor([3, 4, 5, 6, 7, 8, 9, 10], device=device)
auc(predictions, labels)
@multi_device
def test_auc_with_mask(self, device: str):
auc = Auc()
predictions = torch.randn(8, device=device)
labels = torch.randint(0, 2, (8,), dtype=torch.long, device=device)
mask = torch.tensor([True, True, True, True, False, False, False, False], device=device)
auc(predictions, labels, mask)
computed_auc_value = auc.get_metric(reset=True)
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
labels[:4].cpu().numpy(), predictions[:4].cpu().numpy()
)
real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
assert_allclose(real_auc_value, computed_auc_value)
@multi_device
def test_auc_works_without_calling_metric_at_all(self, device: str):
auc = Auc()
auc.get_metric()
def test_distributed_auc(self):
predictions = torch.randn(8)
labels = torch.randint(3, 5, (8,), dtype=torch.long)
# We make sure that the positive label is always present.
labels[0] = 4
labels[4] = 4
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
labels.cpu().numpy(), predictions.cpu().numpy(), pos_label=4
)
predictions = [predictions[:4], predictions[4:]]
labels = [labels[:4], labels[4:]]
metric_kwargs = {"predictions": predictions, "gold_labels": labels}
desired_auc = metrics.auc(false_positive_rates, true_positive_rates)
run_distributed_test(
[-1, -1],
global_distributed_metric,
Auc(positive_label=4),
metric_kwargs,
desired_auc,
exact=False,
)
def test_distributed_auc_unequal_batches(self):
predictions = torch.randn(8)
labels = torch.randint(3, 5, (8,), dtype=torch.long)
# We make sure that the positive label is always present.
labels[0] = 4
labels[4] = 4
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
labels.cpu().numpy(), predictions.cpu().numpy(), pos_label=4
)
predictions = [predictions[:2], predictions[2:]]
labels = [labels[:2], labels[2:]]
metric_kwargs = {"predictions": predictions, "gold_labels": labels}
desired_auc = metrics.auc(false_positive_rates, true_positive_rates)
with pytest.raises(Exception) as _:
run_distributed_test(
[-1, -1],
global_distributed_metric,
Auc(positive_label=4),
metric_kwargs,
desired_auc,
exact=False,
)
| allennlp-master | tests/training/metrics/auc_test.py |
from typing import Any, Dict, List, Tuple, Union
import torch
from torch.testing import assert_allclose
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp.training.metrics import SequenceAccuracy
class SequenceAccuracyTest(AllenNlpTestCase):
@multi_device
def test_sequence_accuracy(self, device: str):
accuracy = SequenceAccuracy()
gold = torch.tensor([[1, 2, 3], [2, 4, 8], [0, 1, 1]], device=device)
predictions = torch.tensor(
[[[1, 2, 3], [1, 2, -1]], [[2, 4, 8], [2, 5, 9]], [[-1, -1, -1], [0, 1, -1]]],
device=device,
)
accuracy(predictions, gold)
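        # The gold sequence appears among the candidate beams for the first two rows but not
        # the third, so the expected accuracy is 2/3.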
actual_accuracy = accuracy.get_metric()["accuracy"]
assert_allclose(actual_accuracy, 2 / 3)
@multi_device
def test_sequence_accuracy_respects_mask(self, device: str):
accuracy = SequenceAccuracy()
gold = torch.tensor([[1, 2, 3], [2, 4, 8], [0, 1, 1], [11, 13, 17]], device=device)
predictions = torch.tensor(
[
[[1, 2, 3], [1, 2, -1]],
[[2, 4, 8], [2, 5, 9]],
[[-1, -1, -1], [0, 1, -1]],
[[12, 13, 17], [11, 13, 18]],
],
device=device,
)
mask = torch.tensor(
[[False, True, True], [True, True, True], [True, True, False], [True, False, True]],
device=device,
)
accuracy(predictions, gold, mask)
actual_accuracy = accuracy.get_metric()["accuracy"]
assert_allclose(actual_accuracy, 3 / 4)
@multi_device
def test_sequence_accuracy_accumulates_and_resets_correctly(self, device: str):
accuracy = SequenceAccuracy()
gold = torch.tensor([[1, 2, 3]], device=device)
accuracy(torch.tensor([[[1, 2, 3]]], device=device), gold)
accuracy(torch.tensor([[[1, 2, 4]]], device=device), gold)
actual_accuracy = accuracy.get_metric(reset=True)["accuracy"]
assert_allclose(actual_accuracy, 1 / 2)
assert accuracy.correct_count == 0
assert accuracy.total_count == 0
@multi_device
def test_get_metric_on_new_object_works(self, device: str):
accuracy = SequenceAccuracy()
actual_accuracy = accuracy.get_metric(reset=True)["accuracy"]
assert_allclose(actual_accuracy, 0)
def test_distributed_sequence_accuracy(self):
gold = torch.tensor([[1, 2, 3], [2, 4, 8], [0, 1, 1], [11, 13, 17]])
predictions = torch.tensor(
[
[[1, 2, 3], [1, 2, -1]],
[[2, 4, 8], [2, 5, 9]],
[[-1, -1, -1], [0, 1, -1]],
[[12, 13, 17], [11, 13, 18]],
]
)
mask = torch.tensor(
[[False, True, True], [True, True, True], [True, True, False], [True, False, True]],
)
gold = [gold[:2], gold[2:]]
predictions = [predictions[:2], predictions[2:]]
mask = [mask[:2], mask[2:]]
metric_kwargs = {"predictions": predictions, "gold_labels": gold, "mask": mask}
desired_values = {"accuracy": 3 / 4}
run_distributed_test(
[-1, -1],
global_distributed_metric,
SequenceAccuracy(),
metric_kwargs,
desired_values,
exact=False,
)
def test_multiple_distributed_runs(self):
gold = torch.tensor([[1, 2, 3], [2, 4, 8], [0, 1, 1], [11, 13, 17]])
predictions = torch.tensor(
[
[[1, 2, 3], [1, 2, -1]],
[[2, 4, 8], [2, 5, 9]],
[[-1, -1, -1], [0, 1, -1]],
[[12, 13, 17], [11, 13, 18]],
]
)
mask = torch.tensor(
[[False, True, True], [True, True, True], [True, True, False], [True, False, True]],
)
gold = [gold[:2], gold[2:]]
predictions = [predictions[:2], predictions[2:]]
mask = [mask[:2], mask[2:]]
metric_kwargs = {"predictions": predictions, "gold_labels": gold, "mask": mask}
desired_values = {"accuracy": 3 / 4}
run_distributed_test(
[-1, -1],
multiple_runs,
SequenceAccuracy(),
metric_kwargs,
desired_values,
exact=True,
)
def multiple_runs(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: SequenceAccuracy,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
for i in range(200):
metric(**kwargs)
assert desired_values["accuracy"] == metric.get_metric()["accuracy"]
| allennlp-master | tests/training/metrics/sequence_accuracy_test.py |
| allennlp-master | tests/training/metrics/__init__.py |
from typing import Any, Dict, List, Tuple, Union
import torch
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp.training.metrics import MeanAbsoluteError
class MeanAbsoluteErrorTest(AllenNlpTestCase):
@multi_device
def test_mean_absolute_error_computation(self, device: str):
mae = MeanAbsoluteError()
predictions = torch.tensor(
[[1.0, 1.5, 1.0], [2.0, 3.0, 3.5], [4.0, 5.0, 5.5], [6.0, 7.0, 7.5]], device=device
)
targets = torch.tensor(
[[0.0, 1.0, 0.0], [2.0, 2.0, 0.0], [4.0, 5.0, 0.0], [7.0, 7.0, 0.0]], device=device
)
mae(predictions, targets)
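        # The absolute errors sum per row to 2.5 + 4.5 + 5.5 + 8.5 = 21.0 over 12 elements.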
assert mae.get_metric()["mae"] == 21.0 / 12.0
mask = torch.tensor(
[[True, True, False], [True, True, False], [True, True, False], [True, True, False]],
device=device,
)
mae(predictions, targets, mask)
assert mae.get_metric()["mae"] == (21.0 + 3.5) / (12.0 + 8.0)
new_targets = torch.tensor(
[[2.0, 2.0, 0.0], [0.0, 1.0, 0.0], [7.0, 7.0, 0.0], [4.0, 5.0, 0.0]], device=device
)
mae(predictions, new_targets)
assert mae.get_metric()["mae"] == (21.0 + 3.5 + 32.0) / (12.0 + 8.0 + 12.0)
mae.reset()
mae(predictions, new_targets)
assert mae.get_metric()["mae"] == 32.0 / 12.0
def test_distributed_accuracy(self):
predictions = [
torch.tensor([[1.0, 1.5, 1.0], [2.0, 3.0, 3.5]]),
torch.tensor([[4.0, 5.0, 5.5], [6.0, 7.0, 7.5]]),
]
targets = [
torch.tensor([[0.0, 1.0, 0.0], [2.0, 2.0, 0.0]]),
torch.tensor([[4.0, 5.0, 0.0], [7.0, 7.0, 0.0]]),
]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_values = {"mae": 21.0 / 12.0}
run_distributed_test(
[-1, -1],
global_distributed_metric,
MeanAbsoluteError(),
metric_kwargs,
desired_values,
exact=True,
)
def test_multiple_distributed_runs(self):
predictions = [
torch.tensor([[1.0, 1.5, 1.0], [2.0, 3.0, 3.5]]),
torch.tensor([[4.0, 5.0, 5.5], [6.0, 7.0, 7.5]]),
]
targets = [
torch.tensor([[0.0, 1.0, 0.0], [2.0, 2.0, 0.0]]),
torch.tensor([[4.0, 5.0, 0.0], [7.0, 7.0, 0.0]]),
]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_values = {"mae": 21.0 / 12.0}
run_distributed_test(
[-1, -1],
multiple_runs,
MeanAbsoluteError(),
metric_kwargs,
desired_values,
exact=True,
)
def multiple_runs(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: MeanAbsoluteError,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
for i in range(200):
metric(**kwargs)
assert desired_values["mae"] == metric.get_metric()["mae"]
| allennlp-master | tests/training/metrics/mean_absolute_error_test.py |
from typing import Any, Dict, List, Tuple, Union
import math
from collections import Counter
import torch
from torch.testing import assert_allclose
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp.training.metrics import BLEU
from allennlp.training.util import ngrams, get_valid_tokens_mask
class BleuTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.metric = BLEU(ngram_weights=(0.5, 0.5), exclude_indices={0})
@multi_device
def test_get_valid_tokens_mask(self, device: str):
tensor = torch.tensor([[1, 2, 3, 0], [0, 1, 1, 0]], device=device)
result = get_valid_tokens_mask(tensor, self.metric._exclude_indices).long()
check = torch.tensor([[1, 1, 1, 0], [0, 1, 1, 0]], device=device)
assert_allclose(result, check)
@multi_device
def test_ngrams(self, device: str):
tensor = torch.tensor([1, 2, 3, 1, 2, 0], device=device)
exclude_indices = self.metric._exclude_indices
# Unigrams.
counts: Counter = Counter(ngrams(tensor, 1, exclude_indices))
unigram_check = {(1,): 2, (2,): 2, (3,): 1}
assert counts == unigram_check
# Bigrams.
counts = Counter(ngrams(tensor, 2, exclude_indices))
bigram_check = {(1, 2): 2, (2, 3): 1, (3, 1): 1}
assert counts == bigram_check
# Trigrams.
counts = Counter(ngrams(tensor, 3, exclude_indices))
trigram_check = {(1, 2, 3): 1, (2, 3, 1): 1, (3, 1, 2): 1}
assert counts == trigram_check
# ngram size too big, no ngrams produced.
counts = Counter(ngrams(tensor, 7, exclude_indices))
assert counts == {}
@multi_device
def test_bleu_computed_correctly(self, device: str):
self.metric.reset()
# shape: (batch_size, max_sequence_length)
predictions = torch.tensor([[1, 0, 0], [1, 1, 0], [1, 1, 1]], device=device)
# shape: (batch_size, max_gold_sequence_length)
gold_targets = torch.tensor([[2, 0, 0], [1, 0, 0], [1, 1, 2]], device=device)
self.metric(predictions, gold_targets)
assert self.metric._prediction_lengths == 6
assert self.metric._reference_lengths == 5
# Number of unigrams in predicted sentences that match gold sentences
# (but not more than maximum occurrence of gold unigram within batch).
        assert self.metric._precision_matches[1] == (
            0  # no matches in first sentence.
            + 1  # one clipped match in second sentence.
            + 2  # two clipped matches in third sentence.
        )
# Total number of predicted unigrams.
assert self.metric._precision_totals[1] == (1 + 2 + 3)
# Number of bigrams in predicted sentences that match gold sentences
# (but not more than maximum occurrence of gold bigram within batch).
assert self.metric._precision_matches[2] == (0 + 0 + 1)
# Total number of predicted bigrams.
assert self.metric._precision_totals[2] == (0 + 1 + 2)
# Brevity penalty should be 1.0
assert self.metric._get_brevity_penalty() == 1.0
bleu = self.metric.get_metric(reset=True)["BLEU"]
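        # With uniform weights of 0.5, BLEU = exp(0.5 * log(p1) + 0.5 * log(p2)), where the
        # modified precisions are p1 = 3/6 (unigrams) and p2 = 1/3 (bigrams) and the brevity
        # penalty is 1.0, matching the counts asserted above.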
check = math.exp(0.5 * (math.log(3) - math.log(6)) + 0.5 * (math.log(1) - math.log(3)))
assert_allclose(bleu, check)
@multi_device
def test_bleu_computed_with_zero_counts(self, device: str):
self.metric.reset()
assert self.metric.get_metric()["BLEU"] == 0
def test_distributed_bleu(self):
predictions = [
torch.tensor([[1, 0, 0], [1, 1, 0]]),
torch.tensor([[1, 1, 1]]),
]
gold_targets = [
torch.tensor([[2, 0, 0], [1, 0, 0]]),
torch.tensor([[1, 1, 2]]),
]
check = math.exp(0.5 * (math.log(3) - math.log(6)) + 0.5 * (math.log(1) - math.log(3)))
metric_kwargs = {"predictions": predictions, "gold_targets": gold_targets}
desired_values = {"BLEU": check}
run_distributed_test(
[-1, -1],
global_distributed_metric,
BLEU(ngram_weights=(0.5, 0.5), exclude_indices={0}),
metric_kwargs,
desired_values,
exact=False,
)
def test_multiple_distributed_runs(self):
predictions = [
torch.tensor([[1, 0, 0], [1, 1, 0]]),
torch.tensor([[1, 1, 1]]),
]
gold_targets = [
torch.tensor([[2, 0, 0], [1, 0, 0]]),
torch.tensor([[1, 1, 2]]),
]
check = math.exp(0.5 * (math.log(3) - math.log(6)) + 0.5 * (math.log(1) - math.log(3)))
metric_kwargs = {"predictions": predictions, "gold_targets": gold_targets}
desired_values = {"BLEU": check}
run_distributed_test(
[-1, -1],
multiple_runs,
BLEU(ngram_weights=(0.5, 0.5), exclude_indices={0}),
metric_kwargs,
desired_values,
exact=False,
)
def multiple_runs(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: BLEU,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
for i in range(200):
metric(**kwargs)
assert_allclose(desired_values["BLEU"], metric.get_metric()["BLEU"])
| allennlp-master | tests/training/metrics/bleu_test.py |
import pytest
import torch
from torch.testing import assert_allclose
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
run_distributed_test,
global_distributed_metric,
)
from allennlp.training.metrics import F1Measure
class F1MeasureTest(AllenNlpTestCase):
@multi_device
def test_f1_measure_catches_exceptions(self, device: str):
f1_measure = F1Measure(0)
predictions = torch.rand([5, 7], device=device)
out_of_range_labels = torch.tensor([10, 3, 4, 0, 1], device=device)
with pytest.raises(ConfigurationError):
f1_measure(predictions, out_of_range_labels)
@multi_device
def test_f1_measure(self, device: str):
f1_measure = F1Measure(positive_label=0)
predictions = torch.tensor(
[
[0.35, 0.25, 0.1, 0.1, 0.2],
[0.1, 0.6, 0.1, 0.2, 0.0],
[0.1, 0.6, 0.1, 0.2, 0.0],
[0.1, 0.5, 0.1, 0.2, 0.0],
[0.1, 0.2, 0.1, 0.7, 0.0],
[0.1, 0.6, 0.1, 0.2, 0.0],
],
device=device,
)
# [True Positive, True Negative, True Negative,
# False Negative, True Negative, False Negative]
targets = torch.tensor([0, 4, 1, 0, 3, 0], device=device)
f1_measure(predictions, targets)
metrics = f1_measure.get_metric()
precision = metrics["precision"]
recall = metrics["recall"]
f1 = metrics["f1"]
assert f1_measure._true_positives == 1.0
assert f1_measure._true_negatives == 3.0
assert f1_measure._false_positives == 0.0
assert f1_measure._false_negatives == 2.0
f1_measure.reset()
# check value
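        # precision = TP / (TP + FP) = 1 / 1, recall = TP / (TP + FN) = 1 / 3,
        # f1 = 2 * P * R / (P + R) = 0.5.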
assert_allclose(precision, 1.0)
assert_allclose(recall, 0.333333333)
assert_allclose(f1, 0.499999999)
# check type
assert isinstance(precision, float)
assert isinstance(recall, float)
assert isinstance(f1, float)
# Test the same thing with a mask:
mask = torch.tensor([True, False, True, True, True, False], device=device)
f1_measure(predictions, targets, mask)
metrics = f1_measure.get_metric()
precision = metrics["precision"]
recall = metrics["recall"]
f1 = metrics["f1"]
assert f1_measure._true_positives == 1.0
assert f1_measure._true_negatives == 2.0
assert f1_measure._false_positives == 0.0
assert f1_measure._false_negatives == 1.0
f1_measure.reset()
assert_allclose(precision, 1.0)
assert_allclose(recall, 0.5)
assert_allclose(f1, 0.6666666666)
@multi_device
def test_f1_measure_other_positive_label(self, device: str):
f1_measure = F1Measure(positive_label=1)
predictions = torch.tensor(
[
[0.35, 0.25, 0.1, 0.1, 0.2],
[0.1, 0.6, 0.1, 0.2, 0.0],
[0.1, 0.6, 0.1, 0.2, 0.0],
[0.1, 0.5, 0.1, 0.2, 0.0],
[0.1, 0.2, 0.1, 0.7, 0.0],
[0.1, 0.6, 0.1, 0.2, 0.0],
],
device=device,
)
# [True Negative, False Positive, True Positive,
# False Positive, True Negative, False Positive]
targets = torch.tensor([0, 4, 1, 0, 3, 0], device=device)
f1_measure(predictions, targets)
metrics = f1_measure.get_metric()
precision = metrics["precision"]
recall = metrics["recall"]
f1 = metrics["f1"]
assert f1_measure._true_positives == 1.0
assert f1_measure._true_negatives == 2.0
assert f1_measure._false_positives == 3.0
assert f1_measure._false_negatives == 0.0
f1_measure.reset()
# check value
assert_allclose(precision, 0.25)
assert_allclose(recall, 1.0)
assert_allclose(f1, 0.4)
# check type
assert isinstance(precision, float)
assert isinstance(recall, float)
assert isinstance(f1, float)
@multi_device
def test_f1_measure_accumulates_and_resets_correctly(self, device: str):
f1_measure = F1Measure(positive_label=0)
predictions = torch.tensor(
[
[0.35, 0.25, 0.1, 0.1, 0.2],
[0.1, 0.6, 0.1, 0.2, 0.0],
[0.1, 0.6, 0.1, 0.2, 0.0],
[0.1, 0.5, 0.1, 0.2, 0.0],
[0.1, 0.2, 0.1, 0.7, 0.0],
[0.1, 0.6, 0.1, 0.2, 0.0],
],
device=device,
)
# [True Positive, True Negative, True Negative,
# False Negative, True Negative, False Negative]
targets = torch.tensor([0, 4, 1, 0, 3, 0], device=device)
f1_measure(predictions, targets)
f1_measure(predictions, targets)
metrics = f1_measure.get_metric()
precision = metrics["precision"]
recall = metrics["recall"]
f1 = metrics["f1"]
assert f1_measure._true_positives == 2.0
assert f1_measure._true_negatives == 6.0
assert f1_measure._false_positives == 0.0
assert f1_measure._false_negatives == 4.0
f1_measure.reset()
assert_allclose(precision, 1.0)
assert_allclose(recall, 0.333333333)
assert_allclose(f1, 0.499999999)
assert f1_measure._true_positives == 0.0
assert f1_measure._true_negatives == 0.0
assert f1_measure._false_positives == 0.0
assert f1_measure._false_negatives == 0.0
@multi_device
def test_f1_measure_works_for_sequences(self, device: str):
f1_measure = F1Measure(positive_label=0)
predictions = torch.tensor(
[
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0], [0.1, 0.6, 0.1, 0.2, 0.0]],
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0], [0.1, 0.6, 0.1, 0.2, 0.0]],
],
device=device,
)
# [[True Positive, True Negative, True Negative],
# [True Positive, True Negative, False Negative]]
targets = torch.tensor([[0, 3, 4], [0, 1, 0]], device=device)
f1_measure(predictions, targets)
metrics = f1_measure.get_metric()
precision = metrics["precision"]
recall = metrics["recall"]
f1 = metrics["f1"]
assert f1_measure._true_positives == 2.0
assert f1_measure._true_negatives == 3.0
assert f1_measure._false_positives == 0.0
assert f1_measure._false_negatives == 1.0
f1_measure.reset()
assert_allclose(precision, 1.0)
assert_allclose(recall, 0.666666666)
assert_allclose(f1, 0.8)
# Test the same thing with a mask:
mask = torch.tensor([[False, True, False], [True, True, True]], device=device)
f1_measure(predictions, targets, mask)
metrics = f1_measure.get_metric()
precision = metrics["precision"]
recall = metrics["recall"]
f1 = metrics["f1"]
assert f1_measure._true_positives == 1.0
assert f1_measure._true_negatives == 2.0
assert f1_measure._false_positives == 0.0
assert f1_measure._false_negatives == 1.0
assert_allclose(precision, 1.0)
assert_allclose(recall, 0.5)
assert_allclose(f1, 0.66666666666)
def test_distributed_fbeta_measure(self):
predictions = [
torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0], [0.1, 0.6, 0.1, 0.2, 0.0]]
),
torch.tensor(
[[0.1, 0.5, 0.1, 0.2, 0.0], [0.1, 0.2, 0.1, 0.7, 0.0], [0.1, 0.6, 0.1, 0.2, 0.0]]
),
]
targets = [torch.tensor([0, 4, 1]), torch.tensor([0, 3, 0])]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_metrics = {
"precision": 1.0,
"recall": 0.333333333,
"f1": 0.499999999,
}
run_distributed_test(
[-1, -1],
global_distributed_metric,
F1Measure(positive_label=0),
metric_kwargs,
desired_metrics,
exact=False,
)
| allennlp-master | tests/training/metrics/f1_measure_test.py |
from typing import Any, Dict, List, Tuple, Union
import torch
from torch.testing import assert_allclose
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp.training.metrics import Entropy
class EntropyTest(AllenNlpTestCase):
@multi_device
def test_low_entropy_distribution(self, device: str):
metric = Entropy()
logits = torch.tensor(
[[10000, -10000, -10000, -1000], [10000, -10000, -10000, -1000]],
dtype=torch.float,
device=device,
)
metric(logits)
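        # The softmax of these logits is essentially one-hot, so the entropy is 0.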
assert metric.get_metric()["entropy"] == 0.0
@multi_device
def test_entropy_for_uniform_distribution(self, device: str):
metric = Entropy()
logits = torch.tensor([[1, 1, 1, 1], [1, 1, 1, 1]], dtype=torch.float, device=device)
metric(logits)
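        # The entropy of a uniform distribution over 4 classes is ln(4) ~= 1.38629436.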
assert_allclose(metric.get_metric()["entropy"], 1.38629436)
        # The actual logit values shouldn't affect a uniform distribution:
logits = torch.tensor([[2, 2, 2, 2], [2, 2, 2, 2]], dtype=torch.float, device=device)
metric(logits)
assert_allclose(metric.get_metric()["entropy"], 1.38629436)
metric.reset()
assert metric._entropy == 0.0
assert metric._count == 0.0
@multi_device
def test_masked_case(self, device: str):
metric = Entropy()
# This would have non-zero entropy without the mask.
logits = torch.tensor(
[[1, 1, 1, 1], [10000, -10000, -10000, -1000]], dtype=torch.float, device=device
)
mask = torch.tensor([False, True], device=device)
metric(logits, mask)
assert metric.get_metric()["entropy"] == 0.0
def test_distributed_entropy(self):
logits = torch.tensor([[1, 1, 1, 1], [1, 1, 1, 1]], dtype=torch.float)
logits = [logits[0], logits[1]]
metric_kwargs = {"logits": logits}
desired_values = {"entropy": 1.38629436}
run_distributed_test(
[-1, -1],
global_distributed_metric,
Entropy(),
metric_kwargs,
desired_values,
exact=False,
)
def test_multiple_distributed_runs(self):
logits = torch.tensor([[1, 1, 1, 1], [1, 1, 1, 1]], dtype=torch.float)
logits = [logits[0], logits[1]]
metric_kwargs = {"logits": logits}
desired_values = {"entropy": 1.38629436}
run_distributed_test(
[-1, -1],
multiple_runs,
Entropy(),
metric_kwargs,
desired_values,
exact=False,
)
def multiple_runs(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: Entropy,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
for i in range(200):
metric(**kwargs)
assert_allclose(desired_values["entropy"], metric.get_metric()["entropy"])
| allennlp-master | tests/training/metrics/entropy_test.py |
from typing import Any, Dict, List, Tuple, Union
import torch
import pytest
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp.training.metrics import BooleanAccuracy
class BooleanAccuracyTest(AllenNlpTestCase):
@multi_device
def test_accuracy_computation(self, device: str):
accuracy = BooleanAccuracy()
predictions = torch.tensor([[0, 1], [2, 3], [4, 5], [6, 7]], device=device)
targets = torch.tensor([[0, 1], [2, 2], [4, 5], [7, 7]], device=device)
accuracy(predictions, targets)
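        # Rows 1 and 3 match their targets exactly; rows 2 and 4 each differ in one position,
        # so the expected accuracy is 2/4.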
assert accuracy.get_metric() == 2 / 4
mask = torch.ones(4, 2, device=device).bool()
mask[1, 1] = 0
accuracy(predictions, targets, mask)
assert accuracy.get_metric() == 5 / 8
targets[1, 1] = 3
accuracy(predictions, targets)
assert accuracy.get_metric() == 8 / 12
accuracy.reset()
accuracy(predictions, targets)
assert accuracy.get_metric() == 3 / 4
@multi_device
def test_skips_completely_masked_instances(self, device: str):
accuracy = BooleanAccuracy()
predictions = torch.tensor([[0, 1], [2, 3], [4, 5], [6, 7]], device=device)
targets = torch.tensor([[0, 1], [2, 2], [4, 5], [7, 7]], device=device)
mask = torch.tensor(
[[False, False], [True, False], [True, True], [True, True]], device=device
)
accuracy(predictions, targets, mask)
# First example should be skipped, second is correct with mask, third is correct, fourth is wrong.
assert accuracy.get_metric() == 2 / 3
@multi_device
def test_incorrect_gold_labels_shape_catches_exceptions(self, device: str):
accuracy = BooleanAccuracy()
predictions = torch.rand([5, 7], device=device)
incorrect_shape_labels = torch.rand([5, 8], device=device)
with pytest.raises(ValueError):
accuracy(predictions, incorrect_shape_labels)
@multi_device
def test_incorrect_mask_shape_catches_exceptions(self, device: str):
accuracy = BooleanAccuracy()
predictions = torch.rand([5, 7], device=device)
labels = torch.rand([5, 7], device=device)
incorrect_shape_mask = torch.randint(0, 2, [5, 8], device=device).bool()
with pytest.raises(ValueError):
accuracy(predictions, labels, incorrect_shape_mask)
@multi_device
def test_does_not_divide_by_zero_with_no_count(self, device: str):
accuracy = BooleanAccuracy()
assert accuracy.get_metric() == pytest.approx(0.0)
def test_distributed_accuracy(self):
predictions = [torch.tensor([[0, 1], [2, 3]]), torch.tensor([[4, 5], [6, 7]])]
targets = [torch.tensor([[0, 1], [2, 2]]), torch.tensor([[4, 5], [7, 7]])]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_values = 0.5
run_distributed_test(
[-1, -1],
global_distributed_metric,
BooleanAccuracy(),
metric_kwargs,
desired_values,
exact=True,
)
def test_distributed_accuracy_unequal_batches(self):
predictions = [torch.tensor([[0, 1], [2, 3], [4, 5]]), torch.tensor([[6, 7]])]
targets = [torch.tensor([[0, 1], [2, 2], [4, 5]]), torch.tensor([[7, 7]])]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_values = 0.5
run_distributed_test(
[-1, -1],
global_distributed_metric,
BooleanAccuracy(),
metric_kwargs,
desired_values,
exact=True,
)
def test_multiple_distributed_runs(self):
predictions = [torch.tensor([[0, 1], [2, 3]]), torch.tensor([[4, 5], [6, 7]])]
targets = [torch.tensor([[0, 1], [2, 2]]), torch.tensor([[4, 5], [7, 7]])]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_values = 0.5
run_distributed_test(
[-1, -1],
multiple_runs,
BooleanAccuracy(),
metric_kwargs,
desired_values,
exact=True,
)
def multiple_runs(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: BooleanAccuracy,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
for i in range(200):
metric(**kwargs)
assert desired_values == metric.get_metric()
| allennlp-master | tests/training/metrics/boolean_accuracy_test.py |
from typing import Any, Dict, List, Tuple, Union
import torch
from torch.testing import assert_allclose
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp.training.metrics import UnigramRecall
class UnigramRecallTest(AllenNlpTestCase):
@multi_device
def test_sequence_recall(self, device: str):
recall = UnigramRecall()
gold = torch.tensor([[1, 2, 3], [2, 4, 8], [7, 1, 1]], device=device)
predictions = torch.tensor(
[[[1, 2, 3], [1, 2, -1]], [[2, 4, 8], [2, 5, 9]], [[-1, -1, -1], [7, 1, -1]]],
device=device,
)
recall(predictions, gold)
actual_recall = recall.get_metric()["unigram_recall"]
assert_allclose(actual_recall, 1)
@multi_device
def test_sequence_recall_respects_mask(self, device: str):
recall = UnigramRecall()
gold = torch.tensor([[2, 4, 8], [1, 2, 3], [7, 1, 1], [11, 14, 17]], device=device)
predictions = torch.tensor(
[
[[2, 4, 8], [2, 5, 9]], # 3/3
[[-1, 2, 4], [3, 8, -1]], # 2/2
[[-1, -1, -1], [7, 2, -1]], # 1/2
[[12, 13, 17], [11, 13, 18]], # 2/2
],
device=device,
)
mask = torch.tensor(
[[True, True, True], [False, True, True], [True, True, False], [True, False, True]],
device=device,
)
recall(predictions, gold, mask)
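        # Averaging the per-row recalls annotated above: (3/3 + 2/2 + 1/2 + 2/2) / 4 = 7/8.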
actual_recall = recall.get_metric()["unigram_recall"]
assert_allclose(actual_recall, 7 / 8)
@multi_device
def test_sequence_recall_accumulates_and_resets_correctly(self, device: str):
recall = UnigramRecall()
gold = torch.tensor([[1, 2, 3]], device=device)
recall(torch.tensor([[[1, 2, 3]]], device=device), gold)
recall(torch.tensor([[[7, 8, 4]]], device=device), gold)
actual_recall = recall.get_metric(reset=True)["unigram_recall"]
assert_allclose(actual_recall, 1 / 2)
assert recall.correct_count == 0
assert recall.total_count == 0
@multi_device
def test_get_metric_on_new_object_works(self, device: str):
recall = UnigramRecall()
actual_recall = recall.get_metric(reset=True)["unigram_recall"]
assert_allclose(actual_recall, 0)
def test_distributed_accuracy(self):
gold = torch.tensor([[2, 4, 8], [1, 2, 3], [7, 1, 1], [11, 14, 17]])
predictions = torch.tensor(
[
[[2, 4, 8], [2, 5, 9]], # 3/3
[[-1, 2, 4], [3, 8, -1]], # 2/2
[[-1, -1, -1], [7, 2, -1]], # 1/2
[[12, 13, 17], [11, 13, 18]], # 2/2
]
)
mask = torch.tensor(
[[True, True, True], [False, True, True], [True, True, False], [True, False, True]]
)
gold = [gold[:2], gold[2:]]
predictions = [predictions[:2], predictions[2:]]
mask = [mask[:2], mask[2:]]
metric_kwargs = {"predictions": predictions, "gold_labels": gold, "mask": mask}
desired_values = {"unigram_recall": 7 / 8}
run_distributed_test(
[-1, -1],
global_distributed_metric,
UnigramRecall(),
metric_kwargs,
desired_values,
exact=False,
)
def test_multiple_distributed_runs(self):
gold = torch.tensor([[2, 4, 8], [1, 2, 3], [7, 1, 1], [11, 14, 17]])
predictions = torch.tensor(
[
[[2, 4, 8], [2, 5, 9]], # 3/3
[[-1, 2, 4], [3, 8, -1]], # 2/2
[[-1, -1, -1], [7, 2, -1]], # 1/2
[[12, 13, 17], [11, 13, 18]], # 2/2
]
)
mask = torch.tensor(
[[True, True, True], [False, True, True], [True, True, False], [True, False, True]]
)
gold = [gold[:2], gold[2:]]
predictions = [predictions[:2], predictions[2:]]
mask = [mask[:2], mask[2:]]
metric_kwargs = {"predictions": predictions, "gold_labels": gold, "mask": mask}
desired_values = {"unigram_recall": 7 / 8}
run_distributed_test(
[-1, -1],
multiple_runs,
UnigramRecall(),
metric_kwargs,
desired_values,
exact=True,
)
def multiple_runs(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: UnigramRecall,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
for i in range(200):
metric(**kwargs)
assert desired_values["unigram_recall"] == metric.get_metric()["unigram_recall"]
| allennlp-master | tests/training/metrics/unigram_recall_test.py |
from typing import Any, Dict, List, Tuple, Union
import torch
from sklearn.metrics import precision_recall_fscore_support
from torch.testing import assert_allclose
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
run_distributed_test,
global_distributed_metric,
)
from allennlp.training.metrics import FBetaMeasure
class FBetaMeasureTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
        # Predicted classes (argmax of each row): [0, 1, 1, 1, 3, 1]
self.predictions = torch.tensor(
[
[0.35, 0.25, 0.1, 0.1, 0.2],
[0.1, 0.6, 0.1, 0.2, 0.0],
[0.1, 0.6, 0.1, 0.2, 0.0],
[0.1, 0.5, 0.1, 0.2, 0.0],
[0.1, 0.2, 0.1, 0.7, 0.0],
[0.1, 0.6, 0.1, 0.2, 0.0],
]
)
self.targets = torch.tensor([0, 4, 1, 0, 3, 0])
# detailed target state
self.pred_sum = [1, 4, 0, 1, 0]
self.true_sum = [3, 1, 0, 1, 1]
self.true_positive_sum = [1, 1, 0, 1, 0]
self.true_negative_sum = [3, 2, 6, 5, 5]
self.total_sum = [6, 6, 6, 6, 6]
desired_precisions = [1.00, 0.25, 0.00, 1.00, 0.00]
desired_recalls = [1 / 3, 1.00, 0.00, 1.00, 0.00]
desired_fscores = [
(2 * p * r) / (p + r) if p + r != 0.0 else 0.0
for p, r in zip(desired_precisions, desired_recalls)
]
self.desired_precisions = desired_precisions
self.desired_recalls = desired_recalls
self.desired_fscores = desired_fscores
@multi_device
def test_config_errors(self, device: str):
# Bad beta
pytest.raises(ConfigurationError, FBetaMeasure, beta=0.0)
# Bad average option
pytest.raises(ConfigurationError, FBetaMeasure, average="mega")
# Empty input labels
pytest.raises(ConfigurationError, FBetaMeasure, labels=[])
@multi_device
def test_runtime_errors(self, device: str):
fbeta = FBetaMeasure()
# Metric was never called.
pytest.raises(RuntimeError, fbeta.get_metric)
@multi_device
def test_fbeta_multiclass_state(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
fbeta = FBetaMeasure()
fbeta(self.predictions, self.targets)
# check state
assert_allclose(fbeta._pred_sum.tolist(), self.pred_sum)
assert_allclose(fbeta._true_sum.tolist(), self.true_sum)
assert_allclose(fbeta._true_positive_sum.tolist(), self.true_positive_sum)
assert_allclose(fbeta._true_negative_sum.tolist(), self.true_negative_sum)
assert_allclose(fbeta._total_sum.tolist(), self.total_sum)
@multi_device
def test_fbeta_multiclass_metric(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
fbeta = FBetaMeasure()
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
# check value
assert_allclose(precisions, self.desired_precisions)
assert_allclose(recalls, self.desired_recalls)
assert_allclose(fscores, self.desired_fscores)
# check type
assert isinstance(precisions, List)
assert isinstance(recalls, List)
assert isinstance(fscores, List)
@multi_device
def test_fbeta_multiclass_with_mask(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
mask = torch.tensor([True, True, True, True, True, False], device=device)
fbeta = FBetaMeasure()
fbeta(self.predictions, self.targets, mask)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
assert_allclose(fbeta._pred_sum.tolist(), [1, 3, 0, 1, 0])
assert_allclose(fbeta._true_sum.tolist(), [2, 1, 0, 1, 1])
assert_allclose(fbeta._true_positive_sum.tolist(), [1, 1, 0, 1, 0])
desired_precisions = [1.00, 1 / 3, 0.00, 1.00, 0.00]
desired_recalls = [0.50, 1.00, 0.00, 1.00, 0.00]
desired_fscores = [
(2 * p * r) / (p + r) if p + r != 0.0 else 0.0
for p, r in zip(desired_precisions, desired_recalls)
]
assert_allclose(precisions, desired_precisions)
assert_allclose(recalls, desired_recalls)
assert_allclose(fscores, desired_fscores)
@multi_device
def test_fbeta_multiclass_macro_average_metric(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
fbeta = FBetaMeasure(average="macro")
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
        # We keep the expected values on the CPU because FBetaMeasure returns its results on the CPU.
macro_precision = torch.tensor(self.desired_precisions).mean()
macro_recall = torch.tensor(self.desired_recalls).mean()
macro_fscore = torch.tensor(self.desired_fscores).mean()
# check value
assert_allclose(precisions, macro_precision)
assert_allclose(recalls, macro_recall)
assert_allclose(fscores, macro_fscore)
# check type
assert isinstance(precisions, float)
assert isinstance(recalls, float)
assert isinstance(fscores, float)
@multi_device
def test_fbeta_multiclass_micro_average_metric(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
fbeta = FBetaMeasure(average="micro")
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
        # We keep the expected values on the CPU because FBetaMeasure returns its results on the CPU.
true_positives = torch.tensor([1, 1, 0, 1, 0], dtype=torch.float32)
false_positives = torch.tensor([0, 3, 0, 0, 0], dtype=torch.float32)
false_negatives = torch.tensor([2, 0, 0, 0, 1], dtype=torch.float32)
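        # Micro-averaging pools the per-class counts; using means here instead of sums leaves
        # the precision and recall ratios unchanged.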
mean_true_positive = true_positives.mean()
mean_false_positive = false_positives.mean()
mean_false_negative = false_negatives.mean()
micro_precision = mean_true_positive / (mean_true_positive + mean_false_positive)
micro_recall = mean_true_positive / (mean_true_positive + mean_false_negative)
micro_fscore = (2 * micro_precision * micro_recall) / (micro_precision + micro_recall)
# check value
assert_allclose(precisions, micro_precision)
assert_allclose(recalls, micro_recall)
assert_allclose(fscores, micro_fscore)
@multi_device
def test_fbeta_multiclass_with_explicit_labels(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
        # Same predictions, but with an explicit label ordering.
fbeta = FBetaMeasure(labels=[4, 3, 2, 1, 0])
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
desired_precisions = self.desired_precisions[::-1]
desired_recalls = self.desired_recalls[::-1]
desired_fscores = self.desired_fscores[::-1]
# check value
assert_allclose(precisions, desired_precisions)
assert_allclose(recalls, desired_recalls)
assert_allclose(fscores, desired_fscores)
@multi_device
def test_fbeta_multiclass_with_macro_average(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
labels = [0, 1]
fbeta = FBetaMeasure(average="macro", labels=labels)
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
        # We keep the expected values on the CPU because FBetaMeasure returns its results on the CPU.
macro_precision = torch.tensor(self.desired_precisions)[labels].mean()
macro_recall = torch.tensor(self.desired_recalls)[labels].mean()
macro_fscore = torch.tensor(self.desired_fscores)[labels].mean()
# check value
assert_allclose(precisions, macro_precision)
assert_allclose(recalls, macro_recall)
assert_allclose(fscores, macro_fscore)
@multi_device
def test_fbeta_multiclass_with_micro_average(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
labels = [1, 3]
fbeta = FBetaMeasure(average="micro", labels=labels)
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
        # We keep the expected values on the CPU because FBetaMeasure returns its results on the CPU.
true_positives = torch.tensor([1, 1], dtype=torch.float32)
false_positives = torch.tensor([3, 0], dtype=torch.float32)
false_negatives = torch.tensor([0, 0], dtype=torch.float32)
mean_true_positive = true_positives.mean()
mean_false_positive = false_positives.mean()
mean_false_negative = false_negatives.mean()
micro_precision = mean_true_positive / (mean_true_positive + mean_false_positive)
micro_recall = mean_true_positive / (mean_true_positive + mean_false_negative)
micro_fscore = (2 * micro_precision * micro_recall) / (micro_precision + micro_recall)
# check value
assert_allclose(precisions, micro_precision)
assert_allclose(recalls, micro_recall)
assert_allclose(fscores, micro_fscore)
@multi_device
def test_fbeta_multiclass_with_weighted_average(self, device: str):
self.predictions = self.predictions.to(device)
self.targets = self.targets.to(device)
labels = [0, 1]
fbeta = FBetaMeasure(average="weighted", labels=labels)
fbeta(self.predictions, self.targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
weighted_precision, weighted_recall, weighted_fscore, _ = precision_recall_fscore_support(
self.targets.cpu().numpy(),
self.predictions.argmax(dim=1).cpu().numpy(),
labels=labels,
average="weighted",
)
# check value
assert_allclose(precisions, weighted_precision)
assert_allclose(recalls, weighted_recall)
assert_allclose(fscores, weighted_fscore)
@multi_device
def test_fbeta_handles_batch_size_of_one(self, device: str):
predictions = torch.tensor([[0.2862, 0.3479, 0.1627, 0.2033]], device=device)
targets = torch.tensor([1], device=device)
mask = torch.tensor([True], device=device)
fbeta = FBetaMeasure()
fbeta(predictions, targets, mask)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
assert_allclose(precisions, [0.0, 1.0, 0.0, 0.0])
assert_allclose(recalls, [0.0, 1.0, 0.0, 0.0])
@multi_device
def test_fbeta_handles_no_prediction_false_last_class(self, device: str):
predictions = torch.tensor([[0.65, 0.35], [0.0, 0.0]], device=device)
# preds = [0, NA]
targets = torch.tensor([0, 0], device=device)
fbeta = FBetaMeasure()
fbeta(predictions, targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
assert_allclose(precisions, [1.0, 0.0])
assert_allclose(recalls, [0.5, 0.0])
assert_allclose(fscores, [0.6667, 0.0])
@multi_device
def test_fbeta_handles_no_prediction_true_last_class(self, device: str):
predictions = torch.tensor([[0.65, 0.35], [0.0, 0.0]], device=device)
# preds = [0, NA]
targets = torch.tensor([0, 1], device=device)
fbeta = FBetaMeasure()
fbeta(predictions, targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
assert_allclose(precisions, [1.0, 0.0])
assert_allclose(recalls, [1.0, 0.0])
assert_allclose(fscores, [1.0, 0.0])
@multi_device
def test_fbeta_handles_no_prediction_true_other_class(self, device: str):
predictions = torch.tensor([[0.65, 0.35], [0.0, 0.0]], device=device)
# preds = [0, NA]
targets = torch.tensor([1, 0], device=device)
fbeta = FBetaMeasure()
fbeta(predictions, targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
assert_allclose(precisions, [0.0, 0.0])
assert_allclose(recalls, [0.0, 0.0])
assert_allclose(fscores, [0.0, 0.0])
@multi_device
def test_fbeta_handles_no_prediction_true_all_class(self, device: str):
predictions = torch.tensor([[0.65, 0.35], [0.0, 0.0]], device=device)
# preds = [0, NA]
targets = torch.tensor([1, 1], device=device)
fbeta = FBetaMeasure()
fbeta(predictions, targets)
metric = fbeta.get_metric()
precisions = metric["precision"]
recalls = metric["recall"]
fscores = metric["fscore"]
assert_allclose(precisions, [0.0, 0.0])
assert_allclose(recalls, [0.0, 0.0])
assert_allclose(fscores, [0.0, 0.0])
def test_distributed_fbeta_measure(self):
predictions = [
torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0], [0.1, 0.6, 0.1, 0.2, 0.0]]
),
torch.tensor(
[[0.1, 0.5, 0.1, 0.2, 0.0], [0.1, 0.2, 0.1, 0.7, 0.0], [0.1, 0.6, 0.1, 0.2, 0.0]]
),
]
targets = [torch.tensor([0, 4, 1]), torch.tensor([0, 3, 0])]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_metrics = {
"precision": self.desired_precisions,
"recall": self.desired_recalls,
"fscore": self.desired_fscores,
}
run_distributed_test(
[-1, -1],
global_distributed_metric,
FBetaMeasure(),
metric_kwargs,
desired_metrics,
exact=False,
)
def test_multiple_distributed_runs(self):
predictions = [
torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0], [0.1, 0.6, 0.1, 0.2, 0.0]]
),
torch.tensor(
[[0.1, 0.5, 0.1, 0.2, 0.0], [0.1, 0.2, 0.1, 0.7, 0.0], [0.1, 0.6, 0.1, 0.2, 0.0]]
),
]
targets = [torch.tensor([0, 4, 1]), torch.tensor([0, 3, 0])]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_metrics = {
"precision": self.desired_precisions,
"recall": self.desired_recalls,
"fscore": self.desired_fscores,
}
run_distributed_test(
[-1, -1],
multiple_runs,
FBetaMeasure(),
metric_kwargs,
desired_metrics,
exact=False,
)
def multiple_runs(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: FBetaMeasure,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
for i in range(200):
metric(**kwargs)
metric_values = metric.get_metric()
for key in desired_values:
assert_allclose(desired_values[key], metric_values[key])
| allennlp-master | tests/training/metrics/fbeta_measure_test.py |
from typing import Any, Dict, List, Tuple, Union
import torch
from nltk import Tree
from allennlp.common.testing import (
AllenNlpTestCase,
global_distributed_metric,
run_distributed_test,
)
from allennlp.training.metrics import EvalbBracketingScorer
class EvalbBracketingScorerTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
EvalbBracketingScorer.compile_evalb()
    def teardown_method(self):
        EvalbBracketingScorer.clean_evalb()
        super().teardown_method()
def test_evalb_correctly_scores_identical_trees(self):
tree1 = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
tree2 = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
evalb_scorer = EvalbBracketingScorer()
evalb_scorer([tree1], [tree2])
metrics = evalb_scorer.get_metric()
assert metrics["evalb_recall"] == 1.0
assert metrics["evalb_precision"] == 1.0
assert metrics["evalb_f1_measure"] == 1.0
def test_evalb_correctly_scores_imperfect_trees(self):
# Change to constiutency label (VP ... )should effect scores, but change to POS
# tag (NP dog) should have no effect.
tree1 = Tree.fromstring("(S (VP (D the) (NP dog)) (VP (V chased) (NP (D the) (N cat))))")
tree2 = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
evalb_scorer = EvalbBracketingScorer()
evalb_scorer([tree1], [tree2])
metrics = evalb_scorer.get_metric()
assert metrics["evalb_recall"] == 0.75
assert metrics["evalb_precision"] == 0.75
assert metrics["evalb_f1_measure"] == 0.75
def test_evalb_correctly_calculates_bracketing_metrics_over_multiple_trees(self):
tree1 = Tree.fromstring("(S (VP (D the) (NP dog)) (VP (V chased) (NP (D the) (N cat))))")
tree2 = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
evalb_scorer = EvalbBracketingScorer()
evalb_scorer([tree1, tree2], [tree2, tree2])
metrics = evalb_scorer.get_metric()
assert metrics["evalb_recall"] == 0.875
assert metrics["evalb_precision"] == 0.875
assert metrics["evalb_f1_measure"] == 0.875
def test_evalb_with_terrible_trees_handles_nan_f1(self):
# If precision and recall are zero, evalb returns nan f1.
# This checks that we handle the zero division.
tree1 = Tree.fromstring(
"(PP (VROOT (PP That) (VROOT (PP could) (VROOT (PP cost) (VROOT (PP him))))) (PP .))"
)
tree2 = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
evalb_scorer = EvalbBracketingScorer()
evalb_scorer([tree1], [tree2])
metrics = evalb_scorer.get_metric()
assert metrics["evalb_recall"] == 0.0
assert metrics["evalb_precision"] == 0.0
assert metrics["evalb_f1_measure"] == 0.0
def test_distributed_evalb(self):
tree1 = Tree.fromstring("(S (VP (D the) (NP dog)) (VP (V chased) (NP (D the) (N cat))))")
tree2 = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
predicted_trees = [[tree1], [tree2]]
gold_trees = [[tree2], [tree2]]
metric_kwargs = {"predicted_trees": predicted_trees, "gold_trees": gold_trees}
desired_values = {
"evalb_recall": 0.875,
"evalb_precision": 0.875,
"evalb_f1_measure": 0.875,
}
run_distributed_test(
[-1, -1],
global_distributed_metric,
EvalbBracketingScorer(),
metric_kwargs,
desired_values,
exact=True,
)
def test_multiple_distributed_runs(self):
tree1 = Tree.fromstring("(S (VP (D the) (NP dog)) (VP (V chased) (NP (D the) (N cat))))")
tree2 = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
predicted_trees = [[tree1], [tree2]]
gold_trees = [[tree2], [tree2]]
metric_kwargs = {"predicted_trees": predicted_trees, "gold_trees": gold_trees}
desired_values = {
"evalb_recall": 0.875,
"evalb_precision": 0.875,
"evalb_f1_measure": 0.875,
}
run_distributed_test(
[-1, -1],
multiple_runs,
EvalbBracketingScorer(),
metric_kwargs,
desired_values,
exact=False,
)
def multiple_runs(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: EvalbBracketingScorer,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
for i in range(200):
metric(**kwargs)
metric_values = metric.get_metric()
for key in desired_values:
assert desired_values[key] == metric_values[key]
| allennlp-master | tests/training/metrics/evalb_bracketing_scorer_test.py |
import math
import pytest
import torch
from torch.testing import assert_allclose
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp.training.metrics import SpearmanCorrelation
def spearman_formula(predictions, labels, mask=None):
"""
    This function implements the Spearman rank correlation formula from:
https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient
"""
if mask is not None:
predictions = predictions * mask
labels = labels * mask
    # If every value in either set is identical, the rank correlation is undefined, so return NaN.
if len(torch.unique(predictions)) == 1 or len(torch.unique(labels)) == 1:
return float("NaN")
len_pre = len(predictions)
predictions = [(k, v) for k, v in enumerate(predictions)]
predictions.sort(key=lambda x: x[1], reverse=True)
predictions = [(k, v) for k, v in enumerate(predictions)]
predictions.sort(key=lambda x: x[1][0])
labels = [(k, v) for k, v in enumerate(labels)]
labels.sort(key=lambda x: x[1], reverse=True)
labels = [(k, v) for k, v in enumerate(labels)]
labels.sort(key=lambda x: x[1][0])
total = 0
for i in range(len_pre):
total += (predictions[i][0] - labels[i][0]) ** 2
expected_spearman_correlation = 1 - 6 * total / (len_pre * (len_pre ** 2 - 1))
return expected_spearman_correlation
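# The helper above converts each sequence to ranks (via the two enumerate/sort passes) and
# applies the closed-form Spearman formula rho = 1 - 6 * sum(d_i ** 2) / (n * (n ** 2 - 1)),
# where d_i is the rank difference at position i. For example,
# spearman_formula(torch.tensor([1.0, 2.0, 3.0]), torch.tensor([3.0, 5.0, 8.0])) yields 1.0,
# since the two rankings agree and every d_i is 0.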
class SpearmanCorrelationTest(AllenNlpTestCase):
@multi_device
def test_unmasked_computation(self, device: str):
spearman_correlation = SpearmanCorrelation()
batch_size = 10
num_labels = 10
predictions1 = torch.randn(batch_size, num_labels, device=device)
labels1 = 0.5 * predictions1 + torch.randn(batch_size, num_labels, device=device)
predictions2 = torch.randn(1, device=device).repeat(num_labels)
predictions2 = predictions2.unsqueeze(0).expand(batch_size, -1)
labels2 = torch.randn(1, device=device).expand(num_labels)
labels2 = 0.5 * predictions2 + labels2.unsqueeze(0).expand(batch_size, -1)
        # In most cases the data looks like predictions1, where the values within a batch differ,
        # but in a few cases, as with predictions2, every value in the batch is exactly the same.
predictions_labels_ = [(predictions1, labels1), (predictions2, labels2)]
for predictions, labels in predictions_labels_:
spearman_correlation.reset()
spearman_correlation(predictions, labels)
assert_allclose(
spearman_formula(predictions.reshape(-1), labels.reshape(-1)),
spearman_correlation.get_metric(),
)
@multi_device
def test_masked_computation(self, device: str):
spearman_correlation = SpearmanCorrelation()
batch_size = 10
num_labels = 10
predictions1 = torch.randn(batch_size, num_labels, device=device)
labels1 = 0.5 * predictions1 + torch.randn(batch_size, num_labels, device=device)
predictions2 = torch.randn(1, device=device).expand(num_labels)
predictions2 = predictions2.unsqueeze(0).expand(batch_size, -1)
labels2 = torch.randn(1, device=device).expand(num_labels)
labels2 = 0.5 * predictions2 + labels2.unsqueeze(0).expand(batch_size, -1)
        # In most cases the data looks like predictions1, where the values within a batch differ,
        # but in a few cases, as with predictions2, every value in the batch is exactly the same.
predictions_labels_ = [(predictions1, labels1), (predictions2, labels2)]
# Random binary mask
mask = torch.randint(0, 2, size=(batch_size, num_labels), device=device).bool()
for predictions, labels in predictions_labels_:
spearman_correlation.reset()
spearman_correlation(predictions, labels, mask)
expected_spearman_correlation = spearman_formula(
predictions.view(-1), labels.view(-1), mask=mask.view(-1)
)
            # Because of the mask, the predictions and labels contain many zeros. The Spearman
            # correlation depends on how tied values happen to be ordered during sorting, so
            # with this many identical numbers the exact value can differ from run to run. The
            # sign is stable, however, so we only check that the two results agree in sign.
assert (expected_spearman_correlation * spearman_correlation.get_metric()) > 0
@multi_device
def test_reset(self, device: str):
spearman_correlation = SpearmanCorrelation()
batch_size = 10
num_labels = 10
predictions = torch.randn(batch_size, num_labels, device=device)
labels = 0.5 * predictions + torch.randn(batch_size, num_labels, device=device)
        # 1. Check that reset() followed by the same update reproduces the same metric value.
spearman_correlation.reset()
spearman_correlation(predictions, labels)
temp = spearman_correlation.get_metric()
spearman_correlation.reset()
spearman_correlation(predictions, labels)
assert spearman_correlation.get_metric() == temp
        # 2. Check that get_metric(reset=True) clears the accumulated state.
spearman_correlation.reset()
spearman_correlation(predictions, labels)
spearman_correlation.get_metric(reset=False)
        assert not math.isnan(spearman_correlation.get_metric())
spearman_correlation.get_metric(reset=True)
assert math.isnan(spearman_correlation.get_metric())
def test_distributed_spearman(self):
batch_size = 10
num_labels = 10
predictions = torch.randn(batch_size, num_labels)
labels = 0.5 * predictions + torch.randn(batch_size, num_labels)
desired_spearman = spearman_formula(predictions.reshape(-1), labels.reshape(-1))
predictions = [predictions[:5], predictions[5:]]
labels = [labels[:5], labels[5:]]
metric_kwargs = {"predictions": predictions, "gold_labels": labels}
run_distributed_test(
[-1, -1],
global_distributed_metric,
SpearmanCorrelation(),
metric_kwargs,
desired_spearman,
exact=False,
)
def test_distributed_spearman_unequal_batches(self):
batch_size = 10
num_labels = 10
predictions = torch.randn(batch_size, num_labels)
labels = 0.5 * predictions + torch.randn(batch_size, num_labels)
desired_spearman = spearman_formula(predictions.reshape(-1), labels.reshape(-1))
predictions = [predictions[:6], predictions[6:]]
labels = [labels[:6], labels[6:]]
metric_kwargs = {"predictions": predictions, "gold_labels": labels}
with pytest.raises(Exception) as _:
run_distributed_test(
[-1, -1],
global_distributed_metric,
SpearmanCorrelation(),
metric_kwargs,
desired_spearman,
exact=False,
)
| allennlp-master | tests/training/metrics/spearman_correlation_test.py |
| allennlp-master | tests/training/learning_rate_schedulers/__init__.py |
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import (
LearningRateScheduler,
CombinedLearningRateScheduler,
PolynomialDecay,
)
from allennlp.training.optimizers import Optimizer
class TestCombinedLRScheduler(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.model = torch.nn.Sequential(torch.nn.Linear(10, 10))
self.optimizer = Optimizer.from_params(
model_parameters=self.model.named_parameters(),
params=Params({"type": "sgd", "lr": 1.0}),
)
def get_scheduler(self) -> LearningRateScheduler:
return LearningRateScheduler.from_params(
Params(
{
"type": "combined",
"schedulers": [
[
2,
{
"type": "polynomial_decay",
"warmup_steps": 10,
"end_learning_rate": 0.5,
},
],
[
5,
{
"type": "polynomial_decay",
"warmup_steps": 0,
"end_learning_rate": 0.1,
},
],
],
}
),
optimizer=self.optimizer,
num_steps_per_epoch=10,
)
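    # As the assertions below verify, the combined schedule hands epochs 0-1 to the first
    # PolynomialDecay (2 epochs * 10 steps = 20 total steps) and epochs 2-6 to the second
    # (5 epochs * 10 steps = 50 total steps); after that no scheduler is active, so the
    # learning rate keeps its last value.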
def test_partial_schedule(self):
scheduler = self.get_scheduler()
assert isinstance(scheduler, CombinedLearningRateScheduler)
assert isinstance(scheduler._current_scheduler, PolynomialDecay)
# This should be 0 because the PolynomialDecay scheduler initializes the LR to 0.
assert self.optimizer.param_groups[0]["lr"] == 0.0
epoch_end_lrs = []
for epoch in range(10):
if epoch > 6:
assert scheduler._current_scheduler is None
elif epoch >= 2:
assert scheduler._current_scheduler is not None
assert scheduler._current_scheduler.total_steps == 50
assert scheduler._current_scheduler.base_values[0] == 0.5
else:
assert scheduler._current_scheduler is not None
assert scheduler._current_scheduler.total_steps == 20
assert scheduler._current_scheduler.base_values[0] == 1.0
for step in range(10):
scheduler.step_batch()
scheduler.step()
epoch_end_lrs.append(self.optimizer.param_groups[0]["lr"])
assert epoch_end_lrs[0] == 1.0
assert epoch_end_lrs[1] == 0.5
assert epoch_end_lrs[6] == 0.1
def test_load_from_checkpoint(self):
scheduler = self.get_scheduler()
for epoch in range(3):
for step in range(10):
scheduler.step_batch()
scheduler.step()
assert scheduler.last_epoch == 2
assert scheduler._current_scheduler is not None
assert scheduler._current_scheduler.total_steps == 50
assert scheduler._current_scheduler.base_values[0] == 0.5
state_dict = scheduler.state_dict()
new_scheduler = self.get_scheduler()
new_scheduler.load_state_dict(state_dict)
assert new_scheduler.last_epoch == 2
assert new_scheduler._current_scheduler is not None
assert new_scheduler._current_scheduler.total_steps == 50
assert new_scheduler._current_scheduler.base_values[0] == 0.5, state_dict
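# A minimal sketch (an assumption spelled out from the assertions in
# `test_partial_schedule`, not the library's implementation) of how the
# `[num_epochs, scheduler_config]` pairs partition training: the first
# scheduler covers epochs 0-1, the second covers epochs 2-6, and no scheduler
# is active for the remaining epochs.
def _active_scheduler_index(epoch, epoch_spans=(2, 5)):
    start = 0
    for index, span in enumerate(epoch_spans):
        if start <= epoch < start + span:
            return index
        start += span
    return None
assert _active_scheduler_index(0) == 0
assert _active_scheduler_index(2) == 1
assert _active_scheduler_index(6) == 1
assert _active_scheduler_index(7) is None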
| allennlp-master | tests/training/learning_rate_schedulers/combined_test.py |
import torch
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.training.optimizers import Optimizer
from allennlp.common.testing import AllenNlpTestCase
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.common.params import Params
class LearningRateSchedulersTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.model = torch.nn.Sequential(torch.nn.Linear(10, 10))
def test_reduce_on_plateau_error_throw_when_no_metrics_exist(self):
with pytest.raises(
ConfigurationError, match="learning rate scheduler requires a validation metric"
):
LearningRateScheduler.from_params(
optimizer=Optimizer.from_params(
model_parameters=self.model.named_parameters(), params=Params({"type": "adam"})
),
params=Params({"type": "reduce_on_plateau"}),
).step(None)
def test_reduce_on_plateau_works_when_metrics_exist(self):
LearningRateScheduler.from_params(
optimizer=Optimizer.from_params(
model_parameters=self.model.named_parameters(), params=Params({"type": "adam"})
),
params=Params({"type": "reduce_on_plateau"}),
).step(10)
def test_no_metric_wrapper_can_support_none_for_metrics(self):
lrs = LearningRateScheduler.from_params(
optimizer=Optimizer.from_params(
model_parameters=self.model.named_parameters(), params=Params({"type": "adam"})
),
params=Params({"type": "step", "step_size": 1}),
)
lrs.lr_scheduler.optimizer.step() # to avoid a pytorch warning
lrs.step(None)
def test_noam_learning_rate_schedule_does_not_crash(self):
lrs = LearningRateScheduler.from_params(
optimizer=Optimizer.from_params(
model_parameters=self.model.named_parameters(), params=Params({"type": "adam"})
),
params=Params({"type": "noam", "model_size": 10, "warmup_steps": 2000}),
)
lrs.step(None)
lrs.step_batch(None)
def test_polynomial_decay_works_properly(self):
scheduler = LearningRateScheduler.from_params(
optimizer=Optimizer.from_params(
model_parameters=self.model.named_parameters(),
params=Params({"type": "sgd", "lr": 1.0}),
),
params=Params(
{
"type": "polynomial_decay",
"warmup_steps": 2,
"num_epochs": 2,
"num_steps_per_epoch": 3,
"end_learning_rate": 0.1,
"power": 2,
}
),
)
optimizer = scheduler.optimizer
# Linear warmup for 2 steps.
scheduler.step_batch()
assert optimizer.param_groups[0]["lr"] == 0.5 # 1.0 * 1/2
scheduler.step_batch()
assert optimizer.param_groups[0]["lr"] == 1.0 # 1.0 * 2/2
# Polynomial decay for 4 steps.
scheduler.step_batch()
assert optimizer.param_groups[0]["lr"] == 0.60625 # (1.0 - 0.1) * (3/4) ** 2 + 0.1
scheduler.step_batch()
assert optimizer.param_groups[0]["lr"] == 0.325 # (1.0 - 0.1) * (2/4) ** 2 + 0.1
scheduler.step_batch()
assert optimizer.param_groups[0]["lr"] == 0.15625 # (1.0 - 0.1) * (1/4) ** 2 + 0.1
scheduler.step_batch()
assert optimizer.param_groups[0]["lr"] == 0.1 # (1.0 - 0.1) * (0/4) ** 2 + 0.1
def test_linear_with_warmup_works_properly(self):
scheduler = LearningRateScheduler.from_params(
optimizer=Optimizer.from_params(
model_parameters=self.model.named_parameters(),
params=Params({"type": "sgd", "lr": 1.0}),
),
params=Params(
{
"type": "linear_with_warmup",
"warmup_steps": 2,
"num_epochs": 2,
"num_steps_per_epoch": 3,
}
),
)
optimizer = scheduler.optimizer
# Linear warmup for 2 steps.
scheduler.step_batch()
assert optimizer.param_groups[0]["lr"] == 0.5 # 1.0 * 1/2
scheduler.step_batch()
assert optimizer.param_groups[0]["lr"] == 1.0 # 1.0 * 2/2
# Linear decay for 4 steps.
scheduler.step_batch()
assert optimizer.param_groups[0]["lr"] == 0.75
scheduler.step_batch()
assert optimizer.param_groups[0]["lr"] == 0.5
scheduler.step_batch()
assert optimizer.param_groups[0]["lr"] == 0.25
scheduler.step_batch()
assert optimizer.param_groups[0]["lr"] == 0.0
def test_exponential_works_properly(self):
scheduler = LearningRateScheduler.from_params(
optimizer=Optimizer.from_params(
model_parameters=self.model.named_parameters(),
params=Params({"type": "sgd", "lr": 1.0}),
),
params=Params({"type": "exponential", "gamma": 0.5}),
)
optimizer = scheduler.lr_scheduler.optimizer
optimizer.step() # to avoid a pytorch warning
# Initial learning rate should be unchanged for first epoch.
assert optimizer.param_groups[0]["lr"] == 1.0
scheduler.step()
assert optimizer.param_groups[0]["lr"] == 0.5
scheduler.step()
assert optimizer.param_groups[0]["lr"] == 0.5 ** 2
scheduler.step()
assert optimizer.param_groups[0]["lr"] == 0.5 ** 3
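# A minimal sketch (reconstructed from the values asserted in
# `test_polynomial_decay_works_properly`; an assumption, not the library's
# code) of the polynomial-decay schedule: a linear warmup to the base learning
# rate over `warmup_steps`, then polynomial decay toward `end_lr` over the
# remaining steps. With power=1 and end_lr=0.0 the same shape matches the
# `linear_with_warmup` values checked above.
def _polynomial_decay_sketch(step, base_lr=1.0, end_lr=0.1, warmup_steps=2, total_steps=6, power=2):
    if step < warmup_steps:
        return base_lr * step / warmup_steps
    remaining = (total_steps - step) / (total_steps - warmup_steps)
    return (base_lr - end_lr) * remaining ** power + end_lr
assert _polynomial_decay_sketch(1) == 0.5
assert abs(_polynomial_decay_sketch(3) - 0.60625) < 1e-12
assert _polynomial_decay_sketch(6) == 0.1
assert _polynomial_decay_sketch(4, end_lr=0.0, power=1) == 0.5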
| allennlp-master | tests/training/learning_rate_schedulers/learning_rate_scheduler_test.py |
from copy import deepcopy
from typing import Dict, Any
import torch
import pytest
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
class CosineWithRestartsTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.model = torch.nn.Sequential(torch.nn.Linear(10, 10))
        # We use these cases to verify that the scheduler works as expected.
        # Each case consists of 4 parameters:
        # - epochs: the total number of epochs to run for.
        # - params: parameters passed to initialize the scheduler.
        # - learning rate checks: a list of tuples, each of which specifies an epoch
        #   number and the expected value of the learning rate at that epoch.
        # - checkpoints: a list of epoch numbers at which to save the scheduler
        #   state, and then restore from the saved state and resume.
self.cosine_schedule_cases = [
(
30,
{"t_initial": 30, "t_mul": 1.0},
[(0, 1.0), (15, 0.5000000000000001), (29, 0.0027390523158632996)],
[10, 14],
),
(10, {"t_initial": 1, "t_mul": 2.0}, [(0, 1.0), (1, 1.0), (2, 0.5), (3, 1.0)], [1, 3]),
(30, {"t_initial": 1, "t_mul": 1.0}, [(0, 1.0), (15, 1.0), (29, 1.0)], []),
(
60,
{"t_initial": 30, "t_mul": 1.0},
[
(0, 1.0),
(15, 0.5000000000000001),
(29, 0.0027390523158632996),
(30, 1.0),
(45, 0.5000000000000001),
(59, 0.0027390523158632996),
],
[30, 35],
),
(
60,
{"t_initial": 30, "t_mul": 1.0, "eta_mul": 0.5},
[(0, 1.0), (15, 0.5000000000000001), (29, 0.0027390523158632996), (30, 0.5)],
[],
),
(
100,
{"t_initial": 30, "t_mul": 1.5},
[(0, 1.0), (29, 0.0027390523158632996), (30, 1.0), (74, 0.0012179748700879012)],
[],
),
(
210,
{"t_initial": 30, "t_mul": 2},
[
(0, 1.0),
(29, 0.0027390523158632996),
(30, 1.0),
(89, 0.0006852326227130834),
(90, 1.0),
(209, 0.00017133751222137006),
],
[],
),
(
210,
{"t_initial": 30, "t_mul": 2, "eta_mul": 0.5},
[(0, 1.0), (30, 0.5), (90, 0.25)],
[29, 90],
),
(
150,
{"t_initial": 30, "t_mul": 1},
[
(0, 1.0),
(29, 0.0027390523158632996),
(30, 1.0),
(59, 0.0027390523158632996),
(60, 1.0),
(89, 0.0027390523158632996),
(90, 1.0),
],
[],
),
(10, {"t_initial": 1, "t_mul": 1, "eta_mul": 0.5}, [(0, 1.0), (1, 0.5), (2, 0.25)], []),
]
def _get_optimizer(self, lr: float = 1.0):
return Optimizer.from_params(
model_parameters=self.model.named_parameters(), params=Params({"type": "sgd", "lr": lr})
)
def test_from_params(self):
"""Make sure `from_params` initializes an instance properly."""
optim = self._get_optimizer()
sched = LearningRateScheduler.from_params(
optimizer=optim, params=Params({"type": "cosine", "t_initial": 5})
)
assert sched.t_initial == 5
assert sched.last_epoch == -1
        # Learning rate should be unchanged after initializing the scheduler.
assert optim.param_groups[0]["lr"] == 1.0
with pytest.raises(ConfigurationError):
# t_initial is required.
LearningRateScheduler.from_params(optimizer=optim, params=Params({"type": "cosine"}))
def test_schedules(self):
"""Make sure the math is correct."""
for epochs, params, lr_checks, _ in self.cosine_schedule_cases:
optimizer = self._get_optimizer()
params["type"] = "cosine"
scheduler = LearningRateScheduler.from_params(
optimizer=optimizer, params=Params(params)
)
lrs = [optimizer.param_groups[0]["lr"]]
for _ in range(epochs):
scheduler.step()
lrs.append(optimizer.param_groups[0]["lr"])
for it, lr in lr_checks:
assert lrs[it] == lr, f"Iteration {it}: {lrs[it]} != {lr}"
def test_schedules_with_save_and_resume(self):
"""Make sure scheduler will resume with the right state."""
def init_and_restore_scheduler(
optimizer: torch.optim.Optimizer,
params: Dict[str, Any],
state_dict: Dict[str, Any] = None,
):
"""
Initialize a new scheduler and optionally restore its state from
a checkpoint.
"""
params["type"] = "cosine"
scheduler = LearningRateScheduler.from_params(
optimizer=optimizer, params=Params(deepcopy(params))
)
if state_dict is not None:
scheduler.load_state_dict(state_dict)
return scheduler
for epochs, params, lr_checks, checkpoints in self.cosine_schedule_cases:
optimizer = self._get_optimizer()
scheduler = init_and_restore_scheduler(optimizer, params)
state = scheduler.state_dict()
lrs = [optimizer.param_groups[0]["lr"]]
for epoch in range(epochs):
if epoch in checkpoints:
# Restore scheduler from state dict.
scheduler = init_and_restore_scheduler(optimizer, params, state_dict=state)
# Take step and record learning rate.
scheduler.step(1)
lrs.append(optimizer.param_groups[0]["lr"])
# Save state again.
state = scheduler.state_dict()
for it, lr in lr_checks:
assert lrs[it] == lr, f"Iteration {it}: {lrs[it]} != {lr}"
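# A minimal sketch (inferred from the expected values in
# `cosine_schedule_cases`; an assumption, not the library's implementation) of
# cosine annealing with warm restarts: within the i-th cycle, whose length
# grows by `t_mul` and whose amplitude shrinks by `eta_mul`, the learning rate
# follows half a cosine from the cycle's maximum down toward zero.
import math
def _cosine_restarts_sketch(epoch, t_initial, t_mul=1.0, eta_mul=1.0, eta_max=1.0):
    cycle, cycle_start, cycle_len = 0, 0, t_initial
    while epoch >= cycle_start + cycle_len:
        cycle_start += cycle_len
        cycle += 1
        cycle_len = int(t_initial * t_mul ** cycle)
    amplitude = eta_max * eta_mul ** cycle
    return amplitude * 0.5 * (1 + math.cos(math.pi * (epoch - cycle_start) / cycle_len))
assert _cosine_restarts_sketch(0, t_initial=30) == 1.0
assert abs(_cosine_restarts_sketch(15, t_initial=30) - 0.5) < 1e-12
assert abs(_cosine_restarts_sketch(30, t_initial=30, t_mul=1.0, eta_mul=0.5) - 0.5) < 1e-12
assert abs(_cosine_restarts_sketch(74, t_initial=30, t_mul=1.5) - 0.0012179748700879012) < 1e-9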
| allennlp-master | tests/training/learning_rate_schedulers/cosine_test.py |
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Dict, List, Tuple
import torch
import pytest
from allennlp.data.dataset_readers.dataset_reader import AllennlpDataset
from allennlp.common import Lazy, Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import PyTorchDataLoader
from allennlp.training import Trainer
from allennlp.training.learning_rate_schedulers import LearningRateScheduler, SlantedTriangular
from allennlp.training.optimizers import Optimizer
def is_hat_shaped(learning_rates: List[float]):
"""
Check if the list of learning rates is "hat" shaped, i.e.,
increases then decreases
"""
# sufficient conditions:
# has both an increasing and decreasing segment
# decrease segment occurs after increasing segment
# once start decreasing, can't increase again
has_increasing_segment = False
has_decreasing_segment = False
for k in range(1, len(learning_rates)):
delta = learning_rates[k] - learning_rates[k - 1]
if delta > 1e-8:
has_increasing_segment = True
if has_decreasing_segment:
# can't increase again after hitting the max
return False
elif delta < -1e-8:
if not has_increasing_segment:
                # can't decrease without having an increasing segment
return False
has_decreasing_segment = True
else:
# no change
pass
return has_increasing_segment and has_decreasing_segment
class SlantedTriangularTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.model = torch.nn.Sequential(
OrderedDict([("lin1", torch.nn.Linear(10, 10)), ("lin2", torch.nn.Linear(10, 10))])
)
def _get_optimizer(self, lr: float = 1.0):
optimizer_params = Params({"type": "sgd", "lr": lr})
optimizer_params["parameter_groups"] = [[[f"^{m}"], {}] for m in self.model._modules]
return Optimizer.from_params(
model_parameters=self.model.named_parameters(), params=optimizer_params
)
def _run_scheduler_get_lrs(self, params, num_steps_per_epoch):
optimizer = self._get_optimizer()
params["type"] = "slanted_triangular"
scheduler = LearningRateScheduler.from_params(
optimizer=optimizer, params=Params(deepcopy(params))
)
lrs = []
batch_num_total = 0
for epoch in range(params["num_epochs"]):
for _ in range(num_steps_per_epoch):
batch_num_total += 1
# allennlp trainer calls step_batch after updating parameters
# so collect lr at time of parameter update
lrs.append(
[
param_group["lr"] * float(param_group["params"][0].requires_grad)
for param_group in optimizer.param_groups[:2]
]
)
scheduler.step_batch(batch_num_total)
if params.get("gradual_unfreezing") and epoch == 0:
assert scheduler.freezing_current
            # step() is called once at the end of each epoch with the validation metric (None here)
scheduler.step(None)
return lrs
def test_is_hat_shaped(self):
assert not is_hat_shaped([0.0] * 10)
assert not is_hat_shaped([float(k) for k in range(10)])
assert not is_hat_shaped([float(10 - k) for k in range(10)])
assert is_hat_shaped([float(k) for k in range(10)] + [float(10 - k) for k in range(10)])
assert not is_hat_shaped(
[float(k) for k in range(10)]
+ [float(10 - k) for k in range(10)]
+ [float(k) for k in range(10)]
)
def test_from_params_in_trainer(self):
# This is more of an integration test, making sure that a bunch of pieces fit together
# correctly, but it matters most for this learning rate scheduler, so we're testing it here.
params = Params(
{
"num_epochs": 5,
"learning_rate_scheduler": {
"type": "slanted_triangular",
"gradual_unfreezing": True,
"discriminative_fine_tuning": True,
"decay_factor": 0.5,
},
}
)
# The method called in the logic below only checks the length of this list, not its
# contents, so this should be safe.
instances = AllennlpDataset([1] * 40)
optim = self._get_optimizer()
trainer = Trainer.from_params(
model=self.model,
optimizer=Lazy(lambda **kwargs: optim),
serialization_dir=self.TEST_DIR,
params=params,
data_loader=PyTorchDataLoader(instances, batch_size=10),
)
assert isinstance(trainer._learning_rate_scheduler, SlantedTriangular)
# This is what we wrote this test for: to be sure that num_epochs is passed correctly, and
# that num_steps_per_epoch is computed and passed correctly. This logic happens inside of
# `Trainer.from_partial_objects`.
assert trainer._learning_rate_scheduler.num_epochs == 5
assert trainer._learning_rate_scheduler.num_steps_per_epoch == 4
# And we'll do one more to make sure that we can override num_epochs in the scheduler if we
# really want to. Not sure why you would ever want to in this case; this is just testing
# the functionality.
params = Params(
{
"num_epochs": 5,
"learning_rate_scheduler": {
"type": "slanted_triangular",
"num_epochs": 3,
"gradual_unfreezing": True,
"discriminative_fine_tuning": True,
"decay_factor": 0.5,
},
}
)
trainer = Trainer.from_params(
model=self.model,
optimizer=Lazy(lambda **kwargs: optim),
serialization_dir=self.TEST_DIR,
params=params,
data_loader=PyTorchDataLoader(instances, batch_size=10),
)
assert trainer._learning_rate_scheduler.num_epochs == 3
def test_from_params(self):
optim = self._get_optimizer()
sched = LearningRateScheduler.from_params(
optimizer=optim,
params=Params(
{
"type": "slanted_triangular",
"num_epochs": 5,
"num_steps_per_epoch": 10,
"gradual_unfreezing": True,
"discriminative_fine_tuning": True,
"decay_factor": 0.5,
}
),
)
assert sched.num_epochs == 5
assert sched.num_steps_per_epoch == 10
assert sched.gradual_unfreezing is True
assert sched.freezing_current is True
assert len(optim.param_groups) == 3
# The default parameter group in the Optimizer is empty
assert not optim.param_groups[-1]["params"]
assert optim.param_groups[-2]["lr"] == 1.0 / sched.ratio
assert optim.param_groups[-3]["lr"] == 0.5 / sched.ratio
with pytest.raises(ConfigurationError):
# num_epochs and num_steps_per_epoch are required
LearningRateScheduler.from_params(
optimizer=optim, params=Params({"type": "slanted_triangular", "num_epochs": 5})
)
LearningRateScheduler.from_params(
optimizer=optim,
                params=Params({"type": "slanted_triangular", "num_steps_per_epoch": 10}),
)
def test_schedules(self):
slanted_triangular_cases: List[Tuple[Dict[str, Any], List[Tuple[int, int, float]]]] = [
(
{
"num_epochs": 5,
"num_steps_per_epoch": 10,
"gradual_unfreezing": True,
}, # parameters
[
(0, 1, 0.03125), # iteration, layer, learning rate
(0, 0, 0.0),
(1, 1, 1.0),
(1, 0, 0.0),
(9, 1, 0.138888),
(9, 0, 0.0), # end of the first epoch
(10, 1, 0.03125),
(10, 0, 0.03125),
(14, 1, 1.0),
(14, 0, 1.0),
(49, 1, 0.05815972),
(49, 0, 0.05815972),
],
),
(
{
"num_epochs": 5,
"num_steps_per_epoch": 10,
"discriminative_fine_tuning": True,
"decay_factor": 0.5,
}, # parameters
[
(0, 1, 0.03125), # iteration, layer, learning rate
(0, 0, 0.015625),
(5, 1, 1.0),
(5, 0, 0.5),
(49, 1, 0.052777),
(49, 0, 0.026388),
],
),
(
{
"num_epochs": 5,
"num_steps_per_epoch": 10,
"gradual_unfreezing": True,
"discriminative_fine_tuning": True,
"decay_factor": 0.5,
}, # parameters
[
(0, 1, 0.03125), # iteration, layer, learning rate
(0, 0, 0.0),
(1, 1, 1.0),
(1, 0, 0.0),
(9, 1, 0.138888),
(9, 0, 0.0), # end of the first epoch
(10, 1, 0.03125),
(10, 0, 0.015625),
(14, 1, 1.0),
(14, 0, 0.5),
(49, 1, 0.0581597222),
(49, 0, 0.0290798611),
],
),
]
for params, lr_checks in slanted_triangular_cases:
lrs = self._run_scheduler_get_lrs(params, params["num_steps_per_epoch"])
for it, layer, lr in lr_checks:
lr_check = round(lr, 5)
lr = round(lrs[it][layer], 5)
assert (
lr == lr_check
), f"Learning rate {lr} at iteration {it} at layer {layer} != {lr_check}."
def test_schedules_num_steps_per_epoch(self):
# ensure the learning rate schedule still maintains hat shape
# if number of actual batches differs from parameter provided
# in constructor
for gradual_unfreezing in [True, False]:
for discriminative_fine_tuning in [True, False]:
for num_actual_steps_per_epoch in [7, 11]:
params = {
"num_epochs": 5,
"num_steps_per_epoch": 10,
"gradual_unfreezing": gradual_unfreezing,
"discriminative_fine_tuning": discriminative_fine_tuning,
}
lrs = self._run_scheduler_get_lrs(params, num_actual_steps_per_epoch)
first_layer_lrs = [rates[0] for rates in lrs]
second_layer_lrs = [rates[1] for rates in lrs]
if gradual_unfreezing:
assert max(first_layer_lrs[:num_actual_steps_per_epoch]) < 1e-8
assert min(first_layer_lrs[:num_actual_steps_per_epoch]) > -1e-8
assert is_hat_shaped(first_layer_lrs[num_actual_steps_per_epoch:])
assert is_hat_shaped(second_layer_lrs[:num_actual_steps_per_epoch])
assert is_hat_shaped(second_layer_lrs[num_actual_steps_per_epoch:])
else:
assert is_hat_shaped(first_layer_lrs)
assert is_hat_shaped(second_layer_lrs)
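# A minimal sketch (reconstructed from the expected values in `test_schedules`
# for the case without gradual unfreezing, assuming the default cut_frac=0.1
# and ratio=32; not the library's code) of the slanted triangular schedule:
# a short linear warmup over `cut` steps followed by a long linear decay, with
# the lowest learning rate equal to lr_max / ratio.
def _slanted_triangular_sketch(step, total_steps=50, lr_max=1.0, cut_frac=0.1, ratio=32):
    cut = int(total_steps * cut_frac)
    if step < cut:
        fraction = step / cut
    else:
        fraction = 1 - (step - cut) / (cut * (1 / cut_frac - 1))
    return lr_max * (1 + fraction * (ratio - 1)) / ratio
assert _slanted_triangular_sketch(0) == 0.03125
assert _slanted_triangular_sketch(5) == 1.0
assert round(_slanted_triangular_sketch(49), 5) == 0.05278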
| allennlp-master | tests/training/learning_rate_schedulers/slanted_triangular_test.py |
allennlp-master | tests/training/momentum_schedulers/__init__.py |
|
from math import isclose
import torch
from allennlp.common.params import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.training.momentum_schedulers import MomentumScheduler
from allennlp.training.optimizers import Optimizer
class InvertedTriangularTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.model = torch.nn.Sequential(torch.nn.Linear(10, 10))
self.base_momentum = 0.9
def _get_optimizer(self):
return Optimizer.from_params(
model_parameters=self.model.named_parameters(),
params=Params({"type": "sgd", "lr": 1.0, "momentum": self.base_momentum}),
)
def test_from_params(self):
optimizer = self._get_optimizer()
scheduler = MomentumScheduler.from_params(
optimizer=optimizer,
params=Params({"type": "inverted_triangular", "cool_down": 10, "warm_up": 10}),
)
assert scheduler.cool_down == 10
assert scheduler.warm_up == 10
assert scheduler.ratio == 10
assert scheduler.last_epoch == -1
def test_basic_schedule(self):
optimizer = self._get_optimizer()
scheduler = MomentumScheduler.from_params(
optimizer=optimizer,
params=Params(
{"type": "inverted_triangular", "cool_down": 6, "warm_up": 10, "ratio": 5}
),
)
# Before first epoch, momentum should be unchanged.
assert optimizer.param_groups[0]["momentum"] == self.base_momentum
# After first epoch, `step` is called, and momentum should be adjusted for
# the next epoch.
scheduler.step()
assert isclose(
optimizer.param_groups[0]["momentum"],
self.base_momentum - (self.base_momentum - self.base_momentum / 5) * (1 / 6),
)
# After second epoch, `step` is called and momentum is updated for 3rd epoch.
scheduler.step()
assert isclose(
optimizer.param_groups[0]["momentum"],
self.base_momentum - (self.base_momentum - self.base_momentum / 5) * (2 / 6),
)
scheduler.last_epoch = 4
# ... after the 6th epoch (epoch id 5), momentum should be set to `base_momentum / ratio`.
scheduler.step()
assert isclose(optimizer.param_groups[0]["momentum"], self.base_momentum / 5)
        # Then the momentum starts increasing again.
scheduler.step()
assert isclose(
optimizer.param_groups[0]["momentum"],
self.base_momentum / 5 + (self.base_momentum - self.base_momentum / 5) * (1 / 10),
)
# After the 16th epoch (6 + 10) (epoch id 15), momentum should be back to the base level.
scheduler.last_epoch = 14
scheduler.step()
assert isclose(optimizer.param_groups[0]["momentum"], self.base_momentum)
scheduler.step()
assert isclose(optimizer.param_groups[0]["momentum"], self.base_momentum)
scheduler.step()
assert isclose(optimizer.param_groups[0]["momentum"], self.base_momentum)
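# A minimal sketch (matching the assertions in `test_basic_schedule`; an
# assumption, not the library's implementation) of the inverted triangular
# momentum schedule: momentum drops linearly from `base` to `base / ratio`
# over `cool_down` epochs, climbs back up over `warm_up` epochs, and then
# stays at `base`.
def _inverted_triangular_sketch(epochs_completed, base=0.9, cool_down=6, warm_up=10, ratio=5):
    low = base / ratio
    if epochs_completed <= cool_down:
        return base - (base - low) * epochs_completed / cool_down
    if epochs_completed <= cool_down + warm_up:
        return low + (base - low) * (epochs_completed - cool_down) / warm_up
    return base
assert isclose(_inverted_triangular_sketch(1), 0.9 - (0.9 - 0.9 / 5) * (1 / 6))
assert isclose(_inverted_triangular_sketch(6), 0.9 / 5)
assert isclose(_inverted_triangular_sketch(16), 0.9)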
| allennlp-master | tests/training/momentum_schedulers/inverted_triangular_test.py |
allennlp-master | tests/predictors/__init__.py |
|
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
class TestSentenceTaggerPredictor(AllenNlpTestCase):
def test_predictions_to_labeled_instances(self):
inputs = {"sentence": "Eric Wallace was an intern at AI2"}
archive = load_archive(
self.FIXTURES_ROOT / "simple_tagger" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "sentence_tagger")
instance = predictor._json_to_instance(inputs)
outputs = predictor._model.forward_on_instance(instance)
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert len(new_instances) > 1
for new_instance in new_instances:
assert "tags" in new_instance
assert len(new_instance["tags"]) == 7 # 7 words in input
| allennlp-master | tests/predictors/sentence_tagger_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp.nn import util
class TestPredictor(AllenNlpTestCase):
def test_from_archive_does_not_consume_params(self):
archive = load_archive(
self.FIXTURES_ROOT / "simple_tagger" / "serialization" / "model.tar.gz"
)
Predictor.from_archive(archive, "sentence_tagger")
# If it consumes the params, this will raise an exception
Predictor.from_archive(archive, "sentence_tagger")
def test_loads_correct_dataset_reader(self):
# This model has a different dataset reader configuration for train and validation. The
# parameter that differs is the token indexer's namespace.
archive = load_archive(
self.FIXTURES_ROOT / "simple_tagger_with_span_f1" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "sentence_tagger")
assert predictor._dataset_reader._token_indexers["tokens"].namespace == "test_tokens"
predictor = Predictor.from_archive(
archive, "sentence_tagger", dataset_reader_to_load="train"
)
assert predictor._dataset_reader._token_indexers["tokens"].namespace == "tokens"
predictor = Predictor.from_archive(
archive, "sentence_tagger", dataset_reader_to_load="validation"
)
assert predictor._dataset_reader._token_indexers["tokens"].namespace == "test_tokens"
def test_get_gradients(self):
inputs = {
"sentence": "I always write unit tests",
}
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive)
instance = predictor._json_to_instance(inputs)
outputs = predictor._model.forward_on_instance(instance)
labeled_instances = predictor.predictions_to_labeled_instances(instance, outputs)
for instance in labeled_instances:
grads = predictor.get_gradients([instance])[0]
assert "grad_input_1" in grads
assert grads["grad_input_1"] is not None
            assert len(grads["grad_input_1"][0]) == 5  # 5 words in the input sentence
def test_get_gradients_when_requires_grad_is_false(self):
inputs = {
"sentence": "I always write unit tests",
}
archive = load_archive(
self.FIXTURES_ROOT
/ "basic_classifier"
/ "embedding_with_trainable_is_false"
/ "model.tar.gz"
)
predictor = Predictor.from_archive(archive)
# ensure that requires_grad is initially False on the embedding layer
embedding_layer = util.find_embedding_layer(predictor._model)
assert not embedding_layer.weight.requires_grad
instance = predictor._json_to_instance(inputs)
outputs = predictor._model.forward_on_instance(instance)
labeled_instances = predictor.predictions_to_labeled_instances(instance, outputs)
# ensure that gradients are always present, despite requires_grad being false on the embedding layer
for instance in labeled_instances:
grads = predictor.get_gradients([instance])[0]
assert bool(grads)
# ensure that no side effects remain
assert not embedding_layer.weight.requires_grad
def test_captures_model_internals(self):
inputs = {"sentence": "I always write unit tests"}
archive = load_archive(
self.FIXTURES_ROOT
/ "basic_classifier"
/ "embedding_with_trainable_is_false"
/ "model.tar.gz"
)
predictor = Predictor.from_archive(archive)
with predictor.capture_model_internals() as internals:
predictor.predict_json(inputs)
assert len(internals) == 10
with predictor.capture_model_internals(r"_text_field_embedder.*") as internals:
predictor.predict_json(inputs)
assert len(internals) == 2
def test_predicts_batch_json(self):
inputs = {"sentence": "I always write unit tests"}
archive = load_archive(
self.FIXTURES_ROOT
/ "basic_classifier"
/ "embedding_with_trainable_is_false"
/ "model.tar.gz"
)
predictor = Predictor.from_archive(archive)
results = predictor.predict_batch_json([inputs] * 3)
assert len(results) == 3
| allennlp-master | tests/predictors/predictor_test.py |
import math
from pytest import approx
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
class TestTextClassifierPredictor(AllenNlpTestCase):
def test_uses_named_inputs(self):
inputs = {
"sentence": "It was the ending that I hated. I was disappointed that it was so bad."
}
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "text_classifier")
result = predictor.predict_json(inputs)
logits = result.get("logits")
assert logits is not None
assert isinstance(logits, list)
assert len(logits) == 2
assert all(isinstance(x, float) for x in logits)
probs = result.get("probs")
assert probs is not None
assert isinstance(probs, list)
assert len(probs) == 2
assert all(isinstance(x, float) for x in probs)
assert all(x >= 0 for x in probs)
assert sum(probs) == approx(1.0)
label = result.get("label")
assert label is not None
assert label in predictor._model.vocab.get_token_to_index_vocabulary(namespace="labels")
exps = [math.exp(x) for x in logits]
sum_exps = sum(exps)
for e, p in zip(exps, probs):
assert e / sum_exps == approx(p)
def test_batch_prediction(self):
batch_inputs = [
{"sentence": "It was the ending that I hated. I was disappointed that it was so bad."},
{"sentence": "This one is honestly the worst movie I've ever watched."},
]
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "text_classifier")
results = predictor.predict_batch_json(batch_inputs)
assert len(results) == 2
for result in results:
logits = result.get("logits")
assert logits is not None
assert isinstance(logits, list)
assert len(logits) == 2
assert all(isinstance(x, float) for x in logits)
probs = result.get("probs")
assert probs is not None
assert isinstance(probs, list)
assert len(probs) == 2
assert all(isinstance(x, float) for x in probs)
assert all(x >= 0 for x in probs)
assert sum(probs) == approx(1.0)
label = result.get("label")
assert label is not None
assert label in predictor._model.vocab.get_token_to_index_vocabulary(namespace="labels")
exps = [math.exp(x) for x in logits]
sum_exps = sum(exps)
for e, p in zip(exps, probs):
assert e / sum_exps == approx(p)
def test_predictions_to_labeled_instances(self):
inputs = {
"sentence": "It was the ending that I hated. I was disappointed that it was so bad."
}
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "text_classifier")
instance = predictor._json_to_instance(inputs)
outputs = predictor._model.forward_on_instance(instance)
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert "label" in new_instances[0].fields
assert new_instances[0].fields["label"] is not None
assert len(new_instances) == 1
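# A minimal illustrative sketch (not part of the original tests) of the
# softmax identity that the exp/sum loops above verify by hand: each entry of
# "probs" should equal exp(logit) divided by the sum of exponentiated logits,
# so the probabilities are positive and sum to one.
def _softmax_sketch(logits):
    exponentials = [math.exp(x) for x in logits]
    total = sum(exponentials)
    return [e / total for e in exponentials]
assert abs(sum(_softmax_sketch([0.2, -1.3])) - 1.0) < 1e-12
assert all(p > 0 for p in _softmax_sketch([0.2, -1.3]))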
| allennlp-master | tests/predictors/text_classifier_test.py |
import torch
import pytest
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.models import load_archive, Model
from allennlp.nn.regularizers import RegularizerApplicator
class TestModel(AllenNlpTestCase):
def test_extend_embedder_vocab(self):
model_archive = str(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
trained_model = load_archive(model_archive).model
original_weight = trained_model._text_field_embedder.token_embedder_tokens.weight
assert tuple(original_weight.shape) == (213, 10)
counter = {"tokens": {"unawarded": 1}}
trained_model.vocab._extend(counter)
trained_model.extend_embedder_vocab()
extended_weight = trained_model._text_field_embedder.token_embedder_tokens.weight
assert tuple(extended_weight.shape) == (214, 10)
assert torch.all(original_weight == extended_weight[:213, :])
def test_get_regularization_penalty(self):
class FakeModel(Model):
def forward(self, **kwargs):
return {}
class FakeRegularizerApplicator(RegularizerApplicator):
def __call__(self, module):
return 2.0
with pytest.raises(RuntimeError):
regularizer = FakeRegularizerApplicator()
model = FakeModel(None, regularizer)
model.get_regularization_penalty()
| allennlp-master | tests/models/model_test.py |
allennlp-master | tests/models/__init__.py |
|
import copy
import os
import tempfile
import tarfile
import pytest
import torch
from allennlp.commands.train import train_model
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataset_readers import DatasetReader
from allennlp.models.archival import archive_model, load_archive, CONFIG_NAME
def assert_models_equal(model, model2):
# check that model weights are the same
keys = set(model.state_dict().keys())
keys2 = set(model2.state_dict().keys())
assert keys == keys2
for key in keys:
assert torch.equal(model.state_dict()[key], model2.state_dict()[key])
# check that vocabularies are the same
vocab = model.vocab
vocab2 = model2.vocab
assert vocab._token_to_index == vocab2._token_to_index
assert vocab._index_to_token == vocab2._index_to_token
class ArchivalTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"validation_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam", "cuda_device": -1},
}
)
def test_archiving(self):
# copy params, since they'll get consumed during training
params_copy = self.params.duplicate()
params_dict_copy = copy.deepcopy(self.params.as_dict())
# `train_model` should create an archive
serialization_dir = self.TEST_DIR / "archive_test"
model = train_model(self.params, serialization_dir=serialization_dir)
archive_path = serialization_dir / "model.tar.gz"
# load from the archive
archive = load_archive(archive_path)
model2 = archive.model
assert_models_equal(model, model2)
assert isinstance(
archive.dataset_reader,
type(DatasetReader.from_params(params_copy["dataset_reader"].duplicate())),
)
assert isinstance(
archive.validation_dataset_reader,
type(DatasetReader.from_params(params_copy["dataset_reader"].duplicate())),
) # validation_dataset_reader is not in the config, so fall back to dataset_reader
# check that params are the same
params2 = archive.config
assert params2.as_dict() == params_dict_copy
def test_archive_model_uses_archive_path(self):
serialization_dir = self.TEST_DIR / "serialization"
# Train a model
train_model(self.params, serialization_dir=serialization_dir)
# Use a new path.
archive_model(
serialization_dir=serialization_dir, archive_path=serialization_dir / "new_path.tar.gz"
)
archive = load_archive(serialization_dir / "new_path.tar.gz")
assert archive
def test_loading_serialization_directory(self):
# copy params, since they'll get consumed during training
params_dict_copy = copy.deepcopy(self.params.as_dict())
# `train_model` should create an archive
serialization_dir = self.TEST_DIR / "serialization"
model = train_model(self.params, serialization_dir=serialization_dir)
# load from the serialization directory itself
archive = load_archive(serialization_dir)
model2 = archive.model
assert_models_equal(model, model2)
# check that params are the same
params2 = archive.config
assert params2.as_dict() == params_dict_copy
def test_can_load_from_archive_model(self):
serialization_dir = self.FIXTURES_ROOT / "basic_classifier" / "from_archive_serialization"
archive_path = serialization_dir / "model.tar.gz"
model = load_archive(archive_path).model
# We want to be sure that we don't just not crash, but also be sure that we loaded the right
# weights for the model. We'll do that by making sure that we didn't just load the model
# that's in the `archive_path` of the config file, which is this one.
base_model_path = self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
base_model = load_archive(base_model_path).model
base_model_params = dict(base_model.named_parameters())
for name, parameters in model.named_parameters():
if parameters.size() == base_model_params[name].size():
assert not (parameters == base_model_params[name]).all()
else:
# In this case, the parameters are definitely different, no need for the above
# check.
pass
def test_include_in_archive(self):
self.params["include_in_archive"] = ["metrics_epoch_*.json"]
serialization_dir = self.TEST_DIR / "serialization"
# Train a model
train_model(self.params, serialization_dir=serialization_dir)
# Assert that the additional targets were archived
with tempfile.TemporaryDirectory() as tempdir:
with tarfile.open(serialization_dir / "model.tar.gz", "r:gz") as archive:
archive.extractall(tempdir)
assert os.path.isfile(os.path.join(tempdir, "metrics_epoch_0.json"))
assert os.path.isfile(os.path.join(tempdir, "metrics_epoch_1.json"))
assert not os.path.isfile(os.path.join(tempdir, "metrics.json"))
def test_invalid_include_in_archive(self):
self.params["include_in_archive"] = [CONFIG_NAME]
serialization_dir = self.TEST_DIR / "serialization"
with pytest.raises(ConfigurationError) as exc:
train_model(self.params, serialization_dir=serialization_dir)
assert "are saved names and cannot be used" in str(exc.value)
| allennlp-master | tests/models/archival_test.py |
import json
import pytest
from allennlp.common.testing import ModelTestCase
class ModelWithIncorrectValidationMetricTest(ModelTestCase):
"""
This test case checks some validating functionality that is implemented
in `ensure_model_can_train_save_and_load`
"""
def setup_method(self):
super().setup_method()
self.set_up_model(
self.FIXTURES_ROOT / "simple_tagger" / "model_test_case.jsonnet",
self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv",
)
def test_01_test_validation_metric_does_not_exist(self):
overrides = {"trainer.num_epochs": 2}
pytest.raises(
AssertionError,
self.ensure_model_can_train_save_and_load,
self.param_file,
metric_to_check="non_existent_metric",
metric_terminal_value=0.0,
overrides=json.dumps(overrides),
)
def test_02a_test_validation_metric_terminal_value_not_set(self):
pytest.raises(
AssertionError,
self.ensure_model_can_train_save_and_load,
self.param_file,
metric_to_check="accuracy",
metric_terminal_value=None,
)
def test_02b_test_validation_metric_terminal_value_not_met(self):
pytest.raises(
AssertionError,
self.ensure_model_can_train_save_and_load,
self.param_file,
metric_to_check="accuracy",
metric_terminal_value=0.0,
)
def test_03_test_validation_metric_exists_and_its_terminal_value_is_met(self):
self.ensure_model_can_train_save_and_load(
self.param_file,
metric_to_check="accuracy",
metric_terminal_value=1.0,
)
| allennlp-master | tests/models/test_model_test_case.py |
import numpy
from allennlp.common.testing import ModelTestCase
class TestBasicClassifier(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
self.FIXTURES_ROOT / "basic_classifier" / "experiment_seq2vec.jsonnet",
self.FIXTURES_ROOT / "data" / "text_classification_json" / "imdb_corpus.jsonl",
)
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
output_dict = self.model.make_output_human_readable(output_dict)
assert "label" in output_dict.keys()
probs = output_dict["probs"][0].data.numpy()
numpy.testing.assert_almost_equal(numpy.sum(probs, -1), numpy.array([1]))
def test_seq2vec_clf_can_train_save_and_load(self):
self.set_up_model(
self.FIXTURES_ROOT / "basic_classifier" / "experiment_seq2vec.jsonnet",
self.FIXTURES_ROOT / "data" / "text_classification_json" / "imdb_corpus.jsonl",
)
self.ensure_model_can_train_save_and_load(self.param_file)
def test_seq2seq_clf_can_train_save_and_load(self):
self.set_up_model(
self.FIXTURES_ROOT / "basic_classifier" / "experiment_seq2seq.jsonnet",
self.FIXTURES_ROOT / "data" / "text_classification_json" / "imdb_corpus.jsonl",
)
self.ensure_model_can_train_save_and_load(self.param_file)
| allennlp-master | tests/models/basic_classifier_test.py |
from flaky import flaky
import numpy
import pytest
import torch
from allennlp.common.testing import ModelTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data import DataLoader, PyTorchDataLoader
from allennlp.models import Model
from allennlp.training import GradientDescentTrainer, Trainer
class SimpleTaggerTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
self.FIXTURES_ROOT / "simple_tagger" / "experiment.json",
self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv",
)
def test_simple_tagger_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
@flaky
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
output_dict = self.model.make_output_human_readable(output_dict)
class_probs = output_dict["class_probabilities"][0].data.numpy()
numpy.testing.assert_almost_equal(numpy.sum(class_probs, -1), numpy.array([1, 1, 1, 1]))
def test_forward_on_instances_ignores_loss_key_when_batched(self):
batch_outputs = self.model.forward_on_instances(self.dataset.instances)
for output in batch_outputs:
assert "loss" not in output.keys()
# It should be in the single batch case, because we special case it.
single_output = self.model.forward_on_instance(self.dataset.instances[0])
assert "loss" in single_output.keys()
def test_mismatching_dimensions_throws_configuration_error(self):
params = Params.from_file(self.param_file)
# Make the encoder wrong - it should be 2 to match
# the embedding dimension from the text_field_embedder.
params["model"]["encoder"]["input_size"] = 10
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.pop("model"))
def test_regularization(self):
penalty = self.model.get_regularization_penalty()
assert penalty is None
data_loader = PyTorchDataLoader(self.instances, batch_size=32)
        trainer = GradientDescentTrainer(self.model, None, data_loader)  # the optimizer is intentionally None here
# You get a RuntimeError if you call `model.forward` twice on the same inputs.
# The data and config are such that the whole dataset is one batch.
training_batch = next(iter(data_loader))
validation_batch = next(iter(data_loader))
training_loss = trainer.batch_outputs(training_batch, for_training=True)["loss"].item()
validation_loss = trainer.batch_outputs(validation_batch, for_training=False)["loss"].item()
        # With no regularizer configured, training loss should equal validation loss.
numpy.testing.assert_almost_equal(training_loss, validation_loss)
class SimpleTaggerSpanF1Test(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
self.FIXTURES_ROOT / "simple_tagger_with_span_f1" / "experiment.json",
self.FIXTURES_ROOT / "data" / "conll2003.txt",
)
def test_simple_tagger_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
@flaky
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_simple_tagger_can_enable_span_f1(self):
assert self.model.calculate_span_f1 and self.model._f1_metric is not None
class SimpleTaggerRegularizationTest(ModelTestCase):
def setup_method(self):
super().setup_method()
param_file = self.FIXTURES_ROOT / "simple_tagger" / "experiment_with_regularization.json"
self.set_up_model(param_file, self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv")
params = Params.from_file(param_file)
self.reader = DatasetReader.from_params(params["dataset_reader"])
self.data_loader = DataLoader.from_params(
dataset=self.instances, params=params["data_loader"]
)
self.trainer = Trainer.from_params(
model=self.model,
data_loader=self.data_loader,
serialization_dir=self.TEST_DIR,
params=params.get("trainer"),
)
def test_regularization(self):
penalty = self.model.get_regularization_penalty().data
assert (penalty > 0).all()
penalty2 = 0
# Config specifies penalty as
# "regularizer": [
# ["weight$", {"type": "l2", "alpha": 10}],
# ["bias$", {"type": "l1", "alpha": 5}]
# ]
for name, parameter in self.model.named_parameters():
if name.endswith("weight"):
weight_penalty = 10 * torch.sum(torch.pow(parameter, 2))
penalty2 += weight_penalty
elif name.endswith("bias"):
bias_penalty = 5 * torch.sum(torch.abs(parameter))
penalty2 += bias_penalty
assert (penalty == penalty2.data).all()
# You get a RuntimeError if you call `model.forward` twice on the same inputs.
# The data and config are such that the whole dataset is one batch.
training_batch = next(iter(self.data_loader))
validation_batch = next(iter(self.data_loader))
training_batch_outputs = self.trainer.batch_outputs(training_batch, for_training=True)
training_loss = training_batch_outputs["loss"].data
assert (penalty == training_batch_outputs["reg_loss"]).all()
validation_loss = self.trainer.batch_outputs(validation_batch, for_training=False)[
"loss"
].data
# Training loss should have the regularization penalty, but validation loss should not.
assert (training_loss != validation_loss).all()
# Training loss should equal the validation loss plus the penalty.
penalized = validation_loss + penalty
assert (training_loss == penalized).all()
| allennlp-master | tests/models/simple_tagger_test.py |
import inspect
import os
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.registrable import Registrable
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import push_python_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.samplers import Sampler, BatchSampler
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.data.tokenizers.tokenizer import Tokenizer
from allennlp.modules.text_field_embedders.text_field_embedder import TextFieldEmbedder
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn.regularizers.regularizer import Regularizer
class TestRegistrable(AllenNlpTestCase):
def test_registrable_functionality_works(self):
# This function tests the basic `Registrable` functionality:
#
# 1. The decorator should add things to the list.
# 2. The decorator should crash when adding a duplicate (unless exist_ok=True).
# 3. If a default is given, it should show up first in the list.
#
# What we don't test here is that built-in items are registered correctly. Those are
# tested in the other tests below.
#
# We'll test this with the Tokenizer class, just to have a concrete class to use, and one
# that has a default.
base_class = Tokenizer
assert "fake" not in base_class.list_available()
@base_class.register("fake")
class Fake(base_class):
pass
assert base_class.by_name("fake") == Fake
default = base_class.default_implementation
if default is not None:
assert base_class.list_available()[0] == default
base_class.default_implementation = "fake"
assert base_class.list_available()[0] == "fake"
with pytest.raises(ConfigurationError):
base_class.default_implementation = "not present"
base_class.list_available()
base_class.default_implementation = default
# Verify that registering under a name that already exists
# causes a ConfigurationError.
with pytest.raises(ConfigurationError):
@base_class.register("fake")
class FakeAlternate(base_class):
pass
# Registering under a name that already exists should overwrite
# if exist_ok=True.
@base_class.register("fake", exist_ok=True) # noqa
class FakeAlternate2(base_class):
pass
assert base_class.by_name("fake") == FakeAlternate2
del Registrable._registry[base_class]["fake"]
# TODO(mattg): maybe move all of these into tests for the base class?
def test_registry_has_builtin_samplers(self):
assert Sampler.by_name("random").__name__ == "RandomSampler"
assert Sampler.by_name("sequential").__name__ == "SequentialSampler"
assert BatchSampler.by_name("bucket").__name__ == "BucketBatchSampler"
def test_registry_has_builtin_tokenizers(self):
assert Tokenizer.by_name("spacy").__name__ == "SpacyTokenizer"
assert Tokenizer.by_name("character").__name__ == "CharacterTokenizer"
def test_registry_has_builtin_token_indexers(self):
assert TokenIndexer.by_name("single_id").__name__ == "SingleIdTokenIndexer"
assert TokenIndexer.by_name("characters").__name__ == "TokenCharactersIndexer"
def test_registry_has_builtin_regularizers(self):
assert Regularizer.by_name("l1").__name__ == "L1Regularizer"
assert Regularizer.by_name("l2").__name__ == "L2Regularizer"
def test_registry_has_builtin_token_embedders(self):
assert TokenEmbedder.by_name("embedding").__name__ == "Embedding"
assert TokenEmbedder.by_name("character_encoding").__name__ == "TokenCharactersEncoder"
def test_registry_has_builtin_text_field_embedders(self):
assert TextFieldEmbedder.by_name("basic").__name__ == "BasicTextFieldEmbedder"
def test_implicit_include_package(self):
# Create a new package in a temporary dir
packagedir = self.TEST_DIR / "testpackage"
packagedir.mkdir()
(packagedir / "__init__.py").touch()
# And add that directory to the path
with push_python_path(self.TEST_DIR):
# Write out a duplicate dataset reader there, but registered under a different name.
reader = DatasetReader.by_name("text_classification_json")
with open(inspect.getabsfile(reader)) as f:
code = f.read().replace(
"""@DatasetReader.register("text_classification_json")""",
"""@DatasetReader.register("text_classification_json-fake")""",
)
with open(os.path.join(packagedir, "reader.py"), "w") as f:
f.write(code)
# Fails to import by registered name
with pytest.raises(ConfigurationError) as exc:
DatasetReader.by_name("text_classification_json-fake")
assert "is not a registered name" in str(exc.value)
# Fails to import with wrong module name
with pytest.raises(ConfigurationError) as exc:
DatasetReader.by_name(
"testpackage.text_classification_json.TextClassificationJsonReader"
)
assert "unable to import module" in str(exc.value)
# Fails to import with wrong class name
            with pytest.raises(ConfigurationError) as exc:
DatasetReader.by_name("testpackage.reader.FakeReader")
assert "unable to find class" in str(exc.value)
# Imports successfully with right fully qualified name
duplicate_reader = DatasetReader.by_name(
"testpackage.reader.TextClassificationJsonReader"
)
assert duplicate_reader.__name__ == "TextClassificationJsonReader"
| allennlp-master | tests/common/registrable_test.py |
from typing import Dict, Iterable, List, Mapping, Optional, Set, Tuple, Union
import pytest
import torch
from allennlp.common import Lazy, Params, Registrable
from allennlp.common.from_params import FromParams, takes_arg, remove_optional, create_kwargs
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import DataLoader, DatasetReader, Tokenizer
from allennlp.models import Model
from allennlp.models.archival import load_archive
from allennlp.common.checks import ConfigurationError
class MyClass(FromParams):
def __init__(self, my_int: int, my_bool: bool = False) -> None:
self.my_int = my_int
self.my_bool = my_bool
class TestFromParams(AllenNlpTestCase):
def test_takes_arg(self):
def bare_function(some_input: int) -> int:
return some_input + 1
assert takes_arg(bare_function, "some_input")
assert not takes_arg(bare_function, "some_other_input")
class SomeClass:
total = 0
def __init__(self, constructor_param: str) -> None:
self.constructor_param = constructor_param
def check_param(self, check: str) -> bool:
return self.constructor_param == check
@classmethod
def set_total(cls, new_total: int) -> None:
cls.total = new_total
assert takes_arg(SomeClass, "self")
assert takes_arg(SomeClass, "constructor_param")
assert not takes_arg(SomeClass, "check")
assert takes_arg(SomeClass.check_param, "check")
assert not takes_arg(SomeClass.check_param, "other_check")
assert takes_arg(SomeClass.set_total, "new_total")
assert not takes_arg(SomeClass.set_total, "total")
def test_remove_optional(self):
optional_type = Optional[Dict[str, str]]
bare_type = remove_optional(optional_type) # type: ignore
bare_bare_type = remove_optional(bare_type)
assert bare_type == Dict[str, str]
assert bare_bare_type == Dict[str, str]
assert remove_optional(Optional[str]) == str
assert remove_optional(str) == str
def test_from_params(self):
my_class = MyClass.from_params(Params({"my_int": 10}), my_bool=True)
assert isinstance(my_class, MyClass)
assert my_class.my_int == 10
assert my_class.my_bool
def test_good_error_message_when_passing_non_params(self):
from allennlp.nn import InitializerApplicator
# This was how we used to take initializer params. We want to be sure we give a reasonable
# error message when something like this is passed to FromParams.
params = Params({"initializer": [["regex1", "uniform"], ["regex2", "orthogonal"]]})
with pytest.raises(ConfigurationError, match="dictionary.*InitializerApplicator"):
InitializerApplicator.from_params(params=params.pop("initializer"))
def test_create_kwargs(self):
kwargs = create_kwargs(MyClass, MyClass, Params({"my_int": 5}), my_bool=True, my_float=4.4)
# my_float should not be included because it's not a param of the MyClass constructor
assert kwargs == {"my_int": 5, "my_bool": True}
def test_extras(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int, name: str) -> None:
self.size = size
self.name = name
@A.register("c")
class C(A):
def __init__(self, size: int, name: str) -> None:
self.size = size
self.name = name
# custom from params
@classmethod
def from_params(cls, params: Params, size: int, **extras) -> "C": # type: ignore
name = params.pop("name")
return cls(size=size, name=name)
# Check that extras get passed, even though A doesn't need them.
params = Params({"type": "b", "size": 10})
b = A.from_params(params, name="extra")
assert b.name == "extra"
assert b.size == 10
# Check that extra extras don't get passed.
params = Params({"type": "b", "size": 10})
b = A.from_params(params, name="extra", unwanted=True)
assert b.name == "extra"
assert b.size == 10
# Now the same with a custom from_params.
params = Params({"type": "c", "name": "extra_c"})
c = A.from_params(params, size=20)
assert c.name == "extra_c"
assert c.size == 20
# Check that extra extras don't get passed.
params = Params({"type": "c", "name": "extra_c"})
c = A.from_params(params, size=20, unwanted=True)
assert c.name == "extra_c"
assert c.size == 20
def test_extras_for_custom_classes(self):
from allennlp.common.registrable import Registrable
class BaseClass(Registrable):
pass
class BaseClass2(Registrable):
pass
@BaseClass.register("A")
class A(BaseClass):
def __init__(self, a: int, b: int, val: str) -> None:
self.a = a
self.b = b
self.val = val
def __hash__(self):
return self.b
def __eq__(self, other):
return self.b == other.b
@classmethod
def from_params(cls, params: Params, a: int, **extras) -> "A": # type: ignore
# A custom from params
b = params.pop_int("b")
val = params.pop("val", "C")
params.assert_empty(cls.__name__)
return cls(a=a, b=b, val=val)
@BaseClass2.register("B")
class B(BaseClass2):
def __init__(self, c: int, b: int) -> None:
self.c = c
self.b = b
@classmethod
def from_params(cls, params: Params, c: int, **extras) -> "B": # type: ignore
b = params.pop_int("b")
params.assert_empty(cls.__name__)
return cls(c=c, b=b)
@BaseClass.register("E")
class E(BaseClass):
def __init__(self, m: int, n: int) -> None:
self.m = m
self.n = n
@classmethod
def from_params(cls, params: Params, **extras2) -> "E": # type: ignore
m = params.pop_int("m")
params.assert_empty(cls.__name__)
n = extras2["n"]
return cls(m=m, n=n)
class C:
pass
@BaseClass.register("D")
class D(BaseClass):
def __init__(
self,
arg1: List[BaseClass],
arg2: Tuple[BaseClass, BaseClass2],
arg3: Dict[str, BaseClass],
arg4: Set[BaseClass],
arg5: List[BaseClass],
) -> None:
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
self.arg4 = arg4
self.arg5 = arg5
vals = [1, 2, 3]
params = Params(
{
"type": "D",
"arg1": [
{"type": "A", "b": vals[0]},
{"type": "A", "b": vals[1]},
{"type": "A", "b": vals[2]},
],
"arg2": [{"type": "A", "b": vals[0]}, {"type": "B", "b": vals[0]}],
"arg3": {
"class_1": {"type": "A", "b": vals[0]},
"class_2": {"type": "A", "b": vals[1]},
},
"arg4": [
{"type": "A", "b": vals[0], "val": "M"},
{"type": "A", "b": vals[1], "val": "N"},
{"type": "A", "b": vals[1], "val": "N"},
],
"arg5": [{"type": "E", "m": 9}],
}
)
extra = C()
tval1 = 5
tval2 = 6
d = BaseClass.from_params(params=params, extra=extra, a=tval1, c=tval2, n=10)
        # Tests for List
assert len(d.arg1) == len(vals)
assert isinstance(d.arg1, list)
assert isinstance(d.arg1[0], A)
assert all(x.b == y for x, y in zip(d.arg1, vals))
assert all(x.a == tval1 for x in d.arg1)
# Tests for Tuple
assert isinstance(d.arg2, tuple)
assert isinstance(d.arg2[0], A)
assert isinstance(d.arg2[1], B)
assert d.arg2[0].a == tval1
assert d.arg2[1].c == tval2
assert d.arg2[0].b == d.arg2[1].b == vals[0]
# Tests for Dict
assert isinstance(d.arg3, dict)
assert isinstance(d.arg3["class_1"], A)
assert d.arg3["class_1"].a == d.arg3["class_2"].a == tval1
assert d.arg3["class_1"].b == vals[0]
assert d.arg3["class_2"].b == vals[1]
# Tests for Set
assert isinstance(d.arg4, set)
assert len(d.arg4) == 2
assert any(x.val == "M" for x in d.arg4)
assert any(x.val == "N" for x in d.arg4)
# Tests for custom extras parameters
assert isinstance(d.arg5, list)
assert isinstance(d.arg5[0], E)
assert d.arg5[0].m == 9
assert d.arg5[0].n == 10
def test_no_constructor(self):
params = Params({"type": "just_spaces"})
Tokenizer.from_params(params)
def test_union(self):
class A(FromParams):
def __init__(self, a: Union[int, List[int]]) -> None:
self.a = a
class B(FromParams):
def __init__(self, b: Union[A, List[A]]) -> None:
# Really you would want to be sure that `self.b` has a consistent type, but for
# this test we'll ignore that.
self.b = b
params = Params({"a": 3})
a = A.from_params(params)
assert a.a == 3
params = Params({"a": [3, 4, 5]})
a = A.from_params(params)
assert a.a == [3, 4, 5]
params = Params({"b": {"a": 3}})
b = B.from_params(params)
assert isinstance(b.b, A)
assert b.b.a == 3
params = Params({"b": [{"a": 3}, {"a": [4, 5]}]})
b = B.from_params(params)
assert isinstance(b.b, list)
assert b.b[0].a == 3
assert b.b[1].a == [4, 5]
def test_crazy_nested_union(self):
class A(FromParams):
def __init__(self, a: Union[int, List[int]]) -> None:
self.a = a
class B(FromParams):
def __init__(self, b: Union[A, List[A]]) -> None:
# Really you would want to be sure that `self.b` has a consistent type, but for
# this test we'll ignore that.
self.b = b
class C(FromParams):
def __init__(self, c: Union[A, B, Dict[str, A]]) -> None:
# Really you would want to be sure that `self.c` has a consistent type, but for
# this test we'll ignore that.
self.c = c
# This is a contrived, ugly example (why would you want to duplicate names in a nested
# structure like this??), but it demonstrates a potential bug when dealing with mutable
# parameters. If you're not careful about keeping the parameters un-mutated in two
# separate places, you'll end up with a B, or with a dict that's missing the 'b' key.
params = Params({"c": {"a": {"a": 3}, "b": {"a": [4, 5]}}})
c = C.from_params(params)
assert isinstance(c.c, dict)
assert c.c["a"].a == 3
assert c.c["b"].a == [4, 5]
def test_union_of_castable_types(self):
class IntFloat(FromParams):
def __init__(self, a: Union[int, float]) -> None:
self.a = a
class FloatInt(FromParams):
def __init__(self, a: Union[float, int]) -> None:
self.a = a
float_param_str = '{"a": 1.0}'
int_param_str = '{"a": 1}'
import json
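# Regardless of the order of the Union members, the parsed value should keep its original type (no int/float coercion).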
for expected_type, param_str in [(int, int_param_str), (float, float_param_str)]:
for cls in [IntFloat, FloatInt]:
c = cls.from_params(Params(json.loads(param_str)))
assert type(c.a) == expected_type
def test_invalid_type_conversions(self):
class A(FromParams):
def __init__(self, a: int) -> None:
self.a = a
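# Strings and floats should not be silently coerced to int.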
with pytest.raises(TypeError):
A.from_params(Params({"a": "1"}))
with pytest.raises(TypeError):
A.from_params(Params({"a": 1.0}))
def test_dict(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Dict[str, A]) -> None:
self.items = items
params = Params(
{
"type": "d",
"items": {"first": {"type": "b", "size": 1}, "second": {"type": "b", "size": 2}},
}
)
d = C.from_params(params)
assert isinstance(d.items, dict)
assert len(d.items) == 2
assert all(isinstance(key, str) for key in d.items.keys())
assert all(isinstance(value, B) for value in d.items.values())
assert d.items["first"].size == 1
assert d.items["second"].size == 2
def test_dict_not_params(self):
class A(FromParams):
def __init__(self, counts: Dict[str, int]) -> None:
self.counts = counts
params = Params({"counts": {"a": 10, "b": 20}})
a = A.from_params(params)
assert isinstance(a.counts, dict)
assert not isinstance(a.counts, Params)
def test_list(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: List[A]) -> None:
self.items = items
params = Params(
{"type": "d", "items": [{"type": "b", "size": 1}, {"type": "b", "size": 2}]}
)
d = C.from_params(params)
assert isinstance(d.items, list)
assert len(d.items) == 2
assert all(isinstance(item, B) for item in d.items)
assert d.items[0].size == 1
assert d.items[1].size == 2
def test_tuple(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, name: str) -> None:
self.name = name
class E(Registrable):
pass
@E.register("f")
class F(E):
def __init__(self, items: Tuple[A, C]) -> None:
self.items = items
params = Params(
{"type": "f", "items": [{"type": "b", "size": 1}, {"type": "d", "name": "item2"}]}
)
f = E.from_params(params)
assert isinstance(f.items, tuple)
assert len(f.items) == 2
assert isinstance(f.items[0], B)
assert isinstance(f.items[1], D)
assert f.items[0].size == 1
assert f.items[1].name == "item2"
def test_set(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
def __init__(self, name: str) -> None:
self.name = name
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
@A.register("b")
class B(A):
pass
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Set[A]) -> None:
self.items = items
params = Params(
{
"type": "d",
"items": [
{"type": "b", "name": "item1"},
{"type": "b", "name": "item2"},
{"type": "b", "name": "item2"},
],
}
)
d = C.from_params(params)
assert isinstance(d.items, set)
assert len(d.items) == 2
assert all(isinstance(item, B) for item in d.items)
assert any(item.name == "item1" for item in d.items)
assert any(item.name == "item2" for item in d.items)
def test_transferring_of_modules(self):
model_archive = str(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
trained_model = load_archive(model_archive).model
config_file = str(self.FIXTURES_ROOT / "basic_classifier" / "experiment_seq2seq.jsonnet")
model_params = Params.from_file(config_file).pop("model").as_dict(quiet=True)
# Override only text_field_embedder (freeze) and seq2seq_encoder params (tunable)
model_params["text_field_embedder"] = {
"_pretrained": {
"archive_file": model_archive,
"module_path": "_text_field_embedder",
"freeze": True,
}
}
model_params["seq2seq_encoder"] = {
"_pretrained": {
"archive_file": model_archive,
"module_path": "_seq2seq_encoder",
"freeze": False,
}
}
transfer_model = Model.from_params(vocab=trained_model.vocab, params=Params(model_params))
# TextFieldEmbedder and Seq2SeqEncoder parameters should be transferred
for trained_parameter, transfer_parameter in zip(
trained_model._text_field_embedder.parameters(),
transfer_model._text_field_embedder.parameters(),
):
assert torch.all(trained_parameter == transfer_parameter)
for trained_parameter, transfer_parameter in zip(
trained_model._seq2seq_encoder.parameters(),
transfer_model._seq2seq_encoder.parameters(),
):
assert torch.all(trained_parameter == transfer_parameter)
# Any other module's parameters shouldn't be the same (e.g. _feedforward)
for trained_parameter, transfer_parameter in zip(
trained_model._feedforward.parameters(),
transfer_model._feedforward.parameters(),
):
assert torch.all(trained_parameter != transfer_parameter)
# TextFieldEmbedder should have requires_grad Off
for parameter in transfer_model._text_field_embedder.parameters():
assert not parameter.requires_grad
# Seq2SeqEncoder should have requires_grad On
for parameter in transfer_model._seq2seq_encoder.parameters():
assert parameter.requires_grad
def test_transferring_of_modules_ensures_type_consistency(self):
model_archive = str(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
trained_model = load_archive(model_archive).model
config_file = str(self.FIXTURES_ROOT / "basic_classifier" / "experiment_seq2seq.jsonnet")
model_params = Params.from_file(config_file).pop("model").as_dict(quiet=True)
# Override only text_field_embedder and make it load Seq2SeqEncoder
model_params["text_field_embedder"] = {
"_pretrained": {
"archive_file": model_archive,
"module_path": "_seq2seq_encoder._module",
}
}
with pytest.raises(ConfigurationError):
Model.from_params(vocab=trained_model.vocab, params=Params(model_params))
def test_bare_string_params(self):
dataset = [1]
class TestLoader(Registrable):
@classmethod
def from_partial_objects(cls, data_loader: Lazy[DataLoader]) -> DataLoader:
return data_loader.construct(dataset=dataset)
TestLoader.register("test", constructor="from_partial_objects")(TestLoader)
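# "sampler": "random" below is a bare string; it should still resolve to the registered RandomSampler.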
data_loader = TestLoader.from_params(
Params(
{
"type": "test",
"data_loader": {
"batch_sampler": {
"type": "basic",
"batch_size": 2,
"drop_last": True,
"sampler": "random",
}
},
}
)
)
assert data_loader.batch_sampler.sampler.__class__.__name__ == "RandomSampler"
assert data_loader.batch_sampler.sampler.data_source is dataset
def test_kwargs_are_passed_to_superclass(self):
params = Params(
{"type": "text_classification_json", "lazy": True, "cache_directory": "tmp"}
)
reader = DatasetReader.from_params(params)
assert reader.lazy is True
assert str(reader._cache_directory) == "tmp"
def test_kwargs_with_multiple_inheritance(self):
# Basic idea: have two identical classes, differing only in the order of their multiple
# inheritance, and make sure that passing kwargs up to the superclass works in both cases.
class A(Registrable):
def __init__(self, a: int):
self.a = a
from numbers import Number
@A.register("b1")
class B1(A, Number):
def __init__(self, b: float, **kwargs):
super().__init__(**kwargs)
self.b = b
@A.register("b2")
class B2(Number, A):
def __init__(self, b: float, **kwargs):
super().__init__(**kwargs)
self.b = b
b = B1.from_params(params=Params({"a": 4, "b": 5}))
assert b.b == 5
assert b.a == 4
b = B2.from_params(params=Params({"a": 4, "b": 5}))
assert b.b == 5
assert b.a == 4
def test_only_infer_superclass_params_if_unknown(self):
from allennlp.common.registrable import Registrable
class BaseClass(Registrable):
def __init__(self):
self.x = None
self.a = None
self.rest = None
@BaseClass.register("a")
class A(BaseClass):
def __init__(self, a: int, x: int, **kwargs):
super().__init__()
self.x = x
self.a = a
self.rest = kwargs
@BaseClass.register("b")
class B(A):
def __init__(self, a: str, x: int = 42, **kwargs):
super().__init__(x=x, a=-1, raw_a=a, **kwargs)
params = Params({"type": "b", "a": "123"})
# The param `x` should not be required as it has a default value in `B`
# The correct type of the param `a` should be inferred from `B` as well.
instance = BaseClass.from_params(params)
assert instance.x == 42
assert instance.a == -1
assert len(instance.rest) == 1
assert type(instance.rest["raw_a"]) == str
assert instance.rest["raw_a"] == "123"
def test_kwargs_are_passed_to_deeper_superclasses(self):
from allennlp.common.registrable import Registrable
class BaseClass(Registrable):
def __init__(self):
self.a = None
self.b = None
self.c = None
@BaseClass.register("a")
class A(BaseClass):
def __init__(self, a: str):
super().__init__()
self.a = a
@BaseClass.register("b")
class B(A):
def __init__(self, b: str, **kwargs):
super().__init__(**kwargs)
self.b = b
@BaseClass.register("c")
class C(B):
def __init__(self, c, **kwargs):
super().__init__(**kwargs)
self.c = c
params = Params({"type": "c", "a": "a_value", "b": "b_value", "c": "c_value"})
instance = BaseClass.from_params(params)
assert instance.a == "a_value"
assert instance.b == "b_value"
assert instance.c == "c_value"
def test_lazy_construction_can_happen_multiple_times(self):
test_string = "this is a test"
extra_string = "extra string"
class ConstructedObject(FromParams):
def __init__(self, string: str, extra: str):
self.string = string
self.extra = extra
class Testing(FromParams):
def __init__(self, lazy_object: Lazy[ConstructedObject]):
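# Each construct() call should build a fresh ConstructedObject from the stored params plus the extras passed here.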
first_time = lazy_object.construct(extra=extra_string)
second_time = lazy_object.construct(extra=extra_string)
assert first_time.string == test_string
assert first_time.extra == extra_string
assert second_time.string == test_string
assert second_time.extra == extra_string
Testing.from_params(Params({"lazy_object": {"string": test_string}}))
def test_optional_vs_required_lazy_objects(self):
class ConstructedObject(FromParams):
def __init__(self, a: int):
self.a = a
class Testing(FromParams):
def __init__(
self,
lazy1: Lazy[ConstructedObject],
lazy2: Lazy[ConstructedObject] = Lazy(ConstructedObject),
lazy3: Lazy[ConstructedObject] = None,
lazy4: Optional[Lazy[ConstructedObject]] = Lazy(ConstructedObject),
) -> None:
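# lazy1 is required; lazy2 has a default Lazy; lazy3 defaults to None; lazy4 is Optional but still has a default Lazy.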
self.lazy1 = lazy1.construct()
self.lazy2 = lazy2.construct(a=2)
self.lazy3 = None if lazy3 is None else lazy3.construct()
self.lazy4 = None if lazy4 is None else lazy4.construct(a=1)
test1 = Testing.from_params(Params({"lazy1": {"a": 1}}))
assert test1.lazy1.a == 1
assert test1.lazy2.a == 2
assert test1.lazy3 is None
assert test1.lazy4 is not None
test2 = Testing.from_params(Params({"lazy1": {"a": 1}, "lazy2": {"a": 3}}))
assert test2.lazy1.a == 1
assert test2.lazy2.a == 3
assert test2.lazy3 is None
assert test2.lazy4 is not None
test3 = Testing.from_params(Params({"lazy1": {"a": 1}, "lazy3": {"a": 3}, "lazy4": None}))
assert test3.lazy1.a == 1
assert test3.lazy2.a == 2
assert test3.lazy3 is not None
assert test3.lazy3.a == 3
assert test3.lazy4 is None
with pytest.raises(ConfigurationError, match='key "lazy1" is required'):
Testing.from_params(Params({}))
def test_iterable(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Iterable[A]) -> None:
self.items = items
params = Params(
{"type": "d", "items": [{"type": "b", "size": 1}, {"type": "b", "size": 2}]}
)
d = C.from_params(params)
assert isinstance(d.items, Iterable)
items = list(d.items)
assert len(items) == 2
assert all(isinstance(item, B) for item in items)
assert items[0].size == 1
assert items[1].size == 2
def test_mapping(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Mapping[str, A]) -> None:
self.items = items
params = Params(
{
"type": "d",
"items": {"first": {"type": "b", "size": 1}, "second": {"type": "b", "size": 2}},
}
)
d = C.from_params(params)
assert isinstance(d.items, Mapping)
assert len(d.items) == 2
assert all(isinstance(key, str) for key in d.items.keys())
assert all(isinstance(value, B) for value in d.items.values())
assert d.items["first"].size == 1
assert d.items["second"].size == 2
def test_extra_parameters_are_not_allowed_when_there_is_no_constructor(self):
class A(FromParams):
pass
with pytest.raises(ConfigurationError, match="Extra parameters"):
A.from_params(Params({"some_spurious": "key", "value": "pairs"}))
def test_explicit_kwargs_always_passed_to_constructor(self):
class Base(FromParams):
def __init__(self, lazy: bool = False, x: int = 0) -> None:
self.lazy = lazy
self.x = x
class A(Base):
def __init__(self, **kwargs) -> None:
assert "lazy" in kwargs
super().__init__(**kwargs)
A.from_params(Params({"lazy": False}))
class B(Base):
def __init__(self, **kwargs) -> None:
super().__init__(lazy=True, **kwargs)
b = B.from_params(Params({}))
assert b.lazy is True
def test_raises_when_there_are_no_implementations(self):
class A(Registrable):
pass
with pytest.raises(ConfigurationError, match="no registered concrete types"):
A.from_params("nonexistent_class")
with pytest.raises(ConfigurationError, match="no registered concrete types"):
A.from_params(Params({"some_spurious": "key", "value": "pairs"}))
with pytest.raises(ConfigurationError, match="no registered concrete types"):
A.from_params(Params({}))
# Some paths through the code are different if there is a constructor here versus not. We
# don't actually go through this logic anymore, but it's here as a regression test.
class B(Registrable):
def __init__(self):
pass
with pytest.raises(ConfigurationError, match="no registered concrete types"):
B.from_params("nonexistent_class")
with pytest.raises(ConfigurationError, match="no registered concrete types"):
B.from_params(Params({"some_spurious": "key", "value": "pairs"}))
with pytest.raises(ConfigurationError, match="no registered concrete types"):
B.from_params(Params({}))
def test_from_params_raises_error_on_wrong_parameter_name_in_optional_union(self):
class NestedClass(FromParams):
def __init__(self, varname: Optional[str] = None):
self.varname = varname
class WrapperClass(FromParams):
def __init__(self, nested_class: Optional[Union[str, NestedClass]] = None):
if isinstance(nested_class, str):
nested_class = NestedClass(varname=nested_class)
self.nested_class = nested_class
with pytest.raises(ConfigurationError):
WrapperClass.from_params(
params=Params({"nested_class": {"wrong_varname": "varstring"}})
)
def test_from_params_handles_base_class_kwargs(self):
class Foo(FromParams):
def __init__(self, a: int, b: str = None, **kwargs) -> None:
self.a = a
self.b = b
for key, value in kwargs.items():
setattr(self, key, value)
foo = Foo.from_params(Params({"a": 2, "b": "hi"}))
assert foo.a == 2
assert foo.b == "hi"
foo = Foo.from_params(Params({"a": 2, "b": "hi", "c": {"2": "3"}}))
assert foo.a == 2
assert foo.b == "hi"
assert foo.c == {"2": "3"}
class Bar(Foo):
def __init__(self, a: int, b: str, d: int, **kwargs) -> None:
super().__init__(a, b=b, **kwargs)
self.d = d
bar = Bar.from_params(Params({"a": 2, "b": "hi", "c": {"2": "3"}, "d": 0}))
assert bar.a == 2
assert bar.b == "hi"
assert bar.c == {"2": "3"}
assert bar.d == 0
class Baz(Foo):
def __init__(self, a: int, b: Optional[str] = "a", **kwargs) -> None:
super().__init__(a, b=b, **kwargs)
baz = Baz.from_params(Params({"a": 2, "b": None}))
assert baz.b is None
baz = Baz.from_params(Params({"a": 2}))
assert baz.b == "a"
def test_from_params_base_class_kwargs_crashes_if_params_not_handled(self):
class Bar(FromParams):
def __init__(self, c: str = None) -> None:
self.c = c
class Foo(Bar):
def __init__(self, a: int, b: str = None, **kwargs) -> None:
super().__init__(**kwargs)
self.a = a
self.b = b
foo = Foo.from_params(Params({"a": 2, "b": "hi", "c": "some value"}))
assert foo.a == 2
assert foo.b == "hi"
assert foo.c == "some value"
with pytest.raises(TypeError, match="invalid_key"):
Foo.from_params(Params({"a": 2, "b": "hi", "invalid_key": "some value"}))
def test_from_params_handles_kwargs_in_non_from_params_registered_class(self):
class Bar(Registrable):
pass
class Baz:
def __init__(self, a: int) -> None:
self.a = a
@Bar.register("foo")
class Foo(Baz):
def __init__(self, a: int, b: str = None, **kwargs) -> None:
super().__init__(a)
self.b = b
for key, value in kwargs.items():
setattr(self, key, value)
foo = Bar.from_params(Params({"type": "foo", "a": 2, "b": "hi"}))
assert foo.a == 2
assert foo.b == "hi"
foo = Bar.from_params(Params({"type": "foo", "a": 2, "b": "hi", "c": {"2": "3"}}))
assert foo.a == 2
assert foo.b == "hi"
assert foo.c == {"2": "3"}
def test_from_params_does_not_pass_extras_to_non_from_params_registered_class(self):
class Bar(Registrable):
pass
class Baz:
def __init__(self, a: int, c: Dict[str, str] = None) -> None:
self.a = a
self.c = c
@Bar.register("foo")
class Foo(Baz):
def __init__(self, a: int, b: str = None, **kwargs) -> None:
super().__init__(a, **kwargs)
self.b = b
foo = Bar.from_params(Params({"type": "foo", "a": 2, "b": "hi"}))
assert foo.a == 2
assert foo.b == "hi"
assert foo.c is None
foo = Bar.from_params(
params=Params({"type": "foo", "a": 2, "b": "hi", "c": {"2": "3"}}), extra="4"
)
assert foo.a == 2
assert foo.b == "hi"
assert foo.c == {"2": "3"}
def test_from_params_child_has_kwargs_base_implicit_constructor(self):
class Foo(FromParams):
pass
class Bar(Foo):
def __init__(self, a: int, **kwargs) -> None:
self.a = a
bar = Bar.from_params(Params({"a": 2}))
assert bar.a == 2
def test_from_params_has_args(self):
class Foo(FromParams):
def __init__(self, a: int, *args) -> None:
self.a = a
foo = Foo.from_params(Params({"a": 2}))
assert foo.a == 2
| allennlp-master | tests/common/from_params_test.py |
from collections import Counter
import os
import pathlib
import json
import time
import shutil
import pytest
import responses
from requests.exceptions import ConnectionError
from allennlp.common import file_utils
from allennlp.common.file_utils import (
_resource_to_filename,
filename_to_url,
get_from_cache,
cached_path,
_split_s3_path,
open_compressed,
CacheFile,
_Meta,
_find_entries,
inspect_cache,
remove_cache_entries,
)
from allennlp.common.testing import AllenNlpTestCase
def set_up_glove(url: str, byt: bytes, change_etag_every: int = 1000):
# Mock response for the datastore url that returns glove vectors
responses.add(
responses.GET,
url,
body=byt,
status=200,
content_type="application/gzip",
stream=True,
headers={"Content-Length": str(len(byt))},
)
etags_left = change_etag_every
etag = "0"
def head_callback(_):
"""
Writing this as a callback allows different responses to different HEAD requests.
In our case, we're going to change the ETag header every `change_etag_every`
requests, which will allow us to simulate having a new version of the file.
"""
nonlocal etags_left, etag
headers = {"ETag": etag}
# countdown and change ETag
etags_left -= 1
if etags_left <= 0:
etags_left = change_etag_every
etag = str(int(etag) + 1)
return (200, headers, "")
responses.add_callback(responses.HEAD, url, callback=head_callback)
class TestFileUtils(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.glove_file = self.FIXTURES_ROOT / "embeddings/glove.6B.100d.sample.txt.gz"
with open(self.glove_file, "rb") as glove:
self.glove_bytes = glove.read()
def test_cached_path_offline(self, monkeypatch):
# Ensures `cached_path` just returns the path to the latest cached version
# of the resource when there's no internet connection.
# First we mock the `_http_etag` method so that it raises a `ConnectionError`,
# like it would if there was no internet connection.
def mocked_http_etag(url: str):
raise ConnectionError
monkeypatch.setattr(file_utils, "_http_etag", mocked_http_etag)
url = "https://github.com/allenai/allennlp/blob/master/some-fake-resource"
# We'll create two cached versions of this fake resource using two different etags.
etags = ['W/"3e5885bfcbf4c47bc4ee9e2f6e5ea916"', 'W/"3e5885bfcbf4c47bc4ee9e2f6e5ea918"']
filenames = [
os.path.join(self.TEST_DIR, _resource_to_filename(url, etag)) for etag in etags
]
for filename, etag in zip(filenames, etags):
meta = _Meta(
resource=url, cached_path=filename, creation_time=time.time(), etag=etag, size=2341
)
meta.to_file()
with open(filename, "w") as f:
f.write("some random data")
# os.path.getmtime is only accurate to the second.
time.sleep(1.1)
# Should know to ignore lock files and extraction directories.
with open(filenames[-1] + ".lock", "w") as f:
f.write("")
os.mkdir(filenames[-1] + "-extracted")
# The version corresponding to the last etag should be returned, since
# that one has the latest "last modified" time.
assert get_from_cache(url, cache_dir=self.TEST_DIR) == filenames[-1]
# We also want to make sure this works when the latest cached version doesn't
# have a corresponding etag.
filename = os.path.join(self.TEST_DIR, _resource_to_filename(url))
meta = _Meta(resource=url, cached_path=filename, creation_time=time.time(), size=2341)
with open(filename, "w") as f:
f.write("some random data")
assert get_from_cache(url, cache_dir=self.TEST_DIR) == filename
def test_resource_to_filename(self):
for url in [
"http://allenai.org",
"http://allennlp.org",
"https://www.google.com",
"http://pytorch.org",
"https://allennlp.s3.amazonaws.com" + "/long" * 20 + "/url",
]:
filename = _resource_to_filename(url)
assert "http" not in filename
with pytest.raises(FileNotFoundError):
filename_to_url(filename, cache_dir=self.TEST_DIR)
pathlib.Path(os.path.join(self.TEST_DIR, filename)).touch()
with pytest.raises(FileNotFoundError):
filename_to_url(filename, cache_dir=self.TEST_DIR)
json.dump(
{"url": url, "etag": None},
open(os.path.join(self.TEST_DIR, filename + ".json"), "w"),
)
back_to_url, etag = filename_to_url(filename, cache_dir=self.TEST_DIR)
assert back_to_url == url
assert etag is None
def test_resource_to_filename_with_etags(self):
for url in [
"http://allenai.org",
"http://allennlp.org",
"https://www.google.com",
"http://pytorch.org",
]:
filename = _resource_to_filename(url, etag="mytag")
assert "http" not in filename
pathlib.Path(os.path.join(self.TEST_DIR, filename)).touch()
json.dump(
{"url": url, "etag": "mytag"},
open(os.path.join(self.TEST_DIR, filename + ".json"), "w"),
)
back_to_url, etag = filename_to_url(filename, cache_dir=self.TEST_DIR)
assert back_to_url == url
assert etag == "mytag"
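# A URL that happens to end in "1" and the bare URL with etag "1" must not map to the same cache filename.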
baseurl = "http://allenai.org/"
assert _resource_to_filename(baseurl + "1") != _resource_to_filename(baseurl, etag="1")
def test_resource_to_filename_with_etags_eliminates_quotes(self):
for url in [
"http://allenai.org",
"http://allennlp.org",
"https://www.google.com",
"http://pytorch.org",
]:
filename = _resource_to_filename(url, etag='"mytag"')
assert "http" not in filename
pathlib.Path(os.path.join(self.TEST_DIR, filename)).touch()
json.dump(
{"url": url, "etag": "mytag"},
open(os.path.join(self.TEST_DIR, filename + ".json"), "w"),
)
back_to_url, etag = filename_to_url(filename, cache_dir=self.TEST_DIR)
assert back_to_url == url
assert etag == "mytag"
def test_split_s3_path(self):
# Test splitting good urls.
assert _split_s3_path("s3://my-bucket/subdir/file.txt") == ("my-bucket", "subdir/file.txt")
assert _split_s3_path("s3://my-bucket/file.txt") == ("my-bucket", "file.txt")
# Test splitting bad urls.
with pytest.raises(ValueError):
_split_s3_path("s3://")
_split_s3_path("s3://myfile.txt")
_split_s3_path("myfile.txt")
@responses.activate
def test_get_from_cache(self):
url = "http://fake.datastore.com/glove.txt.gz"
set_up_glove(url, self.glove_bytes, change_etag_every=2)
filename = get_from_cache(url, cache_dir=self.TEST_DIR)
assert filename == os.path.join(self.TEST_DIR, _resource_to_filename(url, etag="0"))
assert os.path.exists(filename + ".json")
meta = _Meta.from_path(filename + ".json")
assert meta.resource == url
# We should have made one HEAD request and one GET request.
method_counts = Counter(call.request.method for call in responses.calls)
assert len(method_counts) == 2
assert method_counts["HEAD"] == 1
assert method_counts["GET"] == 1
# And the cached file should have the correct contents
with open(filename, "rb") as cached_file:
assert cached_file.read() == self.glove_bytes
# A second call to `get_from_cache` should make another HEAD call
# but not another GET call.
filename2 = get_from_cache(url, cache_dir=self.TEST_DIR)
assert filename2 == filename
method_counts = Counter(call.request.method for call in responses.calls)
assert len(method_counts) == 2
assert method_counts["HEAD"] == 2
assert method_counts["GET"] == 1
with open(filename2, "rb") as cached_file:
assert cached_file.read() == self.glove_bytes
# A third call should have a different ETag and should force a new download,
# which means another HEAD call and another GET call.
filename3 = get_from_cache(url, cache_dir=self.TEST_DIR)
assert filename3 == os.path.join(self.TEST_DIR, _resource_to_filename(url, etag="1"))
method_counts = Counter(call.request.method for call in responses.calls)
assert len(method_counts) == 2
assert method_counts["HEAD"] == 3
assert method_counts["GET"] == 2
with open(filename3, "rb") as cached_file:
assert cached_file.read() == self.glove_bytes
@responses.activate
def test_cached_path(self):
url = "http://fake.datastore.com/glove.txt.gz"
set_up_glove(url, self.glove_bytes)
# non-existent file
with pytest.raises(FileNotFoundError):
filename = cached_path(self.FIXTURES_ROOT / "does_not_exist" / "fake_file.tar.gz")
# unparsable URI
with pytest.raises(ValueError):
filename = cached_path("fakescheme://path/to/fake/file.tar.gz")
# existing file as path
assert cached_path(self.glove_file) == str(self.glove_file)
# caches urls
filename = cached_path(url, cache_dir=self.TEST_DIR)
assert len(responses.calls) == 2
assert filename == os.path.join(self.TEST_DIR, _resource_to_filename(url, etag="0"))
with open(filename, "rb") as cached_file:
assert cached_file.read() == self.glove_bytes
# archives
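# the "archive.tar.gz!member" syntax picks a single file out of the extracted archive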
filename = cached_path(
self.FIXTURES_ROOT / "common" / "quote.tar.gz!quote.txt",
extract_archive=True,
cache_dir=self.TEST_DIR,
)
with open(filename, "r") as f:
assert f.read().startswith("I mean, ")
def test_extract_with_external_symlink(self):
dangerous_file = self.FIXTURES_ROOT / "common" / "external_symlink.tar.gz"
with pytest.raises(ValueError):
cached_path(dangerous_file, extract_archive=True)
def test_open_compressed(self):
uncompressed_file = self.FIXTURES_ROOT / "embeddings/fake_embeddings.5d.txt"
with open_compressed(uncompressed_file) as f:
uncompressed_lines = [line.strip() for line in f]
for suffix in ["bz2", "gz"]:
compressed_file = f"{uncompressed_file}.{suffix}"
with open_compressed(compressed_file) as f:
compressed_lines = [line.strip() for line in f]
assert compressed_lines == uncompressed_lines
def test_meta_backwards_compatible(self):
url = "http://fake.datastore.com/glove.txt.gz"
etag = "some-fake-etag"
filename = os.path.join(self.TEST_DIR, _resource_to_filename(url, etag))
with open(filename, "wb") as f:
f.write(self.glove_bytes)
with open(filename + ".json", "w") as meta_file:
json.dump({"url": url, "etag": etag}, meta_file)
meta = _Meta.from_path(filename + ".json")
assert meta.resource == url
assert meta.etag == etag
assert meta.creation_time is not None
assert meta.size == len(self.glove_bytes)
def create_cache_entry(self, url: str, etag: str, as_extraction_dir: bool = False):
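# Helper: writes a fake cached file (or extraction directory), plus its lock file and metadata, into the test cache dir.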
filename = os.path.join(self.TEST_DIR, _resource_to_filename(url, etag))
cache_path = filename
if as_extraction_dir:
cache_path = filename + "-extracted"
filename = filename + "-extracted/glove.txt"
os.mkdir(cache_path)
with open(filename, "wb") as f:
f.write(self.glove_bytes)
open(cache_path + ".lock", "a").close()
meta = _Meta(
resource=url,
cached_path=cache_path,
etag=etag,
creation_time=time.time(),
size=len(self.glove_bytes),
extraction_dir=as_extraction_dir,
)
meta.to_file()
def test_inspect(self, capsys):
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-1")
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-2")
self.create_cache_entry(
"http://fake.datastore.com/glove.txt.gz", "etag-3", as_extraction_dir=True
)
inspect_cache(cache_dir=self.TEST_DIR)
captured = capsys.readouterr()
assert "http://fake.datastore.com/glove.txt.gz" in captured.out
assert "2 versions cached" in captured.out
assert "1 version extracted" in captured.out
def test_inspect_with_patterns(self, capsys):
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-1")
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-2")
self.create_cache_entry("http://other.fake.datastore.com/glove.txt.gz", "etag-4")
inspect_cache(cache_dir=self.TEST_DIR, patterns=["http://fake.*"])
captured = capsys.readouterr()
assert "http://fake.datastore.com/glove.txt.gz" in captured.out
assert "2 versions" in captured.out
assert "http://other.fake.datastore.com/glove.txt.gz" not in captured.out
def test_remove_entries(self):
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-1")
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-2")
self.create_cache_entry(
"http://fake.datastore.com/glove.txt.gz", "etag-3", as_extraction_dir=True
)
self.create_cache_entry("http://other.fake.datastore.com/glove.txt.gz", "etag-4")
self.create_cache_entry(
"http://other.fake.datastore.com/glove.txt.gz", "etag-5", as_extraction_dir=True
)
reclaimed_space = remove_cache_entries(["http://fake.*"], cache_dir=self.TEST_DIR)
assert reclaimed_space == 3 * len(self.glove_bytes)
size_left, entries_left = _find_entries(cache_dir=self.TEST_DIR)
assert size_left == 2 * len(self.glove_bytes)
assert len(entries_left) == 1
entry_left = list(entries_left.values())[0]
# one regular cache file and one extraction dir
assert len(entry_left[0]) == 1
assert len(entry_left[1]) == 1
# Now remove everything.
remove_cache_entries(["*"], cache_dir=self.TEST_DIR)
assert len(os.listdir(self.TEST_DIR)) == 0
class TestCachedPathWithArchive(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.tar_file = self.TEST_DIR / "utf-8.tar.gz"
shutil.copyfile(
self.FIXTURES_ROOT / "utf-8_sample" / "archives" / "utf-8.tar.gz", self.tar_file
)
self.zip_file = self.TEST_DIR / "utf-8.zip"
shutil.copyfile(
self.FIXTURES_ROOT / "utf-8_sample" / "archives" / "utf-8.zip", self.zip_file
)
def check_extracted(self, extracted: str):
assert os.path.isdir(extracted)
assert pathlib.Path(extracted).parent == self.TEST_DIR
assert os.path.exists(os.path.join(extracted, "dummy.txt"))
assert os.path.exists(os.path.join(extracted, "folder/utf-8_sample.txt"))
assert os.path.exists(extracted + ".json")
def test_cached_path_extract_local_tar(self):
extracted = cached_path(self.tar_file, cache_dir=self.TEST_DIR, extract_archive=True)
self.check_extracted(extracted)
def test_cached_path_extract_local_zip(self):
extracted = cached_path(self.zip_file, cache_dir=self.TEST_DIR, extract_archive=True)
self.check_extracted(extracted)
@responses.activate
def test_cached_path_extract_remote_tar(self):
url = "http://fake.datastore.com/utf-8.tar.gz"
byt = open(self.tar_file, "rb").read()
responses.add(
responses.GET,
url,
body=byt,
status=200,
content_type="application/tar+gzip",
stream=True,
headers={"Content-Length": str(len(byt))},
)
responses.add(
responses.HEAD,
url,
status=200,
headers={"ETag": "fake-etag"},
)
extracted = cached_path(url, cache_dir=self.TEST_DIR, extract_archive=True)
assert extracted.endswith("-extracted")
self.check_extracted(extracted)
@responses.activate
def test_cached_path_extract_remote_zip(self):
url = "http://fake.datastore.com/utf-8.zip"
byt = open(self.zip_file, "rb").read()
responses.add(
responses.GET,
url,
body=byt,
status=200,
content_type="application/zip",
stream=True,
headers={"Content-Length": str(len(byt))},
)
responses.add(
responses.HEAD,
url,
status=200,
headers={"ETag": "fake-etag"},
)
extracted = cached_path(url, cache_dir=self.TEST_DIR, extract_archive=True)
assert extracted.endswith("-extracted")
self.check_extracted(extracted)
class TestCacheFile(AllenNlpTestCase):
def test_temp_file_removed_on_error(self):
cache_filename = self.TEST_DIR / "cache_file"
with pytest.raises(IOError, match="I made this up"):
with CacheFile(cache_filename) as handle:
raise IOError("I made this up")
assert not os.path.exists(handle.name)
assert not os.path.exists(cache_filename)
| allennlp-master | tests/common/file_utils_test.py |
import os
import logging
import random
from allennlp.common.logging import AllenNlpLogger
from allennlp.common.testing import AllenNlpTestCase
class TestLogging(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
logger = logging.getLogger(str(random.random()))
self.test_log_file = os.path.join(self.TEST_DIR, "test.log")
logger.addHandler(logging.FileHandler(self.test_log_file))
logger.setLevel(logging.DEBUG)
self.logger = logger
self._msg = "test message"
def test_debug_once(self):
self.logger.debug_once(self._msg)
self.logger.debug_once(self._msg)
with open(self.test_log_file, "r") as f:
assert len(f.readlines()) == 1
def test_info_once(self):
self.logger.info_once(self._msg)
self.logger.info_once(self._msg)
with open(self.test_log_file, "r") as f:
assert len(f.readlines()) == 1
def test_warning_once(self):
self.logger.warning_once(self._msg)
self.logger.warning_once(self._msg)
with open(self.test_log_file, "r") as f:
assert len(f.readlines()) == 1
def test_error_once(self):
self.logger.error_once(self._msg)
self.logger.error_once(self._msg)
with open(self.test_log_file, "r") as f:
assert len(f.readlines()) == 1
def test_critical_once(self):
self.logger.critical_once(self._msg)
self.logger.critical_once(self._msg)
with open(self.test_log_file, "r") as f:
assert len(f.readlines()) == 1
def test_debug_once_different_args(self):
self.logger.debug_once("There are %d lights.", 4)
self.logger.debug_once("There are %d lights.", 5)
with open(self.test_log_file, "r") as f:
assert len(f.readlines()) == 1
assert len(self.logger._seen_msgs) == 1
def test_getLogger(self):
logger = logging.getLogger("test_logger")
assert isinstance(logger, AllenNlpLogger)
| allennlp-master | tests/common/logging_test.py |
import pytest
from allennlp.common import cached_transformers
from allennlp.common.testing import AllenNlpTestCase
class TestCachedTransformers(AllenNlpTestCase):
def test_get_missing_from_cache_local_files_only(self):
with pytest.raises(ValueError) as execinfo:
cached_transformers.get(
"bert-base-uncased",
True,
cache_dir=self.TEST_DIR,
local_files_only=True,
)
assert str(execinfo.value) == (
"Cannot find the requested files in the cached path and "
"outgoing traffic has been disabled. To enable model "
"look-ups and downloads online, set 'local_files_only' "
"to False."
)
def test_get_tokenizer_missing_from_cache_local_files_only(self):
with pytest.raises(ValueError) as execinfo:
cached_transformers.get_tokenizer(
"bert-base-uncased",
cache_dir=self.TEST_DIR,
local_files_only=True,
)
assert str(execinfo.value) == (
"Cannot find the requested files in the cached path and "
"outgoing traffic has been disabled. To enable model "
"look-ups and downloads online, set 'local_files_only' "
"to False."
)
| allennlp-master | tests/common/cached_transformers_test.py |
allennlp-master | tests/common/__init__.py |
|
from overrides import overrides
from allennlp.commands import Subcommand
from allennlp.common.plugins import (
discover_plugins,
import_plugins,
)
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import pushd
class TestPlugins(AllenNlpTestCase):
@overrides
def setup_method(self):
super().setup_method()
self.plugins_root = self.FIXTURES_ROOT / "plugins"
def test_no_plugins(self):
available_plugins = set(discover_plugins())
assert available_plugins == set()
def test_file_plugin(self):
available_plugins = set(discover_plugins())
assert available_plugins == set()
with pushd(self.plugins_root):
available_plugins = set(discover_plugins())
assert available_plugins == {"d"}
import_plugins()
subcommands_available = Subcommand.list_available()
assert "d" in subcommands_available
| allennlp-master | tests/common/plugins_test.py |
from datetime import timedelta
import sys
from collections import OrderedDict
import pytest
import torch
from allennlp.common import util
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import push_python_path
class Unsanitizable:
pass
class Sanitizable:
def to_json(self):
return {"sanitizable": True}
class TestCommonUtils(AllenNlpTestCase):
def test_group_by_count(self):
assert util.group_by_count([1, 2, 3, 4, 5, 6, 7], 3, 20) == [
[1, 2, 3],
[4, 5, 6],
[7, 20, 20],
]
def test_lazy_groups_of(self):
xs = [1, 2, 3, 4, 5, 6, 7]
groups = util.lazy_groups_of(iter(xs), group_size=3)
assert next(groups) == [1, 2, 3]
assert next(groups) == [4, 5, 6]
assert next(groups) == [7]
with pytest.raises(StopIteration):
_ = next(groups)
def test_pad_sequence_to_length(self):
assert util.pad_sequence_to_length([1, 2, 3], 5) == [1, 2, 3, 0, 0]
assert util.pad_sequence_to_length([1, 2, 3], 5, default_value=lambda: 2) == [1, 2, 3, 2, 2]
assert util.pad_sequence_to_length([1, 2, 3], 5, padding_on_right=False) == [0, 0, 1, 2, 3]
def test_namespace_match(self):
assert util.namespace_match("*tags", "tags")
assert util.namespace_match("*tags", "passage_tags")
assert util.namespace_match("*tags", "question_tags")
assert util.namespace_match("tokens", "tokens")
assert not util.namespace_match("tokens", "stemmed_tokens")
def test_sanitize(self):
assert util.sanitize(torch.Tensor([1, 2])) == [1, 2]
assert util.sanitize(torch.LongTensor([1, 2])) == [1, 2]
with pytest.raises(ValueError):
util.sanitize(Unsanitizable())
assert util.sanitize(Sanitizable()) == {"sanitizable": True}
def test_import_submodules(self):
(self.TEST_DIR / "mymodule").mkdir()
(self.TEST_DIR / "mymodule" / "__init__.py").touch()
(self.TEST_DIR / "mymodule" / "submodule").mkdir()
(self.TEST_DIR / "mymodule" / "submodule" / "__init__.py").touch()
(self.TEST_DIR / "mymodule" / "submodule" / "subsubmodule.py").touch()
with push_python_path(self.TEST_DIR):
assert "mymodule" not in sys.modules
assert "mymodule.submodule" not in sys.modules
util.import_module_and_submodules("mymodule")
assert "mymodule" in sys.modules
assert "mymodule.submodule" in sys.modules
assert "mymodule.submodule.subsubmodule" in sys.modules
def test_get_frozen_and_tunable_parameter_names(self):
model = torch.nn.Sequential(
OrderedDict([("conv", torch.nn.Conv1d(5, 5, 5)), ("linear", torch.nn.Linear(5, 10))])
)
named_parameters = dict(model.named_parameters())
named_parameters["linear.weight"].requires_grad_(False)
named_parameters["linear.bias"].requires_grad_(False)
(
frozen_parameter_names,
tunable_parameter_names,
) = util.get_frozen_and_tunable_parameter_names(model)
assert set(frozen_parameter_names) == {"linear.weight", "linear.bias"}
assert set(tunable_parameter_names) == {"conv.weight", "conv.bias"}
def test_sanitize_ptb_tokenized_string(self):
def create_surrounding_test_case(start_ptb_token, end_ptb_token, start_token, end_token):
return (
"a {} b c {} d".format(start_ptb_token, end_ptb_token),
"a {}b c{} d".format(start_token, end_token),
)
def create_fwd_token_test_case(fwd_token):
return "a {} b".format(fwd_token), "a {}b".format(fwd_token)
def create_backward_token_test_case(backward_token):
return "a {} b".format(backward_token), "a{} b".format(backward_token)
punct_forward = {"`", "$", "#"}
punct_backward = {".", ",", "!", "?", ":", ";", "%", "'"}
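# "Forward" punctuation attaches to the token that follows it; "backward" punctuation attaches to the token before it.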
test_cases = [
# Parentheses
create_surrounding_test_case("-lrb-", "-rrb-", "(", ")"),
create_surrounding_test_case("-lsb-", "-rsb-", "[", "]"),
create_surrounding_test_case("-lcb-", "-rcb-", "{", "}"),
# Parentheses don't have to match
create_surrounding_test_case("-lsb-", "-rcb-", "[", "}"),
# Also check that casing doesn't matter
create_surrounding_test_case("-LsB-", "-rcB-", "[", "}"),
# Quotes
create_surrounding_test_case("``", "''", '"', '"'),
# Start/end tokens
create_surrounding_test_case("<s>", "</s>", "", ""),
# Tokens that merge forward
*[create_fwd_token_test_case(t) for t in punct_forward],
# Tokens that merge backward
*[create_backward_token_test_case(t) for t in punct_backward],
# Merge tokens starting with ' backwards
("I 'm", "I'm"),
# Merge tokens backwards when matching n't or na (special cases; parentheses behave in the same way)
("I do n't", "I don't"),
("gon na", "gonna"),
# Also make sure casing is preserved
("gon NA", "gonNA"),
# This is a no op
("A b C d", "A b C d"),
]
for ptb_string, expected in test_cases:
actual = util.sanitize_ptb_tokenized_string(ptb_string)
assert actual == expected
@pytest.mark.parametrize(
"size, result",
[
(12, "12B"),
(int(1.2 * 1024), "1.2K"),
(12 * 1024, "12K"),
(120 * 1024, "120K"),
(int(1.2 * 1024 * 1024), "1.2M"),
(12 * 1024 * 1024, "12M"),
(120 * 1024 * 1024, "120M"),
(int(1.2 * 1024 * 1024 * 1024), "1.2G"),
(12 * 1024 * 1024 * 1024, "12G"),
],
)
def test_format_size(size: int, result: str):
assert util.format_size(size) == result
@pytest.mark.parametrize(
"td, result",
[
(timedelta(days=2, hours=3), "2 days"),
(timedelta(days=1, hours=3), "1 day"),
(timedelta(hours=3, minutes=12), "3 hours"),
(timedelta(hours=1, minutes=12), "1 hour, 12 mins"),
(timedelta(minutes=12), "12 mins"),
],
)
def test_format_timedelta(td: timedelta, result: str):
assert util.format_timedelta(td) == result
| allennlp-master | tests/common/util_test.py |
import json
import os
import re
from collections import OrderedDict
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import (
infer_and_cast,
Params,
parse_overrides,
unflatten,
with_fallback,
remove_keys_from_params,
)
from allennlp.common.testing import AllenNlpTestCase
class TestParams(AllenNlpTestCase):
def test_load_from_file(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
params = Params.from_file(filename)
assert "dataset_reader" in params
assert "trainer" in params
model_params = params.pop("model")
assert model_params.pop("type") == "simple_tagger"
def test_replace_none(self):
params = Params({"a": "None", "b": [1.0, "None", 2], "c": {"d": "None"}})
assert params["a"] is None
assert params["b"][1] is None
assert params["c"]["d"] is None
def test_bad_unicode_environment_variables(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
os.environ["BAD_ENVIRONMENT_VARIABLE"] = "\udce2"
Params.from_file(filename)
del os.environ["BAD_ENVIRONMENT_VARIABLE"]
@pytest.mark.parametrize("input_type", [dict, str])
def test_overrides(self, input_type):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
overrides = {
"train_data_path": "FOO",
"model": {"type": "BAR"},
"model.text_field_embedder.tokens.type": "BAZ",
"data_loader.batch_sampler.sorting_keys.0": "question",
}
params = Params.from_file(
filename, overrides if input_type == dict else json.dumps(overrides)
)
assert "dataset_reader" in params
assert "trainer" in params
assert params["train_data_path"] == "FOO"
assert params["data_loader"]["batch_sampler"]["sorting_keys"][0] == "question"
model_params = params.pop("model")
assert model_params.pop("type") == "BAR"
assert model_params["text_field_embedder"]["tokens"]["type"] == "BAZ"
def test_unflatten(self):
flattened = {"a.b.c": 1, "a.b.d": 0, "a.e.f.g.h": 2, "b": 3}
unflattened = unflatten(flattened)
assert unflattened == {"a": {"b": {"c": 1, "d": 0}, "e": {"f": {"g": {"h": 2}}}}, "b": 3}
# should do nothing to a non-flat dictionary
assert unflatten(unflattened) == unflattened
def test_with_fallback(self):
preferred = {"a": 1}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": 1, "b": 2}
# incompatibility is ok
preferred = {"a": {"c": 3}}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": {"c": 3}, "b": 2}
# goes deep
preferred = {"deep": {"a": 1}}
fallback = {"deep": {"a": 0, "b": 2}}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"deep": {"a": 1, "b": 2}}
def test_parse_overrides(self):
assert parse_overrides("") == {}
assert parse_overrides("{}") == {}
override_dict = parse_overrides('{"train_data": "/train", "trainer.num_epochs": 10}')
assert override_dict == {"train_data": "/train", "trainer": {"num_epochs": 10}}
params = with_fallback(
preferred=override_dict,
fallback={
"train_data": "/test",
"model": "simple_tagger",
"trainer": {"num_epochs": 100, "optimizer": "sgd"},
},
)
assert params == {
"train_data": "/train",
"model": "simple_tagger",
"trainer": {"num_epochs": 10, "optimizer": "sgd"},
}
def test_as_flat_dict(self):
params = Params({"a": 10, "b": {"c": 20, "d": "stuff"}}).as_flat_dict()
assert params == {"a": 10, "b.c": 20, "b.d": "stuff"}
def test_jsonnet_features(self):
config_file = self.TEST_DIR / "config.jsonnet"
with open(config_file, "w") as f:
f.write(
"""{
// This example is copied straight from the jsonnet docs
person1: {
name: "Alice",
welcome: "Hello " + self.name + "!",
},
person2: self.person1 { name: "Bob" },
}"""
)
params = Params.from_file(config_file)
alice = params.pop("person1")
bob = params.pop("person2")
assert alice.as_dict() == {"name": "Alice", "welcome": "Hello Alice!"}
assert bob.as_dict() == {"name": "Bob", "welcome": "Hello Bob!"}
params.assert_empty("TestParams")
def test_regexes_with_backslashes(self):
bad_regex = self.TEST_DIR / "bad_regex.jsonnet"
good_regex = self.TEST_DIR / "good_regex.jsonnet"
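# Jsonnet rejects the lone backslash in "a\.b", so the pattern has to be written as "a\\.b".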
with open(bad_regex, "w") as f:
f.write(r'{"myRegex": "a\.b"}')
with open(good_regex, "w") as f:
f.write(r'{"myRegex": "a\\.b"}')
with pytest.raises(RuntimeError):
Params.from_file(bad_regex)
params = Params.from_file(good_regex)
regex = params["myRegex"]
assert re.match(regex, "a.b")
assert not re.match(regex, "a-b")
# Check roundtripping
good_regex2 = self.TEST_DIR / "good_regex2.jsonnet"
with open(good_regex2, "w") as f:
f.write(json.dumps(params.as_dict()))
params2 = Params.from_file(good_regex2)
assert params.as_dict() == params2.as_dict()
def test_env_var_substitution(self):
substitutor = self.TEST_DIR / "substitutor.jsonnet"
key = "TEST_ENV_VAR_SUBSTITUTION"
assert os.environ.get(key) is None
with open(substitutor, "w") as f:
f.write(f'{{"path": std.extVar("{key}")}}')
# raises without environment variable set
with pytest.raises(RuntimeError):
Params.from_file(substitutor)
os.environ[key] = "PERFECT"
params = Params.from_file(substitutor)
assert params["path"] == "PERFECT"
del os.environ[key]
@pytest.mark.xfail(
not os.path.exists(AllenNlpTestCase.PROJECT_ROOT / "training_config"),
reason="Training configs not installed with pip",
)
def test_known_configs(self):
configs = os.listdir(self.PROJECT_ROOT / "training_config")
# Our configs use environment variable substitution, and the _jsonnet parser
# will fail if we don't pass it correct environment variables.
forced_variables = [
# constituency parser
"PTB_TRAIN_PATH",
"PTB_DEV_PATH",
"PTB_TEST_PATH",
# dependency parser
"PTB_DEPENDENCIES_TRAIN",
"PTB_DEPENDENCIES_VAL",
# multilingual dependency parser
"TRAIN_PATHNAME",
"DEV_PATHNAME",
"TEST_PATHNAME",
# srl_elmo_5.5B
"SRL_TRAIN_DATA_PATH",
"SRL_VALIDATION_DATA_PATH",
# coref
"COREF_TRAIN_DATA_PATH",
"COREF_DEV_DATA_PATH",
"COREF_TEST_DATA_PATH",
# ner
"NER_TRAIN_DATA_PATH",
"NER_TEST_A_PATH",
"NER_TEST_B_PATH",
# bidirectional lm
"BIDIRECTIONAL_LM_TRAIN_PATH",
"BIDIRECTIONAL_LM_VOCAB_PATH",
"BIDIRECTIONAL_LM_ARCHIVE_PATH",
]
for var in forced_variables:
os.environ[var] = os.environ.get(var) or str(self.TEST_DIR)
for config in configs:
try:
Params.from_file(self.PROJECT_ROOT / "training_config" / config)
except Exception as e:
raise AssertionError(f"unable to load params for {config}, because {e}")
for var in forced_variables:
if os.environ[var] == str(self.TEST_DIR):
del os.environ[var]
def test_as_ordered_dict(self):
# keyD > keyC > keyE; keyDA > keyDB; Next all other keys alphabetically
preference_orders = [["keyD", "keyC", "keyE"], ["keyDA", "keyDB"]]
params = Params(
{
"keyC": "valC",
"keyB": "valB",
"keyA": "valA",
"keyE": "valE",
"keyD": {"keyDB": "valDB", "keyDA": "valDA"},
}
)
ordered_params_dict = params.as_ordered_dict(preference_orders)
expected_ordered_params_dict = OrderedDict(
{
"keyD": {"keyDA": "valDA", "keyDB": "valDB"},
"keyC": "valC",
"keyE": "valE",
"keyA": "valA",
"keyB": "valB",
}
)
assert json.dumps(ordered_params_dict) == json.dumps(expected_ordered_params_dict)
def test_to_file(self):
# Test to_file works with or without preference orders
params_dict = {"keyA": "valA", "keyB": "valB"}
expected_ordered_params_dict = OrderedDict({"keyB": "valB", "keyA": "valA"})
params = Params(params_dict)
file_path = self.TEST_DIR / "config.jsonnet"
# check with preference orders
params.to_file(file_path, [["keyB", "keyA"]])
with open(file_path, "r") as handle:
ordered_params_dict = OrderedDict(json.load(handle))
assert json.dumps(expected_ordered_params_dict) == json.dumps(ordered_params_dict)
# check that omitting preference orders doesn't raise an error
params.to_file(file_path)
def test_infer_and_cast(self):
lots_of_strings = {
"a": ["10", "1.3", "true"],
"b": {"x": 10, "y": "20.1", "z": "other things"},
"c": "just a string",
}
casted = {
"a": [10, 1.3, True],
"b": {"x": 10, "y": 20.1, "z": "other things"},
"c": "just a string",
}
assert infer_and_cast(lots_of_strings) == casted
contains_bad_data = {"x": 10, "y": int}
with pytest.raises(ValueError, match="cannot infer type"):
infer_and_cast(contains_bad_data)
params = Params(lots_of_strings)
assert params.as_dict() == lots_of_strings
assert params.as_dict(infer_type_and_cast=True) == casted
def test_pop_choice(self):
choices = ["my_model", "other_model"]
params = Params({"model": "my_model"})
assert params.pop_choice("model", choices) == "my_model"
params = Params({"model": "non_existent_model"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices)
params = Params({"model": "module.submodule.ModelName"})
assert params.pop_choice("model", choices) == "module.submodule.ModelName"
params = Params({"model": "module.submodule.ModelName"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices, allow_class_names=False)
def test_remove_keys_from_params(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
params = Params.from_file(filename)
assert params["data_loader"]["batch_sampler"]["type"] == "bucket"
assert params["data_loader"]["batch_sampler"]["batch_size"] == 80
remove_keys_from_params(params, keys=["batch_size"])
assert "batch_size" not in params["data_loader"]["batch_sampler"]
remove_keys_from_params(params, keys=["type", "batch_size"])
assert "type" not in params["data_loader"]["batch_sampler"]
remove_keys_from_params(params, keys=["data_loader"])
assert "data_loader" not in params
| allennlp-master | tests/common/params_test.py |
import torch
from allennlp.common.testing import AllenNlpTestCase, multi_device
actual_devices = set()
class TestTesting(AllenNlpTestCase):
@multi_device
def test_multi_device(self, device: str):
actual_devices.add(device)
def test_devices_accounted_for(self):
expected_devices = {"cpu", "cuda"} if torch.cuda.is_available() else {"cpu"}
assert expected_devices == actual_devices
| allennlp-master | tests/common/testing.py |
from pytest import raises
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.token_indexers import TokenCharactersIndexer
from allennlp.interpret.attackers import Hotflip
from allennlp.models.archival import load_archive
from allennlp.modules.token_embedders import EmptyEmbedder
from allennlp.predictors import Predictor, TextClassifierPredictor
from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.data.vocabulary import Vocabulary
from allennlp.common.testing.interpret_test import (
FakeModelForTestingInterpret,
FakePredictorForTestingInterpret,
)
class TestHotflip(AllenNlpTestCase):
def test_hotflip(self):
inputs = {"sentence": "I always write unit tests for my code."}
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive)
hotflipper = Hotflip(predictor)
hotflipper.initialize()
attack = hotflipper.attack_from_json(inputs, "tokens", "grad_input_1")
assert attack is not None
assert "final" in attack
assert "original" in attack
assert "outputs" in attack
assert len(attack["final"][0]) == len(
attack["original"]
) # hotflip replaces words without removing any
def test_with_token_characters_indexer(self):
inputs = {"sentence": "I always write unit tests for my code."}
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive)
predictor._dataset_reader._token_indexers["chars"] = TokenCharactersIndexer(
min_padding_length=1
)
predictor._model._text_field_embedder._token_embedders["chars"] = EmptyEmbedder()
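# The chars indexer added above is paired with an EmptyEmbedder, so it contributes indexed input but nothing to the embedding; Hotflip should still work end-to-end.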
hotflipper = Hotflip(predictor)
hotflipper.initialize()
attack = hotflipper.attack_from_json(inputs, "tokens", "grad_input_1")
assert attack is not None
assert "final" in attack
assert "original" in attack
assert "outputs" in attack
assert len(attack["final"][0]) == len(
attack["original"]
) # hotflip replaces words without removing any
# This checks for a bug that arose with a change in the pytorch API. We want to be sure we
# can handle the case where we have to re-encode a vocab item because we didn't save it in
# our fake embedding matrix (see Hotflip docstring for more info).
hotflipper = Hotflip(predictor, max_tokens=50)
hotflipper.initialize()
hotflipper._first_order_taylor(
grad=torch.rand((10,)).numpy(), token_idx=torch.tensor(60), sign=1
)
def test_interpret_fails_when_embedding_layer_not_found(self):
inputs = {"sentence": "I always write unit tests for my code."}
vocab = Vocabulary()
vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
predictor = TextClassifierPredictor(model, TextClassificationJsonReader())
hotflipper = Hotflip(predictor)
with raises(RuntimeError):
hotflipper.initialize()
def test_interpret_works_with_custom_embedding_layer(self):
inputs = {"sentence": "I always write unit tests for my code"}
vocab = Vocabulary()
vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
predictor = FakePredictorForTestingInterpret(model, TextClassificationJsonReader())
hotflipper = Hotflip(predictor)
hotflipper.initialize()
attack = hotflipper.attack_from_json(inputs, "tokens", "grad_input_1")
assert attack is not None
assert "final" in attack
assert "original" in attack
assert "outputs" in attack
assert len(attack["final"][0]) == len(
attack["original"]
) # hotflip replaces words without removing any
| allennlp-master | tests/interpret/hotflip_test.py |
from pytest import approx, raises

from allennlp.common.testing import AllenNlpTestCase
from allennlp.interpret.saliency_interpreters import SimpleGradient
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor, TextClassifierPredictor
from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.data.vocabulary import Vocabulary
from allennlp.common.testing.interpret_test import (
    FakeModelForTestingInterpret,
    FakePredictorForTestingInterpret,
)


class TestSimpleGradient(AllenNlpTestCase):
    def test_simple_gradient_basic_text(self):
        inputs = {"sentence": "It was the ending that I hated"}
        archive = load_archive(
            self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
        )
        predictor = Predictor.from_archive(archive, "text_classifier")
        interpreter = SimpleGradient(predictor)
        interpretation = interpreter.saliency_interpret_from_json(inputs)
        assert interpretation is not None
        assert "instance_1" in interpretation
        assert "grad_input_1" in interpretation["instance_1"]
        grad_input_1 = interpretation["instance_1"]["grad_input_1"]
        assert len(grad_input_1) == 7  # 7 words in input

        # two interpretations should be identical for gradient
        repeat_interpretation = interpreter.saliency_interpret_from_json(inputs)
        repeat_grad_input_1 = repeat_interpretation["instance_1"]["grad_input_1"]
        for grad, repeat_grad in zip(grad_input_1, repeat_grad_input_1):
            assert grad == approx(repeat_grad)

    def test_interpret_fails_when_embedding_layer_not_found(self):
        inputs = {"sentence": "It was the ending that I hated"}
        vocab = Vocabulary()
        vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
        model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
        predictor = TextClassifierPredictor(model, TextClassificationJsonReader())
        interpreter = SimpleGradient(predictor)
        with raises(RuntimeError):
            interpreter.saliency_interpret_from_json(inputs)

    def test_interpret_works_with_custom_embedding_layer(self):
        inputs = {"sentence": "It was the ending that I hated"}
        vocab = Vocabulary()
        vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
        model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
        predictor = FakePredictorForTestingInterpret(model, TextClassificationJsonReader())
        interpreter = SimpleGradient(predictor)
        interpretation = interpreter.saliency_interpret_from_json(inputs)
        assert interpretation is not None
        assert "instance_1" in interpretation
        assert "grad_input_1" in interpretation["instance_1"]
        grad_input_1 = interpretation["instance_1"]["grad_input_1"]
        assert len(grad_input_1) == 7  # 7 words in input
| allennlp-master | tests/interpret/simple_gradient_test.py |
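Purely as a sketch of the API the test above exercises (not repository code), SimpleGradient can be driven the same way from a script; the archive path is again a placeholder.

# Minimal SimpleGradient usage, assuming a trained text classifier at a placeholder path.
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp.interpret.saliency_interpreters import SimpleGradient

archive = load_archive("model.tar.gz")  # placeholder path
predictor = Predictor.from_archive(archive, "text_classifier")

interpreter = SimpleGradient(predictor)
interpretation = interpreter.saliency_interpret_from_json(
    {"sentence": "It was the ending that I hated"}
)
# One saliency score per input token, keyed by instance and gradient input.
for position, score in enumerate(interpretation["instance_1"]["grad_input_1"]):
    print(position, score)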
from pytest import approx, raises

from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.interpret.saliency_interpreters import IntegratedGradient
from allennlp.predictors import Predictor, TextClassifierPredictor
from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.data.vocabulary import Vocabulary
from allennlp.common.testing.interpret_test import (
    FakeModelForTestingInterpret,
    FakePredictorForTestingInterpret,
)


class TestIntegratedGradient(AllenNlpTestCase):
    def test_integrated_gradient(self):
        inputs = {"sentence": "It was the ending that I hated"}
        archive = load_archive(
            self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
        )
        predictor = Predictor.from_archive(archive, "text_classifier")
        interpreter = IntegratedGradient(predictor)
        interpretation = interpreter.saliency_interpret_from_json(inputs)
        assert interpretation is not None
        assert "instance_1" in interpretation
        assert "grad_input_1" in interpretation["instance_1"]
        grad_input_1 = interpretation["instance_1"]["grad_input_1"]
        assert len(grad_input_1) == 7  # 7 words in input

        # two interpretations should be identical for integrated gradients
        repeat_interpretation = interpreter.saliency_interpret_from_json(inputs)
        repeat_grad_input_1 = repeat_interpretation["instance_1"]["grad_input_1"]
        for grad, repeat_grad in zip(grad_input_1, repeat_grad_input_1):
            assert grad == approx(repeat_grad)

    def test_interpret_fails_when_embedding_layer_not_found(self):
        inputs = {"sentence": "It was the ending that I hated"}
        vocab = Vocabulary()
        vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
        model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
        predictor = TextClassifierPredictor(model, TextClassificationJsonReader())
        interpreter = IntegratedGradient(predictor)
        with raises(RuntimeError):
            interpreter.saliency_interpret_from_json(inputs)

    def test_interpret_works_with_custom_embedding_layer(self):
        inputs = {"sentence": "It was the ending that I hated"}
        vocab = Vocabulary()
        vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
        model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
        predictor = FakePredictorForTestingInterpret(model, TextClassificationJsonReader())
        interpreter = IntegratedGradient(predictor)
        interpretation = interpreter.saliency_interpret_from_json(inputs)
        assert interpretation is not None
        assert "instance_1" in interpretation
        assert "grad_input_1" in interpretation["instance_1"]
        grad_input_1 = interpretation["instance_1"]["grad_input_1"]
        assert len(grad_input_1) == 7  # 7 words in input
| allennlp-master | tests/interpret/integrated_gradient_test.py |
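Since SimpleGradient and IntegratedGradient return results under the same keys, a small script (illustrative only, placeholder archive path) can compare their per-token scores side by side.

# Hypothetical comparison of the two gradient-based interpreters on one sentence.
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp.interpret.saliency_interpreters import IntegratedGradient, SimpleGradient

archive = load_archive("model.tar.gz")  # placeholder path
predictor = Predictor.from_archive(archive, "text_classifier")
inputs = {"sentence": "It was the ending that I hated"}

simple = SimpleGradient(predictor).saliency_interpret_from_json(inputs)
integrated = IntegratedGradient(predictor).saliency_interpret_from_json(inputs)

# Both produce one score per token, so they can be compared position by position.
for s, ig in zip(
    simple["instance_1"]["grad_input_1"], integrated["instance_1"]["grad_input_1"]
):
    print(f"simple={s:.4f}  integrated={ig:.4f}")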
| allennlp-master | tests/interpret/__init__.py
from pytest import raises

from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.interpret.saliency_interpreters import SmoothGradient
from allennlp.predictors import Predictor, TextClassifierPredictor
from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.data.vocabulary import Vocabulary
from allennlp.common.testing.interpret_test import (
    FakeModelForTestingInterpret,
    FakePredictorForTestingInterpret,
)


class TestSmoothGradient(AllenNlpTestCase):
    def test_smooth_gradient(self):
        inputs = {"sentence": "It was the ending that I hated"}
        archive = load_archive(
            self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
        )
        predictor = Predictor.from_archive(archive, "text_classifier")
        interpreter = SmoothGradient(predictor)
        interpretation = interpreter.saliency_interpret_from_json(inputs)
        assert interpretation is not None
        assert "instance_1" in interpretation
        assert "grad_input_1" in interpretation["instance_1"]
        assert len(interpretation["instance_1"]["grad_input_1"]) == 7  # 7 words in input

    def test_interpret_fails_when_embedding_layer_not_found(self):
        inputs = {"sentence": "It was the ending that I hated"}
        vocab = Vocabulary()
        vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
        model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
        predictor = TextClassifierPredictor(model, TextClassificationJsonReader())
        interpreter = SmoothGradient(predictor)
        with raises(RuntimeError):
            interpreter.saliency_interpret_from_json(inputs)

    def test_interpret_works_with_custom_embedding_layer(self):
        inputs = {"sentence": "It was the ending that I hated"}
        vocab = Vocabulary()
        vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
        model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
        predictor = FakePredictorForTestingInterpret(model, TextClassificationJsonReader())
        interpreter = SmoothGradient(predictor)
        interpretation = interpreter.saliency_interpret_from_json(inputs)
        assert interpretation is not None
        assert "instance_1" in interpretation
        assert "grad_input_1" in interpretation["instance_1"]
        grad_input_1 = interpretation["instance_1"]["grad_input_1"]
        assert len(grad_input_1) == 7  # 7 words in input
| allennlp-master | tests/interpret/smooth_gradient_test.py |
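A minimal sketch of SmoothGradient usage (illustrative only, placeholder archive path): note that, unlike the two tests above, this test does not assert repeatability, consistent with SmoothGradient averaging gradients over noise-perturbed copies of the input.

# Hypothetical stand-alone SmoothGradient run; "model.tar.gz" is a placeholder.
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp.interpret.saliency_interpreters import SmoothGradient

archive = load_archive("model.tar.gz")  # placeholder path
predictor = Predictor.from_archive(archive, "text_classifier")

interpreter = SmoothGradient(predictor)
interpretation = interpreter.saliency_interpret_from_json(
    {"sentence": "It was the ending that I hated"}
)
# One saliency score per input token; values may vary slightly between runs.
print(interpretation["instance_1"]["grad_input_1"])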
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp.interpret.attackers import InputReduction


class TestInputReduction(AllenNlpTestCase):
    def test_input_reduction(self):
        # test using classification model
        inputs = {"sentence": "I always write unit tests for my code."}
        archive = load_archive(
            self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
        )
        predictor = Predictor.from_archive(archive)
        reducer = InputReduction(predictor)
        reduced = reducer.attack_from_json(inputs, "tokens", "grad_input_1")
        assert reduced is not None
        assert "final" in reduced
        assert "original" in reduced
        assert reduced["final"][0]  # always at least one token
        assert len(reduced["final"][0]) <= len(
            reduced["original"]
        )  # input reduction removes tokens
        for word in reduced["final"][0]:  # no new words entered
            assert word in reduced["original"]

        # test using NER model (tests different underlying logic)
        inputs = {"sentence": "Eric Wallace was an intern at AI2"}
        archive = load_archive(
            self.FIXTURES_ROOT / "simple_tagger" / "serialization" / "model.tar.gz"
        )
        predictor = Predictor.from_archive(archive, "sentence_tagger")
        reducer = InputReduction(predictor)
        reduced = reducer.attack_from_json(inputs, "tokens", "grad_input_1")
        assert reduced is not None
        assert "final" in reduced
        assert "original" in reduced
        for reduced_input in reduced["final"]:
            assert reduced_input  # always at least one token
            assert len(reduced_input) <= len(reduced["original"])  # input reduction removes tokens
            for word in reduced_input:  # no new words entered
                assert word in reduced["original"]
| allennlp-master | tests/interpret/input_reduction_test.py |
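As with the other attackers, InputReduction can be run outside the test harness; the sketch below is illustrative only and the archive path is a placeholder for a trained classifier archive.

# Hypothetical stand-alone InputReduction run, mirroring the classification case above.
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp.interpret.attackers import InputReduction

archive = load_archive("model.tar.gz")  # placeholder path
predictor = Predictor.from_archive(archive)

reducer = InputReduction(predictor)
reduced = reducer.attack_from_json(
    {"sentence": "I always write unit tests for my code."}, "tokens", "grad_input_1"
)
# InputReduction only removes tokens, so the reduced sentence is a subset of the original.
print("original:", reduced["original"])
print("reduced: ", reduced["final"][0])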