""" |
|
PyTorch utilities: Utilities related to PyTorch |
|
""" |
|
from typing import List, Optional, Tuple, Union |
|
|
|
from . import logging |
|
from .import_utils import is_torch_available, is_torch_version |
|
|
|
|
|
if is_torch_available():
    import torch

logger = logging.get_logger(__name__)

try:
    from torch._dynamo import allow_in_graph as maybe_allow_in_graph
except (ImportError, ModuleNotFoundError):
    # Fall back to a no-op decorator when `torch._dynamo` is unavailable
    # (e.g. on PyTorch builds older than 2.0).
    def maybe_allow_in_graph(cls):
        return cls
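

# Typical use of `maybe_allow_in_graph` (illustrative sketch; the class name is
# hypothetical): decorating a module class lets dynamo trace through it on
# PyTorch 2.x, while older versions simply get the class back unchanged:
#
#     @maybe_allow_in_graph
#     class MyTransformerBlock(torch.nn.Module):
#         ...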


def randn_tensor(
    shape: Union[Tuple, List],
    generator: Optional[Union[List["torch.Generator"], "torch.Generator"]] = None,
    device: Optional["torch.device"] = None,
    dtype: Optional["torch.dtype"] = None,
    layout: Optional["torch.layout"] = None,
):
    """A helper function to create random tensors on the desired `device` with the desired `dtype`. When
    passing a list of generators, you can seed each batch element individually. If CPU generators are passed, the
    tensor is always created on the CPU and then moved to `device`.
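
    Example (illustrative; the shape and seeds are arbitrary). A list of generators seeds each batch element
    independently, so per-sample noise stays reproducible:

        >>> generators = [torch.Generator("cpu").manual_seed(i) for i in range(4)]
        >>> noise = randn_tensor((4, 3, 8, 8), generator=generators)
        >>> noise.shape
        torch.Size([4, 3, 8, 8])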
""" |
|
|
|
    rand_device = device
    batch_size = shape[0]

    layout = layout or torch.strided
    device = device or torch.device("cpu")

    if generator is not None:
        gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type
        if gen_device_type != device.type and gen_device_type == "cpu":
            # CPU generators can only sample on the CPU; the result is moved to `device` afterwards.
            rand_device = "cpu"
            if device != "mps":
                logger.info(
                    f"The passed generator was created on 'cpu' even though a tensor on {device} was expected."
                    f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably"
                    f" slightly speed up this function by passing a generator that was created on the {device} device."
                )
        elif gen_device_type != device.type and gen_device_type == "cuda":
            raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.")

    # make sure a generator list of length 1 is treated like a single generator
    if isinstance(generator, list) and len(generator) == 1:
        generator = generator[0]

    if isinstance(generator, list):
        # sample each batch element with its own generator, then concatenate along the batch dimension
        shape = (1,) + shape[1:]
        latents = [
            torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)
            for i in range(batch_size)
        ]
        latents = torch.cat(latents, dim=0).to(device)
    else:
        latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)

    return latents


def is_compiled_module(module):
    """Check whether the module was compiled with `torch.compile()`.
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)