from contextlib import contextmanager
import copy
import itertools
import os
import inspect
import logging
import warnings
from typing import NamedTuple
import torch
from . import comm
import torch.distributed as dist
RPC_AVAILABLE = False
if dist.is_available():
from torch.distributed.distributed_c10d import _get_default_group
from torch.distributed.distributed_c10d import ReduceOp
if torch.distributed.rpc.is_available():
RPC_AVAILABLE = True
from torch.distributed.rpc import RRef
from ..modules import Module
from .replicate import replicate
from .scatter_gather import scatter_kwargs, gather, is_namedtuple
from .parallel_apply import parallel_apply
from torch._utils import _get_device_index, _get_all_device_indices
from ._functions import _get_stream
def _find_tensors(obj):
r"""
Recursively find all tensors contained in the specified object.
"""
if RPC_AVAILABLE and isinstance(obj, RRef):
# If the current node is the owner of the RRef, unwrap it and try to
# find Tensors.
# TODO: Expand to remote RRefs.
if obj.is_owner():
return _find_tensors(obj.local_value())
if isinstance(obj, torch.Tensor):
return [obj]
if isinstance(obj, (list, tuple)):
return itertools.chain(*map(_find_tensors, obj))
if isinstance(obj, dict):
return itertools.chain(*map(_find_tensors, obj.values()))
return []
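# Illustrative usage sketch (not part of the public API): ``_find_tensors``
# flattens arbitrarily nested lists/tuples/dicts down to the tensors they
# contain, skipping non-tensor leaves, e.g.
#
#   list(_find_tensors({"a": torch.ones(1), "b": [torch.zeros(2), 3]}))
#   # -> [tensor([1.]), tensor([0., 0.])]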
def _dump_DDP_relevant_env_vars():
relevant_env_vars = [
"RANK",
"LOCAL_RANK",
"WORLD_SIZE",
"MASTER_PORT",
"MASTER_ADDR",
"CUDA_VISIBLE_DEVICES",
"GLOO_SOCKET_IFNAME",
"GLOO_DEVICE_TRANSPORT",
"NCCL_SOCKET_IFNAME",
"NCCL_BLOCKING_WAIT",
"NCCL_DEBUG",
"NCCL_DEBUG_SUBSYS",
"NCCL_IB_DISABLE",
# More NCCL env vars:
"NCCL_P2P_DISABLE",
"NCCL_P2P_LEVEL",
"NCCL_SHM_DISABLE",
"NCCL_SOCKET_NTHREADS",
"NCCL_NSOCKS_PERTHREAD",
"NCCL_BUFFSIZE",
"NCCL_NTHREADS",
"NCCL_RINGS",
"NCCL_MAX_NCHANNELS",
"NCCL_MIN_NCHANNELS",
"NCCL_CHECKS_DISABLE",
"NCCL_CHECK_POINTERS",
"NCCL_LAUNCH_MODE",
"NCCL_IB_HCA",
"NCCL_IB_TIMEOUT",
"NCCL_IB_RETRY_CNT",
"NCCL_IB_GID_INDEX",
"NCCL_IB_SL",
"NCCL_IB_TC",
"NCCL_IB_AR_THRESHOLD",
"NCCL_IB_CUDA_SUPPORT",
"NCCL_NET_GDR_LEVEL",
"NCCL_NET_GDR_READ",
"NCCL_SINGLE_RING_THRESHOLD",
"NCCL_LL_THRESHOLD",
"NCCL_TREE_THRESHOLD",
"NCCL_ALGO",
"NCCL_PROTO",
"NCCL_IGNORE_CPU_AFFINITY",
"NCCL_DEBUG_FILE",
"NCCL_COLLNET_ENABLE",
"NCCL_TOPO_FILE",
"NCCL_TOPO_DUMP_FILE",
]
formatted_output = ""
for var in relevant_env_vars:
value = os.environ.get(var, "N/A")
formatted_output += "env:%s=%s\n" % (var, value)
print(formatted_output)
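# Illustrative output of ``_dump_DDP_relevant_env_vars`` (assuming a process
# launched with only RANK=0 and WORLD_SIZE=2 set in its environment):
#   env:RANK=0
#   env:LOCAL_RANK=N/A
#   env:WORLD_SIZE=2
#   ...
# where "N/A" marks variables that are not set.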
class _DDPUnevenInputsConfig(NamedTuple):
ddp_join_enabled: bool
ddp_join_divide_by_initial_world_size: bool
class DistributedDataParallel(Module):
r"""Implements distributed data parallelism that is based on
``torch.distributed`` package at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the batch
dimension. The module is replicated on each machine and each device, and
each such replica handles a portion of the input. During the backwards
pass, gradients from each node are averaged.
The batch size should be larger than the number of GPUs used locally.
See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`.
The same constraints on input as in :class:`torch.nn.DataParallel` apply.
Creation of this class requires that ``torch.distributed`` be already
initialized, by calling :func:`torch.distributed.init_process_group`.
``DistributedDataParallel`` is proven to be significantly faster than
:class:`torch.nn.DataParallel` for single-node multi-GPU data
parallel training.
To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn
``N`` processes, ensuring that each process exclusively works on a single
GPU from 0 to N-1. This can be done either by setting
``CUDA_VISIBLE_DEVICES`` for every process or by calling:
>>> torch.cuda.set_device(i)
where i is from 0 to N-1. In each process, you should refer to the following
to construct this module:
>>> torch.distributed.init_process_group(
>>> backend='nccl', world_size=N, init_method='...'
>>> )
>>> model = DistributedDataParallel(model, device_ids=[i], output_device=i)
In order to spawn multiple processes per node, you can use either
``torch.distributed.launch`` or ``torch.multiprocessing.spawn``.
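For example, a minimal, illustrative sketch using ``torch.multiprocessing.spawn``
(``example_main``, ``model`` and ``N`` are placeholders for your own entry
point, module and GPU count):
>>> import torch.multiprocessing as mp
>>> def example_main(rank, world_size):
>>>     torch.cuda.set_device(rank)
>>>     torch.distributed.init_process_group(
>>>         backend='nccl', world_size=world_size, rank=rank, init_method='...'
>>>     )
>>>     ddp_model = DistributedDataParallel(
>>>         model.to(rank), device_ids=[rank], output_device=rank
>>>     )
>>>     # ... per-process training loop ...
>>>
>>> mp.spawn(example_main, args=(N,), nprocs=N)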
.. note ::
Please refer to `PyTorch Distributed Overview <https://pytorch.org/tutorials/beginner/dist_overview.html>`__
for a brief introduction to all features related to distributed training.
.. note:: The ``nccl`` backend is currently the fastest and the recommended
backend when using GPUs. This applies to both single-node and
multi-node distributed training.
.. note:: This module also supports mixed-precision distributed training.
This means that your model can have different types of parameters, such
as a mix of ``fp16`` and ``fp32``; gradient reduction on these
mixed-type parameters will just work fine.
.. note:: If you use ``torch.save`` on one process to checkpoint the module,
and ``torch.load`` on some other processes to recover it, make sure that
``map_location`` is configured properly for every process. Without
``map_location``, ``torch.load`` would recover the module to the devices
where the module was saved.
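For example, an illustrative checkpointing sketch (``rank`` and the file name
are placeholders, and rank 0 is assumed to do the saving):
>>> CHECKPOINT = 'model.checkpoint'
>>> if rank == 0:
>>>     torch.save(ddp_model.state_dict(), CHECKPOINT)
>>> torch.distributed.barrier()  # make sure rank 0 has finished saving
>>> map_location = {'cuda:0': 'cuda:%d' % rank}
>>> ddp_model.load_state_dict(
>>>     torch.load(CHECKPOINT, map_location=map_location)
>>> )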
.. note:: When a model is trained on ``M`` nodes with ``batch=N``, the
gradient will be ``M`` times smaller when compared to the same model
trained on a single node with ``batch=M*N`` if the loss is summed (NOT
averaged as usual) across instances in a batch (because the gradients
between different nodes are averaged). You should take this into
consideration when you want to obtain a mathematically equivalent
training process compared to the local training counterpart. But in most
cases, you can just treat a DistributedDataParallel wrapped model, a
DataParallel wrapped model and an ordinary model on a single GPU as the
same (e.g. using the same learning rate for an equivalent batch size).
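For example (illustrative numbers): with a summed loss, ``M = 4`` nodes and
``batch=N`` per node, the averaged gradient is ``1/4`` of the gradient a
single node would produce with a summed loss over ``batch=4*N``; scaling
the learning rate by ``4`` (or averaging the loss instead of summing it)
restores mathematical equivalence.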
.. note::
Parameters are never broadcast between processes. The module performs
an all-reduce step on gradients and assumes that they will be modified
by the optimizer in all processes in the same way. Buffers
(e.g. BatchNorm stats) are broadcast from the module in the process of rank
0 to all other replicas in the system in every iteration.
.. note::
If you are using DistributedDataParallel in conjunction with the
:ref:`distributed-rpc-framework`, you should always use
:meth:`torch.distributed.autograd.backward` to compute gradients and
:class:`torch.distributed.optim.DistributedOptimizer` for optimizing
parameters.
Example::
>>> import torch.distributed.autograd as dist_autograd
>>> from torch.nn.parallel import DistributedDataParallel as DDP
>>> from torch import optim
>>> from torch.distributed.optim import DistributedOptimizer
>>> import torch.distributed.rpc as rpc
>>> from torch.distributed.rpc import RRef
>>>
>>> t1 = torch.rand((3, 3), requires_grad=True)
>>> t2 = torch.rand((3, 3), requires_grad=True)
>>> rref = rpc.remote("worker1", torch.add, args=(t1, t2))
>>> ddp_model = DDP(my_model)
>>>
>>> # Setup optimizer
>>> optimizer_params = [rref]
>>> for param in ddp_model.parameters():
>>> optimizer_params.append(RRef(param))
>>>
>>> dist_optim = DistributedOptimizer(
>>> optim.SGD,
>>> optimizer_params,
>>> lr=0.05,
>>> )
>>>
>>> with dist_autograd.context() as context_id:
>>>     pred = ddp_model(rref.to_here())
>>>     loss = loss_func(pred, target)  # loss_func and target are placeholders
>>>     dist_autograd.backward(context_id, [loss])
>>>     dist_optim.step(context_id)
.. warning::
Constructor, forward method, and differentiation of the output (or a
function of the output of this module) are distributed synchronization
points. Take that into account in case different processes might be
executing different code.
.. warning::
This module assumes all parameters are registered in the model by the
time it is created. No parameters should be added or removed later.
The same applies to buffers.
.. warning::
This module assumes that all parameters are registered in the model of
each distributed process in the same order. The module itself will
conduct gradient ``allreduce`` following the reverse order of the
registered parameters of the model. In other words, it is the user's
responsibility to ensure that each distributed process has the exact
same model and thus the exact same parameter registration order.
.. warning::
This module allows parameters with non-rowmajor-contiguous strides.
For example, your model may contain some parameters whose
:class:`torch.memory_format` is ``torch.contiguous_format``
and others whose format is ``torch.channels_last``. However,
corresponding parameters in different processes must have the
same strides.
.. warning::
This module doesn't work with :func:`torch.autograd.grad` (i.e. it will
only work if gradients are to be accumulated in ``.grad`` attributes of
parameters).
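In other words (an illustrative contrast, not an additional API):
>>> loss.backward()  # gradients are accumulated into .grad; DDP hooks fire
>>> # torch.autograd.grad(loss, ddp_model.parameters())
>>> # would return gradients directly and DDP would NOT reduce them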
.. warning::
If you plan on using this module with a ``nccl`` backend or a ``gloo``
backend (that uses Infiniband), together with a DataLoader that uses
multiple workers, please change the multiprocessing start method to
``forkserver`` (Python 3 only) or ``spawn``. Unfortunately
Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will
likely experience deadlocks if you don't change this setting.
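An illustrative sketch of changing the start method before any DataLoader
workers are created (the guard and the choice of method are placeholders for
your own setup):
>>> import torch.multiprocessing as mp
>>> if __name__ == '__main__':
>>>     mp.set_start_method('forkserver')  # or 'spawn'
>>>     main()  # placeholder training entry point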
.. warning::
Forward and backward hooks defined on :attr:`module` and its submodules
won't be invoked anymore, unless the hooks are initialized in the
:meth:`forward` method.
.. warning::
You should never try to change your model's parameters after wrapping
up your model with ``DistributedDataParallel``. Because, when
wrapping up your model with ``DistributedDataParallel``, the constructor
of ``DistributedDataParallel`` will register the additional gradient
reduction functions on all the parameters of the model itself at the
time of construction. If you change the model's parameters afterwards,
gradient redunction functions no longer match the correct set of
parameters.
.. warning::
Using ``DistributedDataParallel`` in conjunction with the
:ref:`distributed-rpc-framework` is experimental and subject to change.
.. warning::
The ``gradient_as_bucket_view`` mode does not yet work with Automatic
Mixed Precision (AMP). AMP maintains stashed gradients that are used for
unscaling gradients. With ``gradient_as_bucket_view=True``, these
stashed gradients will point to communication buckets in the first
iteration. In the next iteration, the communication buckets are mutated
and thus these stashed gradients will be unexpectedly mutated as well,
which might lead to wrong results.
Args:
module (Module): module to be parallelized
device_ids (list of int or torch.device): CUDA devices. This should
only be provided when the input module resides on a single
CUDA device. For single-device modules, the i'th
:attr:`module` replica is placed on ``device_ids[i]``. For
multi-device modules and CPU modules, ``device_ids`` must be
``None`` or an empty list, and input data for the forward
pass must be placed on the correct device. (default: all
visible devices for single-device modules)
output_device (int or torch.device): Device location of output for
single-device CUDA modules. For multi-device modules and
CPU modules, it must be ``None``, and the module itself
dictates the output location. (default: ``device_ids[0]``
for single-device modules)
broadcast_buffers (bool): Flag that enables syncing (broadcasting)
buffers of the module at the beginning of the ``forward``
function. (default: ``True``)
process_group: The process group to be used for distributed data
all-reduction. If ``None``, the default process group, which
is created by :func:`torch.distributed.init_process_group`,
will be used. (default: ``None``)
bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into
multiple buckets so that gradient reduction of each
bucket can potentially overlap with backward computation.
:attr:`bucket_cap_mb` controls the bucket size in
MegaBytes (MB). (default: 25)
find_unused_parameters (bool): Traverse the autograd graph from all
tensors contained in the return value of the
wrapped module's ``forward`` function. Parameters
that don't receive gradients as part of this
graph are preemptively marked as being ready to
be reduced. Note that all ``forward`` outputs
that are derived from module parameters must
participate in calculating loss and later the
gradient computation. If they don't, this wrapper
will hang waiting for autograd to produce
gradients for those parameters. Any outputs
derived from module parameters that are otherwise
unused can be detached from the autograd graph
using ``torch.Tensor.detach``. (default: ``False``)
check_reduction: This argument is deprecated.
gradient_as_bucket_view (bool): This is a prototype feature and subject
to changes. When set to ``True``, gradients will be views
pointing to different offsets of ``allreduce`` communication
buckets. This can reduce peak memory usage, where the
saved memory size will be equal to the total gradients
size. Moreover, it avoids the overhead of copying between
gradients and ``allreduce`` communication buckets. When
gradients are views, ``detach_()`` cannot be called on the
gradients. If you hit such errors, refer to the
:meth:`~torch.optim.Optimizer.zero_grad` function in
``torch/optim/optimizer.py`` for a solution.