__all__ = ["shutdown", "get_worker_info", "remote", "rpc_sync",
"rpc_async", "RRef", "AllGatherStates", "method_factory", "new_method"]
import collections
import contextlib
import functools
import inspect
import logging
import threading
from typing import Dict, Generic, TypeVar, Set, Any
import torch
from torch.futures import Future
from torch._C._distributed_rpc import (
PyRRef,
RemoteProfilerManager,
WorkerInfo,
TensorPipeAgent,
get_rpc_timeout,
_cleanup_python_rpc_handler,
_delete_all_user_and_unforked_owner_rrefs,
_destroy_rref_context,
_get_current_rpc_agent,
_invoke_remote_builtin,
_invoke_remote_python_udf,
_invoke_remote_torchscript,
_invoke_rpc_builtin,
_invoke_rpc_python_udf,
_invoke_rpc_torchscript,
_is_current_rpc_agent_set,
_reset_current_rpc_agent,
_set_and_start_rpc_agent,
)
from .internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from .constants import DEFAULT_SHUTDOWN_TIMEOUT, UNSET_RPC_TIMEOUT
from ._utils import _group_membership_management, _update_group_membership
logger = logging.getLogger(__name__)
# NB: Ignoring RRef leaks during shutdown. Without this, applications would have
# to make sure there are no references to any RRef in the application code and
# that Python GC has done its job to delete those RRefs. This could result in a
# bad debugging experience, especially for large applications. Therefore, by
# default, we ignore RRef leaks during shutdown. This is usually fine as
# shutdown means applications have finished training and no longer care about
# states.
#
# To enable RRef leak checking, set `_ignore_rref_leak` to False.
_ignore_rref_leak = True
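# For example (illustrative sketch; the import path assumes this module lives at
# ``torch.distributed.rpc.api``), an application that wants leak checking would
# run the following before calling ``shutdown()``:
#
#     import torch.distributed.rpc.api as rpc_api
#     rpc_api._ignore_rref_leak = False  # report leaked RRefs at shutdown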
_default_pickler = _internal_rpc_pickler
@contextlib.contextmanager
def _use_rpc_pickler(rpc_pickler):
r"""
rpc_pickler: (.internal._InternalRPCPickler) Overrides the default RPC pickler
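
    Example (illustrative sketch; ``my_pickler`` is a hypothetical object that
    implements the same serialize/deserialize interface as
    ``_internal_rpc_pickler``)::

        >>> # xdoctest: +SKIP("distributed")
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> from torch.distributed.rpc.api import _use_rpc_pickler
        >>> with _use_rpc_pickler(my_pickler):
        ...     rpc.rpc_sync("worker1", torch.add, args=(torch.ones(2), 1))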
"""
global _default_pickler
_default_pickler = rpc_pickler
try:
yield
finally:
_default_pickler = _internal_rpc_pickler
def _require_initialized(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not _is_current_rpc_agent_set():
raise RuntimeError(
"RPC has not been initialized. Call "
"torch.distributed.rpc.init_rpc first."
)
return func(*args, **kwargs)
return wrapper
class AllGatherStates:
def __init__(self):
        # `gathered_objects` starts out as an empty dict. The leader worker is
        # elected as the first worker in a sorted worker name list. Whenever a
        # worker enters `_all_gather()`, it runs `_gather_to_leader()` on the
        # leader to add its own name and data obj to this dict. The leader also
        # adds its own name to the dict when calling `_all_gather()`.
        # Once `set(gathered_objects.keys()) == _ALL_WORKER_NAMES`, the leader
        # broadcasts the gathered dict to all follower workers and sets their
        # `gathered_objects` field and the `proceed_signal` field.
self.gathered_objects = {}
        # Every worker waits on this signal until it has received all gathered
        # objects.
self.proceed_signal = threading.Event()
# States used by `def _all_gather()`.
# `_ALL_WORKER_NAMES` is initialized when the RPC layer is initialized.
_ALL_WORKER_NAMES: Set[Any] = set()
_all_gather_dict_lock = threading.RLock()
_all_gather_sequence_id: Dict[str, int] = {}
_all_gather_sequence_id_to_states: collections.defaultdict = collections.defaultdict(AllGatherStates)
def _init_rpc_states(agent):
worker_infos = agent.get_worker_infos()
global _ALL_WORKER_NAMES
_ALL_WORKER_NAMES = {worker_info.name for worker_info in worker_infos}
# NB: backend implementation might have already set the rpc_agent.
if not _is_current_rpc_agent_set():
_set_and_start_rpc_agent(agent)
def _gather_to_leader(sequence_id, worker_name, obj, worker_names=None):
with _all_gather_dict_lock:
if not worker_names:
worker_names = _ALL_WORKER_NAMES
assert (
worker_name in worker_names
), f"{worker_name} is not expected by leader."
states = _all_gather_sequence_id_to_states[sequence_id]
assert (
worker_name not in states.gathered_objects
), f"{worker_name} reported intent sequence id {sequence_id} twice. "
states.gathered_objects[worker_name] = obj
if worker_names == set(states.gathered_objects.keys()):
states.proceed_signal.set()
def _broadcast_to_followers(sequence_id, objects_map):
with _all_gather_dict_lock:
states = _all_gather_sequence_id_to_states[sequence_id]
assert (
not states.proceed_signal.is_set()
), "Termination signal sequence id {} got set twice.".format(sequence_id)
states.gathered_objects = objects_map
states.proceed_signal.set()
_thread_local_var = threading.local()
@contextlib.contextmanager
def _wait_all():
r"""
    A context manager that collects all futures returned by ``rpc_async`` and
    waits on them at the context manager's exit, relieving the user of needing
    to call ``wait()`` explicitly.
Example::
>>> # xdoctest: +SKIP("distributed")
>>> # On worker 0:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> dst = "worker1"
        >>> with rpc._wait_all():
        >>>     fut_1 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
        >>>     fut_2 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
        >>> # fut_1 and fut_2 are waited on
"""
_thread_local_var.future_list = []
try:
yield
finally:
try:
torch.futures.wait_all(_thread_local_var.future_list)
finally:
del _thread_local_var.future_list
@_require_initialized
def _all_gather(obj, worker_names=None, timeout=UNSET_RPC_TIMEOUT):
r"""
    This is similar to torch.distributed.all_gather(), but uses RPC. It picks
    the worker with the smallest name (in alphabetical order) as the leader.
    Then all followers send their data ``obj`` to the leader. Once the leader
    has received everything, it broadcasts the results back to all followers.
    This function blocks until all workers have received the gathered results.
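
    Example (illustrative sketch; assumes RPC is already initialized on two
    workers named "worker0" and "worker1", and that both of them call this
    function)::

        >>> # xdoctest: +SKIP("distributed")
        >>> from torch.distributed.rpc.api import _all_gather
        >>> # On worker 0; worker 1 makes a symmetric call with its own payload.
        >>> gathered = _all_gather("payload from worker0")
        >>> # ``gathered`` maps each worker name to the object that worker
        >>> # passed in, e.g.
        >>> # {"worker0": "payload from worker0", "worker1": "payload from worker1"}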
"""
if not worker_names:
assert (
_ALL_WORKER_NAMES is not None
), "`_ALL_WORKER_NAMES` is not initialized for `def _all_gather`."
worker_names = _ALL_WORKER_NAMES
leader_name = min(worker_names)
self_name = _get_current_rpc_agent().get_worker_info().name
with _all_gather_dict_lock:
concat_names = "".join(sorted(worker_names))
sequence_num = _all_gather_sequence_id.get(concat_names, 0)
_all_gather_sequence_id[concat_names] = sequence_num + 1
sequence_id = concat_names + str(sequence_num)
is_leader = leader_name == self_name
if timeout == UNSET_RPC_TIMEOUT:
# Timeout is specified by agent for RPC calls
rpc_timeout = get_rpc_timeout()
# No timeout for signal
signal_timeout = None
elif timeout == DEFAULT_SHUTDOWN_TIMEOUT:
# No timeout for RPC
rpc_timeout = timeout
# No timeout for signal
signal_timeout = None
else:
# Signal and RPC timeout use the same timeout
signal_timeout = rpc_timeout = timeout
    # Phase 1: Followers send their objects to the leader
if is_leader:
_gather_to_leader(sequence_id, self_name, obj, worker_names)
else:
rpc_sync(
leader_name,
_gather_to_leader,
args=(sequence_id, self_name, obj, worker_names),
timeout=rpc_timeout,
)
with _all_gather_dict_lock:
states = _all_gather_sequence_id_to_states[sequence_id]
# Timeout is either set by function parameter or None (which is indefinite)
states.proceed_signal.wait(timeout=signal_timeout)
    # Phase 2: Leader broadcasts the gathered results to all followers.
    # The leader's signal is the first to be unblocked, after it has received
    # all followers' data objects.
if is_leader:
worker_name_to_response_future_dict = {}
for follower_name in worker_names - {leader_name}:
fut = rpc_async(
follower_name,
_broadcast_to_followers,
args=(sequence_id, states.gathered_objects),
timeout=rpc_timeout
)
worker_name_to_response_future_dict[follower_name] = fut
errors = []
for follower_name, fut in worker_name_to_response_future_dict.items():
try:
fut.wait()
except RuntimeError as ex:
errors.append((follower_name, ex))
if errors:
raise RuntimeError(
f"Followers {[e[0] for e in errors]} timed out in _all_gather "
f"after {rpc_timeout:.2f} seconds. The first exception is {errors[0][1]}"
)
    # Clean up the states for this sequence_id
with _all_gather_dict_lock:
states = _all_gather_sequence_id_to_states.pop(sequence_id)
return states.gathered_objects
@_require_initialized
def _barrier(worker_names):
r"""
Synchronizes local and remote RPC processes.
    This will block until all local and remote RPC processes specified under
    ``worker_names`` reach this method and wait for all outstanding work to
    complete.
Args:
worker_names (List[str]): The set of workers to synchronize.
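
    Example (illustrative sketch; assumes RPC is already initialized on two
    workers named "worker0" and "worker1", and that both of them call this
    function)::

        >>> # xdoctest: +SKIP("distributed")
        >>> from torch.distributed.rpc.api import _barrier
        >>> # Returns only after both workers have entered the barrier.
        >>> _barrier(["worker0", "worker1"])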
"""
try:
_all_gather(None, set(worker_names))
except RuntimeError as ex:
logger.error(
f"Failed to complete barrier, got error {ex}"
)
@_require_initialized
def _wait_all_workers(timeout=DEFAULT_SHUTDOWN_TIMEOUT):
r"""
Block until all local and remote RPC processes reach this method and wait
for all outstanding work to complete. Every RPC process must call this
method before exit to perform a graceful shutdown. This should be used to
terminate the RPC framework, and there is no guarantee that the RPC
framework will work after this method returns.
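
    Example (illustrative sketch; assumes RPC is already initialized and that
    every worker runs the same code before exiting)::

        >>> # xdoctest: +SKIP("distributed")
        >>> import torch.distributed.rpc as rpc
        >>> # Block until all workers finish outstanding work, then do a local
        >>> # (non-graceful) shutdown, since synchronization already happened.
        >>> rpc.api._wait_all_workers()
        >>> rpc.shutdown(graceful=False)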
"""
try:
_all_gather(None, timeout=timeout)
except RuntimeError as ex:
logger.error(
f"Failed to respond to 'Shutdown Proceed' in time, got error {ex}"
)
raise ex
@_require_initialized
def shutdown(graceful=True, timeout=DEFAULT_SHUTDOWN_TIMEOUT):
r"""
Perform a shutdown of the RPC agent, and then destroy the RPC agent. This
stops the local agent from accepting outstanding requests, and shuts
down the RPC framework by terminating all RPC threads. If ``graceful=True``,
this will block until all local and remote RPC processes reach this method
and wait for all outstanding work to complete. Otherwise, if
``graceful=False``, this is a local shutdown, and it does not wait for other
RPC processes to reach this method.
.. warning::
For :class:`~torch.futures.Future` objects returned by
:meth:`~torch.distributed.rpc.rpc_async`, ``future.wait()`` should not
be called after ``shutdown()``.
Args:
graceful (bool): Whether to do a graceful shutdown or not. If True,
this will 1) wait until there is no pending system
messages for ``UserRRefs`` and delete them; 2) block
until all local and remote RPC processes have reached
this method and wait for all outstanding work to
complete.
Example::
Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
on both workers. Refer to :meth:`~torch.distributed.init_process_group`
API for more details. For example,
export MASTER_ADDR=localhost
export MASTER_PORT=5678
Then run the following code in two different processes:
>>> # xdoctest: +SKIP
>>> # On worker 0:
>>> import torch
>>> import torch.distributed.rpc as rpc