import collections
import contextlib
import ctypes
import warnings
from typing import Any, Dict, Union, Tuple
import torch
from . import is_initialized, _get_device_index, _lazy_init
from ._utils import _dummy_type
from ._memory_viz import segments as _segments, memory as _memory
from torch.types import Device
from torch import _C
__all__ = ["caching_allocator_alloc", "caching_allocator_delete", "set_per_process_memory_fraction",
"empty_cache", "memory_stats", "memory_stats_as_nested_dict", "reset_accumulated_memory_stats",
"reset_peak_memory_stats", "reset_max_memory_allocated", "reset_max_memory_cached",
"memory_allocated", "max_memory_allocated", "memory_reserved", "max_memory_reserved",
"memory_cached", "max_memory_cached", "memory_snapshot", "memory_summary", "list_gpu_processes",
"mem_get_info", "get_allocator_backend", "CUDAPluggableAllocator", "change_current_allocator"]
if not hasattr(torch._C, '_cuda_CUDAAllocator'):
# Define dummy base classes
torch._C.__dict__['_cuda_CUDAAllocator'] = _dummy_type('_cuda_CUDAAllocator')
def _host_allocator():
_lazy_init()
return torch._C._cuda_cudaHostAllocator()
@contextlib.contextmanager
def _free_mutex():
torch._C._cuda_lock_mutex()
try:
yield
finally:
torch._C._cuda_unlock_mutex()
def caching_allocator_alloc(size, device: Union[Device, int] = None, stream=None):
r"""Performs a memory allocation using the CUDA memory allocator.
    Memory is allocated for a given device and a stream; this
    function is intended to be used for interoperability with other
    frameworks. Allocated memory is released through
    :func:`~torch.cuda.caching_allocator_delete`.
Args:
size (int): number of bytes to be allocated.
device (torch.device or int, optional): selected device. If it is
``None`` the default CUDA device is used.
        stream (torch.cuda.Stream or int, optional): selected stream. If it is ``None``, then
the default stream for the selected device is used.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
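
    A minimal usage sketch (assumes at least one CUDA device is available; the
    raw pointer must later be released with
    :func:`~torch.cuda.caching_allocator_delete`)::

        >>> ptr = torch.cuda.caching_allocator_alloc(1024)  # allocate 1024 bytes on the current device
        >>> torch.cuda.caching_allocator_delete(ptr)        # free the raw allocation when done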
"""
if device is None:
device = torch.cuda.current_device()
device = _get_device_index(device)
if stream is None:
stream = torch.cuda.current_stream(device)
if isinstance(stream, torch.cuda.streams.Stream):
stream = stream.cuda_stream
if not isinstance(stream, int):
raise TypeError('Invalid type for stream argument, must be '
'`torch.cuda.Stream` or `int` representing a pointer '
                        'to an existing stream')
with torch.cuda.device(device):
return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream)
def caching_allocator_delete(mem_ptr):
r"""Deletes memory allocated using the CUDA memory allocator.
    Memory allocated with :func:`~torch.cuda.caching_allocator_alloc`
is freed here. The associated device and stream are tracked inside
the allocator.
Args:
mem_ptr (int): memory address to be freed by the allocator.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
torch._C._cuda_cudaCachingAllocator_raw_delete(mem_ptr)
def set_per_process_memory_fraction(fraction, device: Union[Device, int] = None) -> None:
r"""Set memory fraction for a process.
    The fraction is used to limit the memory that the caching allocator may
    allocate on a CUDA device. The allowed amount equals the total visible
    memory multiplied by the fraction. If a process tries to allocate more than
    the allowed amount, the allocator raises an out-of-memory error.
Args:
        fraction (float): Range: 0~1. Allowed memory equals total_memory * fraction.
device (torch.device or int, optional): selected device. If it is
``None`` the default CUDA device is used.
.. note::
In general, the total available free memory is less than the total capacity.
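
    A minimal usage sketch (assumes device 0 exists; afterwards, allocating more
    than half of the device's total memory in this process raises an
    out-of-memory error)::

        >>> torch.cuda.set_per_process_memory_fraction(0.5, device=0)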
"""
_lazy_init()
if device is None:
device = torch.cuda.current_device()
device = _get_device_index(device)
if not isinstance(fraction, float):
raise TypeError('Invalid type for fraction argument, must be `float`')
if fraction < 0 or fraction > 1:
raise ValueError('Invalid fraction value: {}. '
'Allowed range: 0~1'.format(fraction))
torch._C._cuda_setMemoryFraction(fraction, device)
def empty_cache() -> None:
r"""Releases all unoccupied cached memory currently held by the caching
allocator so that those can be used in other GPU application and visible in
`nvidia-smi`.
.. note::
:func:`~torch.cuda.empty_cache` doesn't increase the amount of GPU
memory available for PyTorch. However, it may help reduce fragmentation
of GPU memory in certain cases. See :ref:`cuda-memory-management` for
more details about GPU memory management.
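
    A minimal usage sketch (the tensor shape is arbitrary; a cached block can
    only be released once no live tensor occupies it)::

        >>> x = torch.empty(1024, 1024, device="cuda")
        >>> del x                     # the block is now cached, not returned to the driver
        >>> torch.cuda.empty_cache()  # release unoccupied cached memory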
"""
if is_initialized():
torch._C._cuda_emptyCache()
def memory_stats(device: Union[Device, int] = None) -> Dict[str, Any]:
r"""Returns a dictionary of CUDA memory allocator statistics for a
given device.
The return value of this function is a dictionary of statistics, each of
which is a non-negative integer.
Core statistics:
- ``"allocated.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
number of allocation requests received by the memory allocator.
- ``"allocated_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
amount of allocated memory.
- ``"segment.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
number of reserved segments from ``cudaMalloc()``.
- ``"reserved_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
amount of reserved memory.
- ``"active.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
number of active memory blocks.
- ``"active_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
amount of active memory.
- ``"inactive_split.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
number of inactive, non-releasable memory blocks.
- ``"inactive_split_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
amount of inactive, non-releasable memory.
For these core statistics, values are broken down as follows.
Pool type:
- ``all``: combined statistics across all memory pools.
- ``large_pool``: statistics for the large allocation pool
(as of October 2019, for size >= 1MB allocations).
- ``small_pool``: statistics for the small allocation pool
(as of October 2019, for size < 1MB allocations).
Metric type:
- ``current``: current value of this metric.
- ``peak``: maximum value of this metric.
- ``allocated``: historical total increase in this metric.
- ``freed``: historical total decrease in this metric.
In addition to the core statistics, we also provide some simple event
counters:
- ``"num_alloc_retries"``: number of failed ``cudaMalloc`` calls that
result in a cache flush and retry.
- ``"num_ooms"``: number of out-of-memory errors thrown.
    The caching allocator can be configured via ENV to not split blocks larger than a
    defined size (see the Memory Management section of the CUDA semantics documentation).
This helps avoid memory fragmentation but may have a performance
penalty. Additional outputs to assist with tuning and evaluating impact:
- ``"max_split_size"``: blocks above this size will not be split.
- ``"oversize_allocations.{current,peak,allocated,freed}"``:
number of over-size allocation requests received by the memory allocator.
- ``"oversize_segments.{current,peak,allocated,freed}"``:
number of over-size reserved segments from ``cudaMalloc()``.
The caching allocator can be configured via ENV to round memory allocations in order
to reduce fragmentation. Sometimes the overhead from rounding can be higher than
the fragmentation it helps reduce. The following stat can be used to check if
    rounding adds too much overhead:
- ``"requested_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      memory requested by client code; compare this with allocated_bytes to check
      whether allocation rounding adds too much overhead.
Args:
device (torch.device or int, optional): selected device. Returns
statistics for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
.. note::
With :ref:`backend:cudaMallocAsync<cuda-memory-envvars>`, some stats are not
meaningful, and are always reported as zero.
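
    A minimal usage sketch showing how the flattened keys are composed (the
    reported values depend on the allocations made so far)::

        >>> stats = torch.cuda.memory_stats()
        >>> current = stats["allocated_bytes.all.current"]  # bytes currently allocated, across all pools
        >>> retries = stats["num_alloc_retries"]            # simple event counter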
"""
result = []
def _recurse_add_to_result(prefix, obj):
if isinstance(obj, dict):
if len(prefix) > 0:
prefix += "."
for k, v in obj.items():
_recurse_add_to_result(prefix + k, v)
else:
result.append((prefix, obj))
stats = memory_stats_as_nested_dict(device=device)
_recurse_add_to_result("", stats)
result.sort()
return collections.OrderedDict(result)
def memory_stats_as_nested_dict(device: Union[Device, int] = None) -> Dict[str, Any]:
r"""Returns the result of :func:`~torch.cuda.memory_stats` as a nested dictionary."""
if not is_initialized():
return {}
device = _get_device_index(device, optional=True)
return torch._C._cuda_memoryStats(device)
def reset_accumulated_memory_stats(device: Union[Device, int] = None) -> None:
r"""Resets the "accumulated" (historical) stats tracked by the CUDA memory allocator.
See :func:`~torch.cuda.memory_stats` for details. Accumulated stats correspond to
the `"allocated"` and `"freed"` keys in each individual stat dict, as well as
`"num_alloc_retries"` and `"num_ooms"`.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
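
    A minimal usage sketch for measuring the total bytes allocated over an
    interval (``run_workload`` is a placeholder for arbitrary user code)::

        >>> torch.cuda.reset_accumulated_memory_stats()
        >>> run_workload()
        >>> total = torch.cuda.memory_stats()["allocated_bytes.all.allocated"]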
"""
device = _get_device_index(device, optional=True)
return torch._C._cuda_resetAccumulatedMemoryStats(device)
def reset_peak_memory_stats(device: Union[Device, int] = None) -> None:
r"""Resets the "peak" stats tracked by the CUDA memory allocator.
See :func:`~torch.cuda.memory_stats` for details. Peak stats correspond to the
`"peak"` key in each individual stat dict.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
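
    A minimal usage sketch for measuring the peak memory use of a code region
    (``run_workload`` is a placeholder for arbitrary user code)::

        >>> torch.cuda.reset_peak_memory_stats()
        >>> run_workload()
        >>> peak = torch.cuda.max_memory_allocated()  # peak tensor memory since the reset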
"""
device = _get_device_index(device, optional=True)
return torch._C._cuda_resetPeakMemoryStats(device)
def reset_max_memory_allocated(device: Union[Device, int] = None) -> None:
r"""Resets the starting point in tracking maximum GPU memory occupied by
tensors for a given device.
See :func:`~torch.cuda.max_memory_allocated` for details.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. warning::
This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets
/all/ peak memory stats.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
warnings.warn(
"torch.cuda.reset_max_memory_allocated now calls torch.cuda.reset_peak_memory_stats, "
"which resets /all/ peak memory stats.",
FutureWarning)
return reset_peak_memory_stats(device=device)
def reset_max_memory_cached(device: Union[Device, int] = None) -> None:
r"""Resets the starting point in tracking maximum GPU memory managed by the
caching allocator for a given device.
See :func:`~torch.cuda.max_memory_cached` for details.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. warning::
This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets
/all/ peak memory stats.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
warnings.warn(
"torch.cuda.reset_max_memory_cached now calls torch.cuda.reset_peak_memory_stats, "
"which resets /all/ peak memory stats.",
FutureWarning)
return reset_peak_memory_stats(device=device)
def memory_allocated(device: Union[Device, int] = None) -> int:
r"""Returns the current GPU memory occupied by tensors in bytes for a given
device.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::