## @package hypothesis_test_util
# Module caffe2.python.hypothesis_test_util
"""
The Hypothesis library uses *property-based testing* to check
invariants about the code under test under a variety of random inputs.
The key idea here is to express properties of the code under test
(e.g. that it passes a gradient check, that it implements a reference
function, etc), and then generate random instances and verify they
satisfy these properties.
The main functions of interest are exposed on `HypothesisTestCase`.
You can usually just add a short method to your test case class to
generate an arbitrary number of test cases for your operator.
The key functions are:
- `assertDeviceChecks(devices, op, inputs, outputs)`. This asserts that the
operator computes the same outputs, regardless of which device it is executed
on.
- `assertGradientChecks(device, op, inputs, outputs_to_check,
outputs_with_grads)`. This implements a standard numerical gradient checker
for the operator in question.
- `assertReferenceChecks(device, op, inputs, reference)`. This runs the
reference function (effectively calling `reference(*inputs)`) and compares
the result to the operator's output.
`hypothesis_test_util.py` exposes some useful pre-built samplers.
- `hu.gcs` - a gradient checker device (`gc`) and device checker devices (`dc`)
- `hu.gcs_cpu_only` - a CPU-only gradient checker device (`gc`) and
device checker devices (`dc`). Use this when your operator is only
implemented on the CPU.
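
For example, a test for a unary elementwise operator might look like
the following (a minimal sketch assuming `from hypothesis import given`
and `import caffe2.python.hypothesis_test_util as hu`; `Softsign` is
just an illustrative operator):

    @given(X=hu.tensor(), **hu.gcs)
    def test_softsign(self, X, gc, dc):
        op = core.CreateOperator("Softsign", ["X"], ["Y"])

        def softsign(X):
            return (X / (1 + np.abs(X)),)

        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertReferenceChecks(gc, op, [X], softsign)
        self.assertGradientChecks(gc, op, [X], 0, [0])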
"""
from caffe2.proto import caffe2_pb2
from caffe2.python import (
workspace, device_checker, gradient_checker, test_util, core)
import contextlib
import copy
import functools
import hypothesis
import hypothesis.extra.numpy
import hypothesis.strategies as st
import logging
import numpy as np
import os
import struct
def is_sandcastle():
return os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
def is_travis():
return 'TRAVIS' in os.environ
def to_float32(x):
return struct.unpack("f", struct.pack("f", float(x)))[0]
# "min_satisfying_examples" setting has been deprecated in hypothesis
# 3.56.0 and removed in hypothesis 4.x
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
if 'deadline' in kwargs and hypothesis.version.__version_info__ < (4, 44, 0):
kwargs.pop('deadline')
if 'timeout' in kwargs and hypothesis.version.__version_info__ >= (4, 44, 0):
if 'deadline' not in kwargs:
kwargs['deadline'] = kwargs['timeout'] * 1e3
kwargs.pop('timeout')
return hypothesis.settings(*args, **kwargs)
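# For example (illustrative values): on hypothesis >= 4.44.0,
#   settings(max_examples=10, min_satisfying_examples=1, timeout=60)
# resolves to hypothesis.settings(max_examples=10, deadline=60000.0).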
# This wrapper around `st.floats` defaults the width parameter to 32 on
# hypothesis >= 3.67.0 (and drops it on older versions), and snaps any
# min/max bounds to their nearest float32 values.
def floats(*args, **kwargs):
width_supported = hypothesis.version.__version_info__ >= (3, 67, 0)
if 'width' in kwargs and not width_supported:
kwargs.pop('width')
if 'width' not in kwargs and width_supported:
kwargs['width'] = 32
if kwargs.get('min_value', None) is not None:
kwargs['min_value'] = to_float32(kwargs['min_value'])
if kwargs.get('max_value', None) is not None:
kwargs['max_value'] = to_float32(kwargs['max_value'])
return st.floats(*args, **kwargs)
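# For example, floats(min_value=0.1, max_value=1.0) snaps the lower bound
# to the nearest float32 (0.10000000149011612), so generated elements
# survive a round trip through a float32 tensor.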
hypothesis.settings.register_profile(
"sandcastle",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=50,
min_satisfying_examples=1,
verbosity=hypothesis.Verbosity.verbose,
deadline=10000))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
min_satisfying_examples=1,
verbosity=hypothesis.Verbosity.verbose,
deadline=10000))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
min_satisfying_examples=1,
verbosity=hypothesis.Verbosity.verbose,
deadline=50000))
hypothesis.settings.load_profile(
'sandcastle' if is_sandcastle() else os.getenv('CAFFE2_HYPOTHESIS_PROFILE',
'dev')
)
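# For example, running tests with CAFFE2_HYPOTHESIS_PROFILE=debug uses up
# to 1000 examples per test instead of the "dev" default of 10.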
def dims(min_value=1, max_value=5):
return st.integers(min_value=min_value, max_value=max_value)
def elements_of_type(dtype=np.float32, filter_=None):
elems = None
if dtype is np.float16:
elems = floats(min_value=-1.0, max_value=1.0, width=16)
elif dtype is np.float32:
elems = floats(min_value=-1.0, max_value=1.0, width=32)
elif dtype is np.float64:
elems = floats(min_value=-1.0, max_value=1.0, width=64)
elif dtype is np.int32:
elems = st.integers(min_value=0, max_value=2 ** 31 - 1)
elif dtype is np.int64:
elems = st.integers(min_value=0, max_value=2 ** 63 - 1)
    elif dtype in (bool, np.bool_):  # np.bool was removed in NumPy 1.24
elems = st.booleans()
else:
raise ValueError("Unexpected dtype without elements provided")
return elems if filter_ is None else elems.filter(filter_)
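# For example, elements_of_type(np.int32, filter_=lambda x: x % 2 == 0)
# draws only even, non-negative int32 values.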
def arrays(dims, dtype=np.float32, elements=None, unique=False):
if elements is None:
elements = elements_of_type(dtype)
return hypothesis.extra.numpy.arrays(
dtype,
dims,
elements=elements,
unique=unique,
)
def tensor(min_dim=1,
max_dim=4,
dtype=np.float32,
elements=None,
unique=False,
**kwargs):
dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
return dims_.flatmap(
lambda dims: arrays(dims, dtype, elements, unique=unique))
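# For example, tensor(min_dim=2, max_dim=2, dtype=np.int32, min_value=3,
# max_value=5) draws 2-D int32 arrays whose side lengths are each in [3, 5].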
def tensor1d(min_len=1, max_len=64, dtype=np.float32, elements=None):
return tensor(1, 1, dtype, elements, min_value=min_len, max_value=max_len)
def segment_ids(size, is_sorted):
if size == 0:
return st.just(np.empty(shape=[0], dtype=np.int32))
if is_sorted:
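        # Draw 0/1 increments and take a cumulative sum, giving a
        # non-decreasing id per element; subtracting x[0] starts ids at 0.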
return arrays(
[size],
dtype=np.int32,
elements=st.booleans()).map(
lambda x: np.cumsum(x, dtype=np.int32) - x[0])
else:
return arrays(
[size],
dtype=np.int32,
elements=st.integers(min_value=0, max_value=2 * size))
def lengths(size, min_segments=None, max_segments=None, **kwargs):
    # First generate the number of borders between segments,
    # then create border values and add 0 and size to them.
    # Sorting and taking the diff converts these into segment
    # lengths, which may be 0.
if min_segments is None:
min_segments = 0
if max_segments is None:
max_segments = size
assert min_segments >= 0
assert min_segments <= max_segments
if size == 0 and max_segments == 0:
return st.just(np.empty(shape=[0], dtype=np.int32))
assert max_segments > 0, "size is not 0, need at least one segment"
return st.integers(
min_value=max(min_segments - 1, 0), max_value=max_segments - 1
).flatmap(
lambda num_borders:
hypothesis.extra.numpy.arrays(
np.int32, num_borders, elements=st.integers(
min_value=0, max_value=size
)
)
).map(
lambda x: np.append(x, np.array([0, size], dtype=np.int32))
).map(sorted).map(np.diff)
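# For example, lengths(10, min_segments=2) might draw
# np.array([0, 4, 6], dtype=np.int32): three (possibly empty) segments
# whose lengths sum to the total size of 10.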
def segmented_tensor(
min_dim=1,
max_dim=4,
dtype=np.float32,
is_sorted=True,
elements=None,
segment_generator=segment_ids,
allow_empty=False,
**kwargs
):
gen_empty = st.booleans() if allow_empty else st.just(False)
data_dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
data_dims_ = st.tuples(
gen_empty, data_dims_
).map(lambda pair: ([0] if pair[0] else []) + pair[1])
return data_dims_.flatmap(lambda data_dims: st.tuples(
arrays(data_dims, dtype, elements),
segment_generator(data_dims[0], is_sorted=is_sorted),
))
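# For example, segmented_tensor() might draw a pair (data of shape [3, 2],
# segment_ids == np.array([0, 0, 1], dtype=np.int32)): one id per row of
# data, non-decreasing since is_sorted defaults to True.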
def lengths_tensor(min_segments=None, max_segments=None, *args, **kwargs):
gen = functools.partial(
lengths, min_segments=min_segments, max_segments=max_segments)
return segmented_tensor(*args, segment_generator=gen, **kwargs)
def sparse_segmented_tensor(min_dim=1, max_dim=4, dtype=np.float32,
is_sorted=True, elements=None, allow_empty=False,
segment_generator=segment_ids, itype=np.int64,
**kwargs):
gen_empty = st.booleans() if allow_empty else st.just(False)
data_dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
all_dims_ = st.tuples(gen_empty, data_dims_).flatmap(
lambda pair: st.tuples(
st.just(pair[1]),
(st.integers(min_value=1, max_value=pair[1][0]) if not pair[0]
else st.just(0)),
))
return all_dims_.flatmap(lambda dims: st.tuples(
arrays(dims[0], dtype, elements),
arrays(dims[1], dtype=itype, elements=st.integers(
min_value=0, max_value=dims[0][0] - 1)),
segment_generator(dims[1], is_sorted=is_sorted),
))
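# For example, sparse_segmented_tensor() might draw a triple (data of shape
# [4, 2], indices == np.array([3, 0]), segment_ids of length 2): the
# indices gather rows of data, with one segment id per gathered row.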
def sparse_lengths_tensor(**kwargs):
return sparse_segmented_tensor(segment_generator=lengths, **kwargs)
def tensors(n, min_dim=1, max_dim=4, dtype=np.float32, elements=None, **kwargs):
dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
return dims_.flatmap(
lambda dims: st.lists(
arrays(dims, dtype, elements),
min_size=n,
max_size=n))
def tensors1d(n, min_len=1, max_len=64, dtype=np.float32, elements=None):
return tensors(
n, 1, 1, dtype, elements, min_value=min_len, max_value=max_len
)
cpu_do = caffe2_pb2.DeviceOption()
cuda_do = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA)
hip_do = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.HIP)
gpu_do = caffe2_pb2.DeviceOption(device_type=workspace.GpuDeviceType) # CUDA or ROCm
_cuda_do_list = ([cuda_do] if workspace.has_cuda_support else [])
_hip_do_list = ([hip_do] if workspace.has_hip_support else [])
_gpu_do_list = ([gpu_do] if workspace.has_gpu_support else [])
# (bddppq) Do not rely on this no_hip option! It is only used to
# temporarily skip some flaky tests on ROCm until the platform matures.
_device_options_no_hip = [cpu_do] + _cuda_do_list
device_options = _device_options_no_hip + _hip_do_list
# Include device option for each GPU
expanded_device_options = [cpu_do] + [
caffe2_pb2.DeviceOption(device_type=workspace.GpuDeviceType, device_id=i)
for i in range(workspace.NumGpuDevices())]
def device_checker_device_options():
return st.just(device_options)
def gradient_checker_device_option():
return st.sampled_from(device_options)
gcs = dict(
gc=gradient_checker_device_option(),
dc=device_checker_device_options()
)
gcs_cpu_only = dict(gc=st.sampled_from([cpu_do]), dc=st.just([cpu_do]))
gcs_cuda_only = dict(gc=st.sampled_from(_cuda_do_list), dc=st.just(_cuda_do_list))
gcs_gpu_only = dict(gc=st.sampled_from(_gpu_do_list), dc=st.just(_gpu_do_list)) # CUDA or ROCm
gcs_no_hip = dict(gc=st.sampled_from(_device_options_no_hip), dc=st.just(_device_options_no_hip))
@contextlib.contextmanager
def temp_workspace(name=b"temp_ws"):
    old_ws_name = workspace.CurrentWorkspace()
    workspace.SwitchWorkspace(name, True)
    try:
        yield
    finally:
        # Restore the previous workspace even if the body raises.
        workspace.ResetWorkspace()
        workspace.SwitchWorkspace(old_ws_name)
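# Typical use (illustrative blob/op names):
#   with temp_workspace():
#       workspace.FeedBlob("X", X)
#       workspace.RunOperatorOnce(op)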
def runOpBenchmark(
device_option,
op,
inputs,