import builtins
import collections
import logging
import math
import os
import re
import types
import weakref
from inspect import currentframe, getframeinfo
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
from weakref import ReferenceType
import torch
from torch._guards import (
DuplicateInputs,
Guard,
GuardBuilderBase,
GuardEnvExpr,
GuardSource,
Source,
)
from torch.fx.experimental.symbolic_shapes import SYMPY_INTERP
from . import config, convert_frame, mutation_guard
from .eval_frame import set_guard_error_hook, set_guard_fail_hook
from .exc import unimplemented
from .types import GuardedCode, GuardFail, GuardFn # noqa: F401
from .utils import (
dict_const_keys,
dict_const_keys_repr,
dict_param_key_ids,
guard_failures,
HAS_NUMPY,
istype,
np,
orig_code_map,
rename_implicit,
tuple_iterator_getitem,
tuple_iterator_len,
)
log = logging.getLogger(__name__)

# Fast-path guard primitives implemented in C++ in the dynamo extension:
# TensorGuards batch-checks tensor properties; check_obj_id / check_type_id
# are the compiled equivalents of ``id(x) == y`` / ``id(type(x)) == y``.
TensorGuards = torch._C._dynamo.guards.TensorGuards
check_obj_id = torch._C._dynamo.guards.check_obj_id
check_type_id = torch._C._dynamo.guards.check_type_id

# Helper names supplied as the ``locals`` mapping whenever a guard
# expression string is eval()-ed (see GuardBuilder.get).  The ``___``
# prefix keeps them from colliding with user variable names.
CLOSURE_VARS = collections.OrderedDict(
    [
        ("___check_type_id", check_type_id),
        ("___check_obj_id", check_obj_id),
        ("___is_grad_enabled", torch.is_grad_enabled),
        ("___odict_getitem", collections.OrderedDict.__getitem__),
        ("___dict_param_key_ids", dict_param_key_ids),
        ("___dict_const_keys", dict_const_keys),
        ("___tuple_iterator_len", tuple_iterator_len),
        ("___tuple_iterator_getitem", tuple_iterator_getitem),
        ("__math_isnan", math.isnan),
        ("inf", float("inf")),
    ]
)
def strip_function_call(name):
    """Reduce a call expression down to its base variable name.

    "___odict_getitem(a, 1)" => "a"
    """
    call = re.search(r"([a-z0-9_]+)\(([^(),]+)[^()]*\)", name)
    if call is not None and call.group(1) != "slice":
        # Peel one call layer off by recursing on the first argument;
        # ``slice(...)`` is left intact on purpose.
        return strip_function_call(call.group(2))
    return strip_getattr_getitem(name)


def strip_getattr_getitem(name):
    """Reduce attribute/subscript chains down to the base variable name.

    "a[1]" => "a"
    "a.foo" => "a"
    """
    # Everything up to the first '.' or '[' is the base variable.
    return re.match(r"[^.\[]*", name).group(0)
class GuardBuilder(GuardBuilderBase):
def __init__(
self,
id_ref: Callable[[Type[object]], str],
source_ref: Callable[[Source], str],
scope: Optional[Dict[str, object]],
check_fn_manager: "CheckFunctionManager",
renames=True,
):
self.id_ref = id_ref
self.source_ref = source_ref
if scope:
if renames:
scope = {rename_implicit(k): v for k, v in scope.items()}
else:
scope = dict()
self.scope: Dict[str, object] = scope
self.scope["__builtins__"] = builtins.__dict__.copy()
for (
name,
package_module,
) in torch.package.package_importer._package_imported_modules.items():
name = name.replace(">", "_").replace("<", "_").replace(".", "_dot_")
# Write the package module into the scope so that we can import it
self.scope["__builtins__"][name] = package_module # type: ignore[index]
# Write the demangled name to the scope so that we can use it
self.scope[name] = package_module
self.argnames: List[str] = []
# Code is python expression strings generated for each guard
self.code: List[str] = []
# shape_env_code is only used by local_builder and is used for
# shape env code. This exists only because we need to make sure
# shape env guards get run after tensor match guards (since the
# tensor match guards make sure we actually have tensors)
self.shape_env_code: List[str] = []
# Most of the time, we generate Python code in a guard to directly
# check various properties. However, tensors are a bit special;
# it is too slow to check their properties one-by-one in Python.
# Instead, there is a C++ function TensorGuards.check which takes
# all of the tensor arguments and checks them all against compile-time
# examples entirely in C++. Thus, every time we process a
# TENSOR_MATCH guard, we just add another entry to
# tensor_check_names/tensor_check_examples, saying "for this local,
# check it against this example", and it all ends up getting
# swept up into a single call to ___check_tensors. Invariant:
# len(tensor_check_names) == len(tensor_check_examples).
self.tensor_check_names: List[str] = []
self.tensor_check_examples: List[torch.Tensor] = []
self.tensor_check_ids: Dict[str, int] = {}
self.check_fn_manager: CheckFunctionManager = check_fn_manager
# Warning: use this with care! This lets you access what the current
# value of the value you are guarding on is. You probably don't want
# to actually durably save this value though (because it's specific
# to this frame!) Instead, you should be reading out some property
# (like its type) which is what you permanently install into the
# guard code.
    def get(self, name: str) -> Any:
        """Evaluate ``name`` (a Python expression string) against the guarded
        scope and return the current value.

        ``CLOSURE_VARS`` is passed as locals so expressions may use the
        ``___*`` helper functions.  See the warning above: the returned value
        is specific to this frame — read properties off it (e.g. its type)
        rather than durably saving the value itself.
        """
        return eval(name, self.scope, CLOSURE_VARS)
# Registers the usage of the source name referenced by the
# string (or stored in the Guard) as being guarded upon. It's important
# to call this before generating some code that makes use of 'guard',
# because without this call, we won't actually bind the variable
# you reference in the actual guard closure (oops!)
def arg_ref(self, guard: Union[str, Guard]) -> str:
name: str
if isinstance(guard, str):
name = guard
else:
name = guard.name
base = strip_getattr_getitem(strip_function_call(name))
if base not in self.argnames:
if re.match(r"^\d+$", base):
log.warning(f"invalid var name: {guard}")
self.argnames.append(base)
return name
def TYPE_MATCH(self, guard: Guard):
# ___check_type_id is same as `id(type(x)) == y`
t = type(self.get(guard.name))
obj_id = self.id_ref(t)
code = f"___check_type_id({self.arg_ref(guard)}, {obj_id})"
self._produce_guard_code(guard, [code])
def ID_MATCH(self, guard: Guard):
# ___check_obj_id is same as `id(x) == y`
m = re.match(r"^type\((.+)\)$", guard.name)
if m:
# optional optimization to produce cleaner/faster guard code
return self.TYPE_MATCH(
Guard(m.group(1), guard.source, GuardBuilder.TYPE_MATCH)
)
code = f"___check_obj_id({self.arg_ref(guard)}, {self.id_ref(self.get(guard.name))})"
self._produce_guard_code(guard, [code])
def NAME_MATCH(self, guard: Guard):
obj = self.get(guard.name)
code = f"{self.arg_ref(guard)}.__name__ == {obj.__name__}"
self._produce_guard_code(guard, [code])
def HASATTR(self, guard: Guard):
m = re.match(r"^(.*)[.]([a-zA-Z0-9_]+)$", guard.name)
assert m, f"invalid hasattr check {guard.name}"
base, attr = m.group(1, 2)
ref = self.arg_ref(base)
val = hasattr(self.get(base), attr)
code = None
if val:
code = f"hasattr({ref}, {attr!r})"
else:
code = f"not hasattr({ref}, {attr!r})"
self._produce_guard_code(guard, [code], provided_guarded_object=self.get(base))
    def EQUALS_MATCH(self, guard: Guard):
        """Guard that the value compares ``==`` to its compile-time value.

        Only supported for a closed set of constant-like types (asserted
        below).  A type-id check is emitted before the equality so the
        ``==`` cannot silently succeed across types.
        """
        ref = self.arg_ref(guard)
        val = self.get(guard.name)
        t = type(val)
        # numpy scalar types are only accepted when numpy is importable
        np_types = (
            (
                np.int8,
                np.int16,
                np.int32,
                np.int64,
                np.uint8,
                np.uint16,
                np.uint32,
                np.uint64,
                np.float16,
                np.float32,
                np.float64,
            )
            if HAS_NUMPY
            else ()
        )
        # Closed set of supported constant types; anything else is a caller
        # bug, reported with the offending type's name.
        assert istype(
            val,
            (
                int,
                float,
                bool,
                type(None),
                str,
                type,
                list,
                tuple,
                set,
                slice,
                frozenset,
                range,
                torch.Size,
                torch.device,
                torch.dtype,
            )
            + np_types,
        ), t.__name__

        # device/dtype are compared by their string form, e.g. "cuda:0".
        if istype(val, (torch.device, torch.dtype)):
            # TODO(jansel): is this slow? perhaps optimize it
            code = [f"str({ref}) == {str(val)!r}"]
            self._produce_guard_code(guard, code)
            return

        # Special case for nan because float("nan") == float("nan") evaluates to False
        if istype(val, float) and math.isnan(val):
            code = list()
            code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
            code.append(f"__math_isnan({ref})")
            self._produce_guard_code(guard, code)
            return

        # Add type check to prevent equality check between tensor and non-tensor.
        code = list()
        if istype(val, (list, tuple)):
            # Container type + length is guarded via LIST_LENGTH; each
            # element additionally gets an exact-type check.
            self.LIST_LENGTH(guard)

            for idx, elem in enumerate(val):
                code.append(
                    f"___check_type_id({ref}[{idx}], {self.id_ref(type(elem))})"
                )
        elif not istype(val, torch.Size):
            # torch.Size deliberately skips the exact-type check: it is
            # compared as a plain tuple below.
            code.append(f"___check_type_id({ref}, {self.id_ref(t)})")

        if istype(val, torch.Size):
            val = tuple(val)

        code.append(f"{ref} == {val!r}")
        self._produce_guard_code(guard, code)
def CONSTANT_MATCH(self, guard: Guard):
val = self.get(guard.name)
if istype(val, (bool, type(None))):
self.ID_MATCH(guard)
else:
self.EQUALS_MATCH(guard)
def NN_MODULE(self, guard: Guard):
self.ID_MATCH(guard)
ref = self.arg_ref(guard)
val = self.get(guard.name)
def setup_guard():
assert istype(val.training, bool)
self.code.append(f"{ref}.training == {val.training}")
if hasattr(val, "training"):
# There are cases where a monkeypatched object has a guard made between __new__ and __init__
setup_guard()
else:
unimplemented(f"Guard setup for uninitialized class {type(val)}")
def FUNCTION_MATCH(self, guard: Guard):
"""things like torch.add and user defined functions"""
if guard.is_local():
return self.ID_MATCH(guard)
    def BUILTIN_MATCH(self, guard: Guard):
        # Builtins are guarded exactly like functions: identity check,
        # applied only when the guard is on a local (see FUNCTION_MATCH).
        return self.FUNCTION_MATCH(guard)
    def PYMODULE_MATCH(self, guard: Guard):
        # Python modules are guarded exactly like functions: identity check,
        # applied only when the guard is on a local (see FUNCTION_MATCH).
        return self.FUNCTION_MATCH(guard)
def LIST_LENGTH(self, guard):
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
code.append(f"len({ref}) == {len(value)}")
self._produce_guard_code(guard, code)
def TUPLE_ITERATOR_LEN(self, guard):
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
code.append(f"___tuple_iterator_len({ref}) == {tuple_iterator_len(value)}")
self._produce_guard_code(guard, code)
def DICT_KEYS(self, guard):
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
param_key_ids = set(dict_param_key_ids(value))
const_keys = set(dict_const_keys(value))
const_keys_repr = dict_const_keys_repr(const_keys)
if param_key_ids:
code.append(f"___dict_param_key_ids({ref}) == {param_key_ids!r}")
Loading ...