Unverified Commit 865ae1a0 authored by daquexian, committed by GitHub

Update autotest framework (#5520)


* update automated test framework

Signed-off-by: daquexian <daquexian566@gmail.com>

* rename func -> generator

Signed-off-by: daquexian <daquexian566@gmail.com>

* reformat

Signed-off-by: daquexian <daquexian566@gmail.com>

* update more powerful conv test

Signed-off-by: daquexian <daquexian566@gmail.com>

* new imperative autotest api

Signed-off-by: daquexian <daquexian566@gmail.com>

* update automated test framework

Signed-off-by: daquexian <daquexian566@gmail.com>

* add some tests and refine code

Signed-off-by: daquexian <daquexian566@gmail.com>

* update test_matmul.py

Signed-off-by: daquexian <daquexian566@gmail.com>

* fix test_matmul.py

Signed-off-by: daquexian <daquexian566@gmail.com>

* add atol and rtol, fix test_matmul.py

Signed-off-by: daquexian <daquexian566@gmail.com>

* auto format by CI

* reformat

Signed-off-by: daquexian <daquexian566@gmail.com>

* tricky export

* tricky export

* fix comments

* auto format by CI

* fix comments

* auto format by CI

* fix code

* dirtyfix

* fixname

* fix

* fix bug

* fix comments

Co-authored-by: oneflow-ci-bot <ci-bot@oneflow.org>
Co-authored-by: Xiaoyu Zhang <35585791+BBuf@users.noreply.github.com>
Co-authored-by: BBuf <1182563586@qq.com>
Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
Co-authored-by: tsai <jackalcooper@gmail.com>
parent ab8aab8b
Showing with 1081 additions and 419 deletions
......@@ -6,6 +6,7 @@ export PYTHONUNBUFFERED=1
src_dir=${ONEFLOW_SRC_DIR:-"$PWD"}
test_dir=${ONEFLOW_TEST_DIR:-"$PWD/oneflow/python/test/modules"}
test_tmp_dir=${ONEFLOW_TEST_TMP_DIR:-"./test_tmp_dir"}
export ONEFLOW_TEST_UTILS_DIR=$src_dir/oneflow/python/test_utils
rm -rf $test_tmp_dir
......
......@@ -47,7 +47,7 @@ class MatMul(Module):
@oneflow_export("matmul")
@register_tensor_op("matmul")
@experimental_api
def matmul_op(a, b):
def matmul_op(input, other):
r"""This operator applies matrix multiplication to two Tensor.
Args:
......@@ -71,7 +71,7 @@ def matmul_op(a, b):
flow.Size([2, 5])
"""
return MatMul()(a, b)
return MatMul()(input, other)
if __name__ == "__main__":
......
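The hunk above only renames the parameters of matmul to match torch.matmul. A minimal usage sketch under the new names, mirroring the 2x3 by 3x5 example in the docstring (a sketch, not part of the commit; it assumes the exported wrapper keeps these parameter names and that the experimental eager mode used by the tests below is enabled):

import numpy as np
import oneflow.experimental as flow

a = flow.Tensor(np.random.randn(2, 3))
b = flow.Tensor(np.random.randn(3, 5))
c = flow.matmul(a, b)  # or flow.matmul(input=a, other=b) with the renamed keywords
print(c.shape)         # flow.Size([2, 5])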
......@@ -13,416 +13,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import inspect
import typing # This unused import is needed
from typing import Dict, Optional, Tuple, Any, Union
from collections import namedtuple
import random as random_util
import os
import sys
import oneflow.experimental as flow
import torch
import numpy as np
test_util_parent_dir = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
TEST_MODULE = 0
TEST_FLOW = 1
TEST_TENSOR = 2
oneflow_test_utils_dir_from_env = os.getenv("ONEFLOW_TEST_UTILS_DIR")
if oneflow_test_utils_dir_from_env:
from pathlib import Path
rng = np.random.default_rng()
oneflow_test_utils_dir_from_env = Path(oneflow_test_utils_dir_from_env)
test_util_parent_dir = str(oneflow_test_utils_dir_from_env.parent.absolute())
default_generators = {}
def data_generator(annotation):
def register_data_generator(func):
default_generators[annotation] = func
return func
return register_data_generator
@data_generator(bool)
def _random_bool():
val = random_util.choice([True, False])
return val, val
@data_generator(torch.Tensor)
def _random_tensor():
return random_tensor()(None)
def random_tensor(ndim=None, dim0=1, dim1=None, dim2=None, dim3=None, dim4=None):
assert ndim is None or 1 <= ndim <= 5
if ndim is None:
ndim = rng.integers(low=1, high=6)
shape = rng.integers(low=1, high=8, size=ndim)
if dim0 is not None:
shape[0] = dim0
if ndim >= 2 and dim1 is not None:
shape[1] = dim1
if ndim >= 3 and dim2 is not None:
shape[2] = dim2
if ndim >= 4 and dim3 is not None:
shape[3] = dim3
if ndim == 5 and dim4 is not None:
shape[4] = dim4
def generator(_):
np_arr = rng.random(shape)
return flow.Tensor(np_arr), torch.Tensor(np_arr)
return generator
def choose(x):
def generator(_):
val = random_util.choice(x)
return val, val
return generator
def random(low, high):
def generator(annotation):
if hasattr(annotation, "__origin__"):
# PyTorch _size_2_t and similar types are defined by type variables,
# leading to unexpected __args__ and __origin__
#
# _size_2_t = Union[T, Tuple[T, T]][int]
# _size_2_t.__origin__
# >> typing.Union[~T, typing.Tuple[~T, ~T]]
#
# So recreate a new annotation object by repr and eval
#
# _size_2_t
# >> typing.Union[int, typing.Tuple[int, int]]
# _size_2_t_new = eval(repr(annotation))
# _size_2_t_new.__origin__
# >> typing.Union
annotation = eval(repr(annotation))
if annotation.__origin__ is Union:
x = random_util.choice(annotation.__args__)
return generator(x)
if annotation.__origin__ is Tuple or annotation.__origin__ is tuple:
t = [generator(x) for x in annotation.__args__]
return zip(*t)
else:
raise NotImplementedError(
f"Not implemented annotation {annotation} in random, type(annotation.__origin__) is {type(annotation.__origin__)}"
)
if annotation == int:
val = int(rng.integers(low, high))
elif annotation == float:
val = float(rng.random() * (high - low) + low)
else:
raise NotImplementedError(
f"Not implemented annotation {annotation} in random"
)
return val, val
return generator
def constant(val):
def generator(_):
return val, val
return generator
def test_against_pytorch(
test_case,
callable_name,
extra_annotations: Optional[Dict[str, Any]] = None,
extra_generators: Optional[Dict[str, Any]] = None,
extra_defaults: Optional[Dict[str, Any]] = None,
device: str = "cuda",
training: bool = True,
backward: bool = True,
rtol=1e-4,
atol=1e-5,
n=20,
pytorch_callable_name=None,
api_flag: int = TEST_MODULE,
):
assert device in ["cuda", "cpu"]
if not training:
assert not backward
if extra_annotations is None:
extra_annotations = {}
if extra_generators is None:
extra_generators = {}
if extra_defaults is None:
extra_defaults = {}
if pytorch_callable_name is None:
pytorch_callable_name = callable_name
verbose = os.getenv("ONEFLOW_TEST_VERBOSE") is not None
def has_full_args_spec(callable):
try:
spec = inspect.getfullargspec(callable)
return True
except Exception:
return False
if api_flag == TEST_TENSOR:
pytorch_tensor = torch.Tensor(1)
pytorch_call = eval(f"pytorch_tensor.{pytorch_callable_name}")
else:
pytorch_call = eval(f"torch.{pytorch_callable_name}")
Spec = namedtuple(
"spec",
"args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations",
)
if has_full_args_spec(pytorch_call):
tmp_spec = inspect.getfullargspec(pytorch_call)
new_defaults = tmp_spec.defaults
if new_defaults is None:
new_defaults = []
new_kwonlydefaults = tmp_spec.kwonlydefaults
if new_kwonlydefaults is None:
new_kwonlydefaults = []
spec = Spec(
tmp_spec.args,
tmp_spec.varargs,
tmp_spec.varkw,
new_defaults,
tmp_spec.kwonlyargs,
new_kwonlydefaults,
tmp_spec.annotations,
)
else:
args = list(extra_annotations.keys()) + list(extra_defaults.keys())
spec = Spec(args, None, None, [], [], {}, {})
annotations = spec.annotations
annotations.update(extra_annotations)
if "return" in annotations:
del annotations["return"]
args = (set(spec.args) | set(spec.kwonlyargs)) - {"self"}
assert args == set(
annotations.keys()
), f"args = {args}, annotations = {annotations.keys()}"
annotations.update({"input": torch.Tensor})
def has_default(name):
if name in spec.args:
return (len(spec.args) - spec.args.index(name)) <= len(spec.defaults)
else:
assert name in spec.kwonlyargs
return (len(spec.kwonlyargs) - spec.kwonlyargs.index(name)) <= len(
spec.kwonlydefaults
)
def generate(name):
annotation = annotations[name]
if name in extra_generators:
return extra_generators[name](annotation)
return default_generators[annotation]()
while n > 0:
flow_attr_dict = {}
torch_attr_dict = {}
for name in args:
if has_default(name):
if rng.random() < 1 / 3:
continue
flow_data, torch_data = generate(name)
if isinstance(torch_data, torch.Tensor):
torch_data = torch_data.to(device)
if isinstance(flow_data, flow.Tensor):
flow_data = flow_data.to(device)
flow_attr_dict[name] = flow_data
torch_attr_dict[name] = torch_data
if verbose:
print(f"attr = {torch_attr_dict}, device = {device}")
flow_input_original, torch_input_original = generate("input")
flow_input_original.requires_grad_(backward)
torch_input_original.requires_grad_(backward)
flow_input, torch_input = (
flow_input_original.to(device),
torch_input_original.to(device),
)
try:
if api_flag == TEST_MODULE:
torch_call = pytorch_call(**torch_attr_dict)
torch_call = torch_call.to(device)
torch_call.train(training)
torch_res = torch_call(torch_input)
state_dict = torch_call.state_dict()
state_dict = {
k: v.detach().cpu().numpy() for k, v in state_dict.items()
}
elif api_flag == TEST_FLOW:
torch_xxx_func = eval(f"torch.{pytorch_callable_name}")
torch_res = torch_xxx_func(torch_input, **torch_attr_dict)
else:
torch_tensor_xxx_func = eval(f"torch_input.{pytorch_callable_name}")
torch_res = torch_tensor_xxx_func(**torch_attr_dict)
loss = torch_res.sum()
loss.backward()
if api_flag == TEST_MODULE:
state_dict = torch_call.state_dict()
state_dict = {
k: v.detach().cpu().numpy() for k, v in state_dict.items()
}
except Exception as e:
if verbose:
print(f"PyTorch error: {e}")
# The randomly generated test data is not always valid,
# so just skip when PyTorch raises an exception
continue
if api_flag == TEST_MODULE:
flow_call_class = eval(f"flow.{callable_name}")
flow_call = flow_call_class(**flow_attr_dict)
flow_call = flow_call.to(device)
flow_call.train(training)
flow_call.load_state_dict(state_dict)
flow_res = flow_call(flow_input)
elif api_flag == TEST_FLOW:
flow_xxx_func = eval(f"flow.{callable_name}")
flow_res = flow_xxx_func(flow_input, **flow_attr_dict)
else:
flow_tensor_xxx_func = eval(f"flow_input.{callable_name}")
flow_res = flow_tensor_xxx_func(**flow_attr_dict)
loss = flow_res.sum()
loss.backward()
def allclose_or_fail(flow_tensor, torch_tensor):
is_allclose = np.allclose(
flow_tensor.numpy(),
torch_tensor.detach().cpu().numpy(),
rtol=rtol,
atol=atol,
)
test_case.assertTrue(
is_allclose,
f"flow_tensor = {flow_tensor},\ntorch_tensor = {torch_tensor},\nattr_dict = {torch_attr_dict},\nflow_input_tensor = {flow_input_original}",
)
allclose_or_fail(flow_res, torch_res)
allclose_or_fail(flow_input_original.grad, torch_input_original.grad)
if api_flag == TEST_MODULE:
flow_parameters = dict(flow_call.named_parameters())
for name, torch_param in torch_call.named_parameters():
flow_param = flow_parameters[name]
allclose_or_fail(flow_param.grad, torch_param.grad)
n -= 1
def test_module_against_pytorch(
test_case,
callable_name,
extra_annotations: Optional[Dict[str, Any]] = None,
extra_generators: Optional[Dict[str, Any]] = None,
extra_defaults: Optional[Dict[str, Any]] = None,
device: str = "cuda",
training: bool = True,
backward: bool = True,
rtol=1e-4,
atol=1e-5,
n=20,
pytorch_callable_name=None,
):
return test_against_pytorch(
test_case=test_case,
callable_name=callable_name,
extra_annotations=extra_annotations,
extra_generators=extra_generators,
extra_defaults=extra_defaults,
device=device,
training=training,
backward=backward,
rtol=rtol,
atol=atol,
n=n,
pytorch_callable_name=pytorch_callable_name,
api_flag=TEST_MODULE,
)
def test_flow_against_pytorch(
test_case,
callable_name,
extra_annotations: Optional[Dict[str, Any]] = None,
extra_generators: Optional[Dict[str, Any]] = None,
extra_defaults: Optional[Dict[str, Any]] = None,
device: str = "cuda",
training: bool = True,
backward: bool = True,
rtol=1e-4,
atol=1e-5,
n=20,
pytorch_callable_name=None,
):
return test_against_pytorch(
test_case=test_case,
callable_name=callable_name,
extra_annotations=extra_annotations,
extra_generators=extra_generators,
extra_defaults=extra_defaults,
device=device,
training=training,
backward=backward,
rtol=rtol,
atol=atol,
n=n,
pytorch_callable_name=pytorch_callable_name,
api_flag=TEST_FLOW,
)
def test_tensor_against_pytorch(
test_case,
callable_name,
extra_annotations: Optional[Dict[str, Any]] = None,
extra_generators: Optional[Dict[str, Any]] = None,
extra_defaults: Optional[Dict[str, Any]] = None,
device: str = "cuda",
training: bool = True,
backward: bool = True,
rtol=1e-4,
atol=1e-5,
n=20,
pytorch_callable_name=None,
):
return test_against_pytorch(
test_case=test_case,
callable_name=callable_name,
extra_annotations=extra_annotations,
extra_generators=extra_generators,
extra_defaults=extra_defaults,
device=device,
training=training,
backward=backward,
rtol=rtol,
atol=atol,
n=n,
pytorch_callable_name=pytorch_callable_name,
api_flag=TEST_TENSOR,
)
__all__ = [
"random_tensor",
"random",
"choose",
"constant",
"test_module_against_pytorch",
"test_flow_against_pytorch",
"test_tensor_against_pytorch",
]
sys.path.append(test_util_parent_dir)
from test_utils.automated_test_util import *
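The module above is reduced to a thin shim: it appends the parent directory of the test_utils package (located via the ONEFLOW_TEST_UTILS_DIR variable exported in the shell script change at the top of this diff) and re-exports the framework, so existing tests keep importing automated_test_util unchanged. A hypothetical minimal use of the declarative entry point it re-exports (the operator choice and test body are illustrative, not part of the commit):

def test_tanh_with_random_data(test_case):
    # compares flow.nn.Tanh against torch.nn.Tanh on a few random inputs, including
    # gradients, relying on the default generator registered for torch.Tensor inputs
    test_module_against_pytorch(test_case, "nn.Tanh", device="cpu", n=5)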
......@@ -1808,12 +1808,13 @@ class TestConv2d(flow.unittest.TestCase):
@unittest.skip("need a more relaxed tolerance")
def test_with_random_data(test_case):
for device in ["cpu", "cuda"]:
channels = random(1, 6)
test_module_against_pytorch(
test_case,
"nn.Conv2d",
extra_generators={
"input": random_tensor(ndim=4, dim1=4),
"in_channels": constant(4),
"input": random_tensor(ndim=4, dim1=channels),
"in_channels": channels,
"out_channels": random(1, 129),
"kernel_size": random(1, 4),
"stride": random(1, 4),
......@@ -1825,6 +1826,30 @@ class TestConv2d(flow.unittest.TestCase):
device=device,
)
@unittest.skip("need a more relaxed tolerance")
@autotest()
def test_against_pytorch(test_case):
channels = random(1, 6)
m = torch.nn.Conv2d(
channels,
random(1, 6),
random(1, 6),
stride=random(1, 3) | nothing(),
padding=random(1, 3) | nothing(),
dilation=random(1, 3) | nothing(),
groups=random(1, 3) | nothing(),
bias=random() | nothing(),
padding_mode=constant("zeros") | nothing(),
)
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor(
ndim=4, dim1=channels, dim2=random(1, 8), dim3=random(1, 8)
).to(device)
y = m(x)
return y
if __name__ == "__main__":
unittest.main()
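In the new test above, an expression like random(1, 3) | nothing() builds a oneof generator (defined later in this diff) that sometimes yields a value and sometimes yields Nothing; when Nothing is drawn, the keyword is dropped before the call, so PyTorch and OneFlow each fall back to their own default for that argument. A short sketch of the idiom, using only names from the framework added below:

stride = random(1, 3) | nothing()   # oneof(random(1, 3), nothing(), possibility=1/2)
# get_args() later applies stride.to(...) based on the annotation in the torch signature,
# and stride.value() then yields either a concrete value or a Nothing(); a Nothing()
# keyword never reaches the constructors, so both frameworks use their own default.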
......@@ -67,12 +67,20 @@ class TestFlattenModule(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
def test_with_random_data(test_case):
test_module_against_pytorch(
test_case,
"nn.Flatten",
extra_generators={"start_dim": random(1, 6), "end_dim": random(1, 6),},
# Our flatten produces a new tensor even when the flatten is effectively a no-op,
# while PyTorch's flatten returns the input tensor itself in that case,
# leading to an inconsistency in the leaf-ness of x and thus in whether x's grad exists
@autotest(auto_backward=False)
def test_against_pytorch(test_case):
m = torch.nn.Flatten(
start_dim=random(1, 6) | nothing(), end_dim=random(1, 6) | nothing()
)
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor().to(device)
y = m(x)
return y
if __name__ == "__main__":
......
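The comment above describes a real semantic difference; a small pure-PyTorch illustration of why the leaf-ness of the returned tensor decides whether a grad is stored on it (a sketch, not part of the commit):

import torch

x = torch.ones(4, requires_grad=True)
y = x                      # the "no-op" case: the very same leaf tensor is returned
y.sum().backward()
print(y.grad is None)      # False: y is the leaf x, so a grad is stored on it

x2 = torch.ones(4, requires_grad=True)
y2 = x2 * 1                # a framework that returns a new tensor behaves like this
y2.sum().backward()
print(y2.grad is None)     # True: y2 is not a leaf, so no grad is stored on it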
......@@ -23,6 +23,7 @@ from automated_test_util import *
@flow.unittest.skip_unless_1n1d()
class TestMaskedFill(flow.unittest.TestCase):
@unittest.skip("has bug now, need rewrite")
def test_masked_fill_aginst_pytorch(test_case):
import numpy as np
import torch
......
......@@ -17,9 +17,11 @@ from collections import OrderedDict
import unittest
import numpy as np
import torch
import oneflow.experimental as flow
from test_util import GenArgList
from automated_test_util import *
def _test_matmul(test_case, device):
......@@ -333,6 +335,14 @@ class TestModule(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest()
def test_flow_matmul_with_random_data(test_case):
k = random(1, 6)
x = random_pytorch_tensor(ndim=2, dim1=k)
y = random_pytorch_tensor(ndim=2, dim0=k)
z = torch.matmul(x, y)
return z
if __name__ == "__main__":
unittest.main()
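The matmul test above leans on a property of the generator framework: a generator caches its value once it has been computed, so passing the same k object to both random_pytorch_tensor calls guarantees the inner dimensions agree within one sampled case, while each of the n autotest iterations re-runs the test body and draws a fresh k. A compact sketch of the pattern:

k = random(1, 6)                           # one generator object shared by both tensors
x = random_pytorch_tensor(ndim=2, dim1=k)  # k's value is computed here and cached
y = random_pytorch_tensor(ndim=2, dim0=k)  # the cached value is reused, so the shapes match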
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASE_DIR)
from test_utils.automated_test_util import *
......@@ -748,6 +748,13 @@ class TestTensor(flow.unittest.TestCase):
np.allclose(of_input.grad.numpy(), np_out_grad, 1e-4, 1e-4, equal_nan=True)
)
# TODO: find a way to import automated_test_util here to enable the following test
#
# @autotest()
# def test_tensor_tan(test_case):
# x = random_pytorch_tensor().to(random_device())
# return x.tan()
def test_tensor_tan(test_case):
np_input = np.random.random((2, 3)) - 0.5
of_input = flow.Tensor(np_input, dtype=flow.float32, requires_grad=True)
......
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Since the name test_util is already taken, this package is named test_utils.
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .generators import *
from .torch_flow_dual_object import *
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import inspect
import typing # This unused import is needed
from typing import Dict, Optional, Tuple, Any, Union
from collections import namedtuple
import random as random_util
import os
import oneflow.experimental as flow
import torch
import numpy as np
py_tuple = tuple
TEST_MODULE = 0
TEST_FLOW = 1
TEST_TENSOR = 2
rng = np.random.default_rng()
annotation2default_generator = {}
annotation2torch_to_flow_converter = {}
def data_generator(annotation):
def register_data_generator(cls):
annotation2default_generator[annotation] = lambda: cls()
return cls
return register_data_generator
def torch_to_flow_converter(annotation):
def register_flow_to_flow_converter(func):
annotation2torch_to_flow_converter[annotation] = func
return func
return register_flow_to_flow_converter
@torch_to_flow_converter(torch.Tensor)
def tensor_converter(torch_tensor):
return flow.tensor(torch_tensor.cpu().numpy())
def convert_torch_object_to_flow(x):
for annotation, converter in annotation2torch_to_flow_converter.items():
if isinstance(x, annotation):
return converter(x)
return x
def pack(x):
if isinstance(x, generator):
return x
return constant(x)
class Nothing:
pass
class generator:
def __init__(self, children):
self.children = children
self._value = None
def _init(self):
self._value = None
for x in self.children:
x._init()
def eval(self):
self._init()
return self.value()
def _calc_value(self):
raise NotImplementedError()
def value(self):
if self._value is None:
self._value = self._calc_value()
return self._value
def size(self):
return 1
def __or__(self, other):
other = pack(other)
return oneof(
self, other, possibility=self.size() / (self.size() + other.size())
)
def __ror__(self, other):
return self | other
def __add__(self, other):
return add(self, other)
def __radd__(self, other):
return self + other
def __sub__(self, other):
return self + neg(other)
def __rsub__(self, other):
return neg(self - other)
def to(self, annotation):
self._to(annotation)
for x in self.children:
x.to(annotation)
return self
def _to(self, annotation):
pass
class add(generator):
def __init__(self, a, b):
self.a = pack(a)
self.b = pack(b)
super().__init__([self.a, self.b])
def _calc_value(self):
return self.a.value() + self.b.value()
class neg(generator):
def __init__(self, a):
self.a = pack(a)
super().__init__([self.a])
def _calc_value(self):
return -self.a.value()
class oneof(generator):
def __init__(self, *args, possibility=None):
self.args = list(map(pack, args))
super().__init__(self.args)
if isinstance(possibility, float):
assert len(args) == 2
possibility = [possibility, 1 - possibility]
if possibility is None:
possibility = [1 / len(args)] * len(args)
self.possibility = pack(possibility)
def _calc_value(self):
rand = rng.random()
sum = 0
for i, possibility in enumerate(self.possibility.value()):
sum += possibility
if sum > rand:
return self.args[i].value()
raise RuntimeError()
def size(self):
return sum([x.size() for x in self.args])
class tuple(generator):
def __init__(self, *args):
self.args = list(map(pack, args))
super().__init__(self.args)
def _calc_value(self):
return py_tuple([x.value() for x in self.args])
class constant(generator):
def __init__(self, x):
super().__init__([])
self.x = x
def _calc_value(self):
return self.x
class nothing(generator):
def __init__(self):
super().__init__([])
def _calc_value(self):
return Nothing()
class random(generator):
def __init__(self, low=1, high=6):
self.low = pack(low)
self.high = pack(high)
super().__init__([self.low, self.high])
self.annotation = None
def _to(self, annotation):
if self.annotation is not None:
return
if hasattr(annotation, "__origin__"):
# PyTorch _size_2_t and similar types are defined by type variables,
# leading to unexpected __args__ and __origin__
#
# >>> _size_2_t = Union[T, Tuple[T, T]][int]
# >>> _size_2_t.__origin__
# typing.Union[~T, typing.Tuple[~T, ~T]]
#
# So recreate a new annotation object by repr and eval
#
# >>> _size_2_t
# typing.Union[int, typing.Tuple[int, int]]
# >>> _size_2_t_new = eval(repr(annotation))
# >>> _size_2_t_new.__origin__
# typing.Union
annotation = eval(repr(annotation))
self.annotation = annotation
def _generate(self, annotation):
if hasattr(annotation, "__origin__"):
if annotation.__origin__ is Union:
x = random_util.choice(annotation.__args__)
return self._generate(x)
if annotation.__origin__ is Tuple or annotation.__origin__ is py_tuple:
return [self._generate(x) for x in annotation.__args__]
else:
raise NotImplementedError(
f"Not implemented annotation {annotation} in random, type(annotation.__origin__) is {type(annotation.__origin__)}"
)
low, high = self.low.value(), self.high.value()
if annotation == int:
val = int(rng.integers(low, high))
elif annotation == float:
val = float(rng.random() * (high - low) + low)
elif annotation == bool:
val = random_util.choice([True, False])
else:
raise NotImplementedError(
f"Not implemented annotation {annotation} in random"
)
return val
def _calc_value(self):
return self._generate(self.annotation)
def random_or_nothing(low, high):
return oneof(random(low, high), nothing(), possibility=2 / 3)
@data_generator(torch.Tensor)
class random_tensor(generator):
def __init__(self, ndim=None, dim0=1, dim1=None, dim2=None, dim3=None, dim4=None):
if ndim is None:
ndim = random(1, 6)
if dim0 is None:
dim0 = random(1, 8)
if dim1 is None:
dim1 = random(1, 8)
if dim2 is None:
dim2 = random(1, 8)
if dim3 is None:
dim3 = random(1, 8)
if dim4 is None:
dim4 = random(1, 8)
self.ndim = pack(ndim).to(int)
self.dim0 = pack(dim0).to(int)
self.dim1 = pack(dim1).to(int)
self.dim2 = pack(dim2).to(int)
self.dim3 = pack(dim3).to(int)
self.dim4 = pack(dim4).to(int)
super().__init__(
[self.ndim, self.dim0, self.dim1, self.dim2, self.dim3, self.dim4]
)
def _calc_value(self):
ndim = self.ndim.value()
dim0 = self.dim0.value()
dim1 = self.dim1.value()
dim2 = self.dim2.value()
dim3 = self.dim3.value()
dim4 = self.dim4.value()
shape = rng.integers(low=1, high=8, size=ndim)
if dim0 is not None:
shape[0] = dim0
if ndim >= 2:
shape[1] = dim1
if ndim >= 3:
shape[2] = dim2
if ndim >= 4:
shape[3] = dim3
if ndim == 5:
shape[4] = dim4
np_arr = rng.random(shape)
return torch.Tensor(np_arr)
@data_generator(bool)
def random_bool():
return random().to(bool)
class random_device(generator):
def __init__(self):
super().__init__([])
def _calc_value(self):
return random_util.choice(["cuda", "cpu"])
def test_against_pytorch(
test_case,
callable_name,
extra_annotations: Optional[Dict[str, Any]] = None,
extra_generators: Optional[Dict[str, Any]] = None,
extra_defaults: Optional[Dict[str, Any]] = None,
device: str = "cuda",
training: bool = True,
backward: bool = True,
rtol=1e-4,
atol=1e-5,
n=20,
pytorch_callable_name=None,
api_flag: int = TEST_MODULE,
):
assert device in ["cuda", "cpu"]
if not training:
assert not backward
if extra_annotations is None:
extra_annotations = {}
if extra_generators is None:
extra_generators = {}
if extra_defaults is None:
extra_defaults = {}
if pytorch_callable_name is None:
pytorch_callable_name = callable_name
verbose = os.getenv("ONEFLOW_TEST_VERBOSE") is not None
def has_full_args_spec(callable):
try:
inspect.getfullargspec(callable)
return True
except Exception:
return False
if api_flag == TEST_TENSOR:
pytorch_tensor = torch.Tensor(1)
pytorch_call = eval(f"pytorch_tensor.{pytorch_callable_name}")
else:
pytorch_call = eval(f"torch.{pytorch_callable_name}")
Spec = namedtuple(
"spec",
"args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations",
)
if has_full_args_spec(pytorch_call):
tmp_spec = inspect.getfullargspec(pytorch_call)
new_defaults = tmp_spec.defaults
if new_defaults is None:
new_defaults = []
new_kwonlydefaults = tmp_spec.kwonlydefaults
if new_kwonlydefaults is None:
new_kwonlydefaults = []
spec = Spec(
tmp_spec.args,
tmp_spec.varargs,
tmp_spec.varkw,
new_defaults,
tmp_spec.kwonlyargs,
new_kwonlydefaults,
tmp_spec.annotations,
)
else:
args = list(extra_annotations.keys()) + list(extra_defaults.keys())
spec = Spec(args, None, None, [], [], {}, {})
annotations = spec.annotations
annotations.update(extra_annotations)
if "return" in annotations:
del annotations["return"]
args = (set(spec.args) | set(spec.kwonlyargs)) - {"self"}
assert args == set(
annotations.keys()
), f"args = {args}, annotations = {annotations.keys()}"
if "input" not in annotations:
annotations.update({"input": torch.Tensor})
def has_default(name):
if name in spec.args:
return (len(spec.args) - spec.args.index(name)) <= len(spec.defaults)
else:
assert name in spec.kwonlyargs
return (len(spec.kwonlyargs) - spec.kwonlyargs.index(name)) <= len(
spec.kwonlydefaults
)
def get_generator(name):
annotation = annotations[name]
if name in extra_generators:
generator = extra_generators[name]
else:
generator = annotation2default_generator[annotation]()
generator = generator.to(annotation)
return generator
while n > 0:
flow_attr_dict = {}
torch_attr_dict = {}
generator_tuple = tuple(
*([get_generator(name) for name in args] + [get_generator("input")])
)
values = generator_tuple.eval()
for i, name in enumerate(args):
torch_data = values[i]
if isinstance(torch_data, Nothing):
continue
flow_data = convert_torch_object_to_flow(torch_data)
if isinstance(torch_data, torch.Tensor):
torch_data = torch_data.to(device)
if isinstance(flow_data, flow.Tensor):
flow_data = flow_data.to(device)
flow_attr_dict[name] = flow_data
torch_attr_dict[name] = torch_data
if verbose:
print(f"attr = {torch_attr_dict}, device = {device}")
torch_input_original = values[-1]
flow_input_original = convert_torch_object_to_flow(torch_input_original)
flow_input_original.requires_grad_(backward)
torch_input_original.requires_grad_(backward)
flow_input, torch_input = (
flow_input_original.to(device),
torch_input_original.to(device),
)
try:
if api_flag == TEST_MODULE:
torch_call = pytorch_call(**torch_attr_dict)
torch_call = torch_call.to(device)
torch_call.train(training)
torch_res = torch_call(torch_input)
state_dict = torch_call.state_dict()
state_dict = {
k: v.detach().cpu().numpy() for k, v in state_dict.items()
}
elif api_flag == TEST_FLOW:
torch_xxx_func = eval(f"torch.{pytorch_callable_name}")
torch_res = torch_xxx_func(torch_input, **torch_attr_dict)
else:
torch_tensor_xxx_func = eval(f"torch_input.{pytorch_callable_name}")
torch_res = torch_tensor_xxx_func(**torch_attr_dict)
loss = torch_res.sum()
loss.backward()
if api_flag == TEST_MODULE:
state_dict = torch_call.state_dict()
state_dict = {
k: v.detach().cpu().numpy() for k, v in state_dict.items()
}
except Exception as e:
if verbose:
print(f"PyTorch error: {e}")
# The randomly generated test data is not always valid,
# so just skip when PyTorch raises an exception
continue
if api_flag == TEST_MODULE:
flow_call_class = eval(f"flow.{callable_name}")
flow_call = flow_call_class(**flow_attr_dict)
flow_call = flow_call.to(device)
flow_call.train(training)
flow_call.load_state_dict(state_dict)
flow_res = flow_call(flow_input)
elif api_flag == TEST_FLOW:
flow_xxx_func = eval(f"flow.{callable_name}")
flow_res = flow_xxx_func(flow_input, **flow_attr_dict)
else:
flow_tensor_xxx_func = eval(f"flow_input.{callable_name}")
flow_res = flow_tensor_xxx_func(**flow_attr_dict)
loss = flow_res.sum()
loss.backward()
def allclose_or_fail(flow_tensor, torch_tensor):
is_allclose = np.allclose(
flow_tensor.numpy(),
torch_tensor.detach().cpu().numpy(),
rtol=rtol,
atol=atol,
)
test_case.assertTrue(
is_allclose,
f"flow_tensor = {flow_tensor},\ntorch_tensor = {torch_tensor},\nattr_dict = {torch_attr_dict},\nflow_input_tensor = {flow_input_original}",
)
allclose_or_fail(flow_res, torch_res)
allclose_or_fail(flow_input_original.grad, torch_input_original.grad)
if api_flag == TEST_MODULE:
flow_parameters = dict(flow_call.named_parameters())
for name, torch_param in torch_call.named_parameters():
flow_param = flow_parameters[name]
allclose_or_fail(flow_param.grad, torch_param.grad)
if verbose:
print("test passed")
n -= 1
def test_module_against_pytorch(
test_case,
callable_name,
extra_annotations: Optional[Dict[str, Any]] = None,
extra_generators: Optional[Dict[str, Any]] = None,
extra_defaults: Optional[Dict[str, Any]] = None,
device: str = "cuda",
training: bool = True,
backward: bool = True,
rtol=1e-4,
atol=1e-5,
n=20,
pytorch_callable_name=None,
):
return test_against_pytorch(
test_case=test_case,
callable_name=callable_name,
extra_annotations=extra_annotations,
extra_generators=extra_generators,
extra_defaults=extra_defaults,
device=device,
training=training,
backward=backward,
rtol=rtol,
atol=atol,
n=n,
pytorch_callable_name=pytorch_callable_name,
api_flag=TEST_MODULE,
)
def test_flow_against_pytorch(
test_case,
callable_name,
extra_annotations: Optional[Dict[str, Any]] = None,
extra_generators: Optional[Dict[str, Any]] = None,
extra_defaults: Optional[Dict[str, Any]] = None,
device: str = "cuda",
training: bool = True,
backward: bool = True,
rtol=1e-4,
atol=1e-5,
n=20,
pytorch_callable_name=None,
):
return test_against_pytorch(
test_case=test_case,
callable_name=callable_name,
extra_annotations=extra_annotations,
extra_generators=extra_generators,
extra_defaults=extra_defaults,
device=device,
training=training,
backward=backward,
rtol=rtol,
atol=atol,
n=n,
pytorch_callable_name=pytorch_callable_name,
api_flag=TEST_FLOW,
)
def test_tensor_against_pytorch(
test_case,
callable_name,
extra_annotations: Optional[Dict[str, Any]] = None,
extra_generators: Optional[Dict[str, Any]] = None,
extra_defaults: Optional[Dict[str, Any]] = None,
device: str = "cuda",
training: bool = True,
backward: bool = True,
rtol=1e-4,
atol=1e-5,
n=20,
pytorch_callable_name=None,
):
return test_against_pytorch(
test_case=test_case,
callable_name=callable_name,
extra_annotations=extra_annotations,
extra_generators=extra_generators,
extra_defaults=extra_defaults,
device=device,
training=training,
backward=backward,
rtol=rtol,
atol=atol,
n=n,
pytorch_callable_name=pytorch_callable_name,
api_flag=TEST_TENSOR,
)
__all__ = [
"random_tensor",
"random_bool",
"random_device",
"random",
"random_or_nothing",
"constant",
"nothing",
"test_module_against_pytorch",
"test_flow_against_pytorch",
"test_tensor_against_pytorch",
]
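A note on the | operator defined above: every generator reports a size, and a | b wraps both in oneof, picking a with probability a.size() / (a.size() + b.size()). Since oneof's own size is the sum of its children's sizes, chained unions stay uniform over the leaves. A short sketch derived from the code above:

g = random(1, 4) | constant(7) | nothing()
# (random | constant) has size 2 and nothing() has size 1, so the outer oneof takes the
# left branch with probability 2/3 and nothing() with probability 1/3; within the left
# branch each child is taken with probability 1/2, so every leaf ends up at 1/3.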
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections.abc
import inspect
import functools
import os
import torch as torch_original
import oneflow as flow_stable
import oneflow.experimental as flow
import numpy as np
from .generators import generator, random_tensor, Nothing
postulate = [".rand", ".Tensor"]
def torch_tensor_to_flow(x):
return flow.tensor(x.cpu().numpy())
class PyTorchDoesNotSupportError(Exception):
def __init__(self, exc):
self.exc = exc
def __str__(self):
return repr(self)
def __repr__(self):
return f"PyTorch error: {str(self.exc)}"
def get_args(callable, *args, **kwargs):
try:
spec = inspect.getfullargspec(callable)
spec_args = spec.args
if spec_args[0] == "self":
del spec_args[0]
for i, arg in enumerate(args):
arg_name = spec_args[i]
annotation = spec.annotations[arg_name]
if isinstance(arg, generator):
arg.to(annotation)
for arg_name, arg in kwargs.items():
annotation = spec.annotations[arg_name]
if isinstance(arg, generator):
arg.to(annotation)
except:
pass
pytorch_args, pytorch_kwargs, oneflow_args, oneflow_kwargs = [], {}, [], {}
def get_pytorch_value(x):
if isinstance(x, DualObject):
return x.pytorch
return x
def get_oneflow_value(x):
if isinstance(x, DualObject):
return x.oneflow
return x
def get_generator_value(x):
if isinstance(x, generator):
return x.value()
return x
for arg in args:
arg = get_generator_value(arg)
pytorch_args.append(get_pytorch_value(arg))
oneflow_args.append(get_oneflow_value(arg))
for key, value in kwargs.items():
value = get_generator_value(value)
if isinstance(value, Nothing):
continue
pytorch_kwargs[key] = get_pytorch_value(value)
oneflow_kwargs[key] = get_oneflow_value(value)
return pytorch_args, pytorch_kwargs, oneflow_args, oneflow_kwargs
counter = 0
def GetDualObject(name, pytorch, oneflow):
global counter
counter += 1
skipped_magic_methods = [
"__class__",
"__mro__",
"__new__",
"__init__",
"__getattr__",
"__setattr__",
"__getattribute__",
"__dict__",
"__weakref__",
"__builtins__",
"__qualname__",
"__name__",
"__str__",
"__repr__",
]
pytorch_methods = dir(pytorch)
if hasattr(pytorch, "__call__") and "__call__" not in pytorch_methods:
pytorch_methods.append("__call__")
magic_methods_for_new_cls = {}
for method_name in pytorch_methods:
if method_name.startswith("__") and method_name not in skipped_magic_methods:
# init a new 'method_name' variable other than the one in for loop,
# avoid a pitfall:
# https://python.plainenglish.io/python-pitfalls-with-variable-capture-dcfc113f39b7
def get_dual_method(method_name):
# __call__ is special. We should not delegate the '__call__' of the torch wrapper of class 'nn.Conv2d'
# to 'nn.Conv2d.__call__', as 'nn.Conv2d.__call__' belongs to the object of type 'nn.Conv2d'
# (not the class itself)
if method_name == "__call__":
def dual_method(self, *args, **kwargs):
(
pytorch_args,
pytorch_kwargs,
oneflow_args,
oneflow_kwargs,
) = get_args(pytorch, *args, **kwargs)
# use () instead of '__call__'
try:
pytorch_res = pytorch(*pytorch_args, **pytorch_kwargs)
except Exception as e:
raise PyTorchDoesNotSupportError(e)
# only check if the method is a postulate when it is called
if name in postulate:
oneflow_res = torch_tensor_to_flow(pytorch_res)
else:
oneflow_res = oneflow(*oneflow_args, **oneflow_kwargs)
return GetDualObject("unused", pytorch_res, oneflow_res)
else:
def dual_method(self, *args, **kwargs):
pytorch_method = getattr(pytorch, method_name)
oneflow_method = getattr(oneflow, method_name)
(
pytorch_args,
pytorch_kwargs,
oneflow_args,
oneflow_kwargs,
) = get_args(pytorch_method, *args, **kwargs)
try:
pytorch_res = pytorch_method(
*pytorch_args, **pytorch_kwargs
)
except Exception as e:
raise PyTorchDoesNotSupportError(e)
oneflow_res = oneflow_method(*oneflow_args, **oneflow_kwargs)
return GetDualObject("unused", pytorch_res, oneflow_res)
return dual_method
magic_methods_for_new_cls[method_name] = get_dual_method(method_name)
Cls = type(f"{name}_{counter}", (DualObject,), magic_methods_for_new_cls)
return Cls(name, pytorch, oneflow)
class DualObject:
def __init__(self, name, pytorch, oneflow):
self.name = name
self.pytorch = pytorch
self.oneflow = oneflow
if isinstance(pytorch, torch_original.nn.Module):
state_dict = pytorch.state_dict()
state_dict = {k: v.detach().cpu().numpy() for k, v in state_dict.items()}
oneflow.load_state_dict(state_dict)
dual_modules_to_test.append(self)
if isinstance(pytorch, torch_original.Tensor):
dual_objects_to_test.append(self)
def __repr__(self):
return f"PyTorch object:\n{self.pytorch}\n\nOneFlow object:\n{self.oneflow}"
def __getattr__(self, key):
pytorch_attr = getattr(self.pytorch, key)
oneflow_attr = getattr(self.oneflow, key)
new_name = f"{self.name}.{key}"
return GetDualObject(new_name, pytorch_attr, oneflow_attr)
dual_modules_to_test = []
dual_objects_to_test = []
torch_type2checker = {}
def equality_checker(torch_type, flow_type):
def deco(f):
torch_type2checker[(torch_type, flow_type)] = f
return f
return deco
def check_equality(dual_object: DualObject):
checker = torch_type2checker.get(
(type(dual_object.pytorch), type(dual_object.oneflow)), None
)
if checker is None:
for key, value in torch_type2checker.items():
if isinstance(dual_object.pytorch, key[0]) and isinstance(
dual_object.oneflow, key[1]
):
checker = value
break
assert checker is not None
return checker(dual_object.pytorch, dual_object.oneflow)
@equality_checker(torch_original.Tensor, flow.Tensor)
@equality_checker(torch_original.Tensor, flow_stable._oneflow_internal.Tensor)
def check_tensor_equality(torch_tensor, flow_tensor):
# TODO: check dtype
if torch_tensor.grad is not None:
assert (
flow_tensor.grad is not None
), "OneFlow tensor doesn't have grad while PyTorch tensor has one"
if not np.allclose(
torch_tensor.grad.detach().cpu().numpy(), flow_tensor.grad.numpy()
):
return False
return np.allclose(torch_tensor.detach().cpu().numpy(), flow_tensor.numpy())
def autotest(n=20, auto_backward=True, rtol=1e-4, atol=1e-5):
verbose = os.getenv("ONEFLOW_TEST_VERBOSE") is not None
def deco(f):
@functools.wraps(f)
def new_f(test_case):
nonlocal n
while n > 0:
dual_modules_to_test.clear()
dual_objects_to_test.clear()
try:
res = f(test_case)
except PyTorchDoesNotSupportError as e:
if verbose:
print(e)
continue
# TODO: support types other than Tensor, like torch.Size/flow.Size
if res is not None:
if not isinstance(res, collections.abc.Sequence):
res = [res]
for x in res:
if auto_backward:
if isinstance(x.pytorch, torch_original.Tensor):
x.sum().backward()
dual_objects_to_test.append(x)
for x in dual_modules_to_test:
# x.state_dict().values() returns dual object with inconsistent values
for key in x.pytorch.state_dict().keys():
dual_objects_to_test.append(
GetDualObject(
"unused",
x.pytorch.state_dict()[key],
x.oneflow.state_dict()[key],
)
)
for x in dual_objects_to_test:
test_case.assertTrue(check_equality(x))
if verbose:
print("test passed")
n -= 1
return new_f
return deco
def random_pytorch_tensor(
ndim=None, dim0=1, dim1=None, dim2=None, dim3=None, dim4=None, requires_grad=True
):
if isinstance(requires_grad, generator):
requires_grad = requires_grad.value()
pytorch_tensor = (
random_tensor(ndim, dim0, dim1, dim2, dim3, dim4)
.value()
.requires_grad_(requires_grad)
)
flow_tensor = flow.tensor(pytorch_tensor.detach().cpu().numpy(), requires_grad=True)
return GetDualObject("unused", pytorch_tensor, flow_tensor)
torch = GetDualObject("", torch_original, flow)
__all__ = ["torch", "autotest", "random_pytorch_tensor"]
......@@ -12,7 +12,10 @@ def check_unwanted_test_scripts(python_test_dir=None, allowed=None):
os.path.relpath(os.path.join(python_test_dir, a), src_root) for a in allowed
]
for (dirpath, dirnames, filenames) in os.walk(src_root):
if python_test_dir in dirpath and "__pycache__" not in dirpath:
if (
dirpath.startswith(os.path.abspath(python_test_dir) + os.sep)
and "__pycache__" not in dirpath
):
rel_to_python_test = os.path.relpath(dirpath, python_test_dir)
rel_to_src_root = os.path.relpath(dirpath, src_root)
print(f"checking: {rel_to_src_root}")
......@@ -39,7 +42,7 @@ def check_unwanted_test_scripts(python_test_dir=None, allowed=None):
check_unwanted_test_scripts(
python_test_dir=os.path.join(src_root, "oneflow/python/test"),
allowed=["custom_ops", "dataloader", "graph", "models", "modules", "tensor",],
allowed=["custom_ops", "dataloader", "graph", "models", "modules", "tensor"],
)
check_unwanted_test_scripts(
......