Unverified commit 8a0fa988, authored by Xiaoyu Zhang, committed by GitHub

Add new autotest (#5588)


* fix upsample nearest bug

* fix upsample nearest bug (#5347)

Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>

* fix upsample bilinear bug

* fix export bug

* add new_autotest_part1

* add new_autotest_part1

* add squeeze test

* add permute test

* add hardswish test

* add prelu

* fix bug

Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
parent 8e9e61db
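All of the test hunks below share one pattern: the old per-device loops built on test_module_against_pytorch / test_flow_against_pytorch / test_tensor_against_pytorch are replaced by the @autotest() decorator from automated_test_util. What follows is only a sketch of one complete file in the new style, mirroring the Hardswish hunk; the exact semantics of autotest, random, random_device and random_pytorch_tensor are assumed from that helper module rather than documented here.

import unittest

import oneflow as flow
from automated_test_util import *


class TestHardswishModule(flow.unittest.TestCase):
    # @autotest() is assumed to execute the body against OneFlow and PyTorch
    # with the same random inputs and to compare the returned tensor (and,
    # presumably, its gradients) between the two frameworks.
    @autotest()
    def test_hardswish_module_with_random_data(test_case):
        m = torch.nn.Hardswish()                 # dual module resolved by the helper
        m.train(random())                        # randomly toggle train/eval mode
        device = random_device()                 # randomly pick "cpu" or "cuda"
        m.to(device)
        x = random_pytorch_tensor().to(device)   # random shape and values
        y = m(x)
        return y                                 # the returned value is what gets checked


if __name__ == "__main__":
    unittest.main()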
@@ -63,7 +63,6 @@ def squeeze_op(input, dim: Optional[Sequence[int]] = None):
        dim = [dim]
    elif dim is None:
        dim = range(input.ndim)
    dim = list(filter(lambda i: input.size(i) == 1, dim))
    return Squeeze(dim=dim)(input)
......
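For reference, the filter(...) expression shown in the hunk above keeps only the axes whose extent is 1, so only those get squeezed. A tiny stand-alone illustration of that logic, using a plain tuple in place of a tensor shape (the shape itself is made up):

shape = (1, 3, 1, 2)                              # hypothetical input shape
dim = range(len(shape))                           # dim is None -> consider every axis
dim = list(filter(lambda i: shape[i] == 1, dim))
print(dim)                                        # [0, 2]: only the size-1 axes remain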
@@ -22,7 +22,11 @@ from typing import Optional, Sequence

class Transpose(Module):
    def __init__(
        self, dim0, dim1, conjugate: bool = False, batch_axis_non_change: bool = False,
        self,
        dim0: int,
        dim1: int,
        conjugate: bool = False,
        batch_axis_non_change: bool = False,
    ) -> None:
        super().__init__()
......
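The Transpose change only reflows the signature and adds int annotations for dim0 and dim1. A short usage sketch, assuming flow.transpose and Tensor.transpose forward dim0 and dim1 to this module the way the tests further down exercise them:

import numpy as np
import oneflow as flow

x = flow.Tensor(np.random.randn(2, 3, 4).astype(np.float32))
y = x.transpose(dim0=1, dim1=2)                   # swap the last two axes
print(y.shape)                                    # expected: (2, 4, 3)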
@@ -737,9 +737,15 @@ class TestHardswishModule(flow.unittest.TestCase):
        for arg in GenArgList(arg_dict):
            _test_hardswish_impl(test_case, *arg)

    @autotest()
    def test_hardswish_module_with_random_data(test_case):
        for device in ["cpu", "cuda"]:
            test_module_against_pytorch(test_case, "nn.Hardswish", device=device, n=2)
        m = torch.nn.Hardswish()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y


def _np_hardtanh_grad(x):
......
@@ -20,6 +20,7 @@ import numpy as np
import oneflow as flow
from test_util import GenArgList
from automated_test_util import *


def _prelu(input, alpha):
@@ -85,6 +86,17 @@ class TestPReLU(flow.unittest.TestCase):
            _test_prelu_ndims(test_case, *arg)
            _test_prelu_grad(test_case, *arg)

    @unittest.skip("prelu has bug")
    @autotest()
    def test_prelu_module_with_random_data(test_case):
        m = torch.nn.PReLU(num_parameters=random().to(int), init=random().to(float))
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y


if __name__ == "__main__":
    unittest.main()
@@ -20,6 +20,7 @@ import numpy as np
import oneflow as flow
from test_util import GenArgList
from automated_test_util import *


def _test_reshape(test_case, device):
@@ -42,16 +43,6 @@ def _test_reshape_tuple(test_case, device):
    test_case.assertTrue(np.array_equal(of_shape, np_shape))


def _test_tensor_reshape(test_case, device):
    x = np.array(
        [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    ).astype(np.float32)
    input = flow.Tensor(x, device=flow.device(device))
    of_shape = input.reshape(shape=[2, 2, 2, -1]).numpy().shape
    np_shape = (2, 2, 2, 2)
    test_case.assertTrue(np.array_equal(of_shape, np_shape))


def _test_reshape_backward(test_case, device):
    x = np.array(
        [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
@@ -77,13 +68,19 @@ class TestModule(flow.unittest.TestCase):
        arg_dict["test_fun"] = [
            _test_reshape,
            _test_reshape_tuple,
            _test_tensor_reshape,
            _test_reshape_backward,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    @autotest()
    def test_reshape_flow_with_random_data(test_case):
        device = random_device()
        x = random_pytorch_tensor(ndim=4).to(device)
        y = torch.reshape(x, shape=(-1,))
        return y


if __name__ == "__main__":
    unittest.main()
@@ -99,25 +99,12 @@ class TestSqueeze(flow.unittest.TestCase):
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    @autotest()
    def test_flow_squeeze_with_random_data(test_case):
        for device in ["cpu", "cuda"]:
            test_flow_against_pytorch(
                test_case,
                "squeeze",
                extra_annotations={"dim": int,},
                extra_generators={"dim": random(0, 6)},
                device=device,
            )

    def test_flow_tensor_squeeze_with_random_data(test_case):
        for device in ["cpu", "cuda"]:
            test_tensor_against_pytorch(
                test_case,
                "squeeze",
                extra_annotations={"dim": int},
                extra_generators={"dim": random(0, 6)},
                device=device,
            )
        device = random_device()
        x = random_pytorch_tensor().to(device)
        y = torch.squeeze(x, random(1, 3).to(int))
        return y


if __name__ == "__main__":
......
@@ -20,6 +20,7 @@ import numpy as np
import oneflow as flow
from test_util import GenArgList
from automated_test_util import *


def _test_transpose(test_case, device):
@@ -88,6 +89,13 @@ class TestTranspose(flow.unittest.TestCase):
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    @autotest()
    def test_transpose_flow_with_random_data(test_case):
        device = random_device()
        x = random_pytorch_tensor(ndim=4).to(device)
        y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
        return y


if __name__ == "__main__":
    unittest.main()
@@ -20,6 +20,7 @@ import numpy as np
import oneflow as flow
from test_util import GenArgList
from automated_test_util import *


def _test_unsqueeze(test_case, device):
@@ -69,6 +70,13 @@ class TestUnsqueeze(flow.unittest.TestCase):
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    @autotest()
    def test_flow_unsqueeze_with_random_data(test_case):
        device = random_device()
        x = random_pytorch_tensor().to(device)
        y = torch.unsqueeze(x, random(1, 3).to(int))
        return y


if __name__ == "__main__":
    unittest.main()
@@ -580,6 +580,55 @@ class TestTensor(flow.unittest.TestCase):
            np.allclose(of_input.grad.numpy(), np.zeros(shape), 1e-4, 1e-4)
        )

    def _test_tensor_reshape(test_case):
        x = np.array(
            [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
        ).astype(np.float32)
        input = flow.Tensor(x)
        of_shape = input.reshape(shape=[2, 2, 2, -1]).numpy().shape
        np_shape = (2, 2, 2, 2)
        test_case.assertTrue(np.array_equal(of_shape, np_shape))

    @autotest()
    def test_reshape_tensor_with_random_data(test_case):
        device = random_device()
        x = random_pytorch_tensor(ndim=4).to(device)
        y = x.reshape(shape=(-1,))
        return y

    @autotest()
    def test_tensor_squeeze_with_random_data(test_case):
        device = random_device()
        x = random_pytorch_tensor().to(device)
        y = x.squeeze(random().to(int))
        return y

    @autotest()
    def test_flow_unsqueeze_with_random_data(test_case):
        device = random_device()
        x = random_pytorch_tensor().to(device)
        y = x.unsqueeze(random(1, 3).to(int))
        return y

    @autotest()
    def test_permute_flow_with_random_data(test_case):
        device = random_device()
        x = random_pytorch_tensor(ndim=4).to(device)
        y = x.permute(
            random(0, 4).to(int),
            random(0, 4).to(int),
            random(0, 4).to(int),
            random(0, 4).to(int),
        )
        return y

    @autotest()
    def test_transpose_tensor_with_random_data(test_case):
        device = random_device()
        x = random_pytorch_tensor(ndim=4).to(device)
        y = x.transpose(dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
        return y

    def test_tensor_where(test_case):
        x = flow.Tensor(
            np.array([[-0.4620, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]]),
......