Unverified commit 5341044e authored by Tianyu Zhao, committed by GitHub

Add some tests with the new framework for auto testing (#5561)


* Test 'nn.Linear' and 'nn.Identity' with random data

* Test 'nn.PixelShuffle' with random data

* Test 'greater' and 'less' with random data

* Test 'flow.Tensor.flatten' with random data

* Test 'greater' and 'less' with random-sized data

* Test 'nn.CrossEntropyLoss' with random data

* Replace multiple return entries with the 'oneof' generator

* Add 'nothing()' test cases for parameters with default values

* Remove the hard-coded test cases of `nn.CrossEntropyLoss`

* auto format by CI

* Skip the test of 'nn.CrossEntropyLoss' temporarily

Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
Co-authored-by: oneflow-ci-bot <ci-bot@oneflow.org>
parent 45a40ef4
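
For orientation, the pattern these new tests follow looks roughly like the sketch below. The `nn.ReLU` example is hypothetical and not part of the commit; `autotest`, `oneof`, `nothing`, `random`, `random_device`, and `random_pytorch_tensor` are the `automated_test_util` helpers that appear in the diff, and `torch` inside such a test is the framework's dual module that runs PyTorch and OneFlow side by side. In the real files these are methods on a `flow.unittest.TestCase`.

from automated_test_util import *

@autotest(n=20)
def test_relu_with_random_data(test_case):
    # constructor arguments are drawn at random; nothing() makes the
    # framework sometimes omit an argument that has a default value
    m = torch.nn.ReLU(inplace=oneof(True, False, nothing()))
    m.train(random())
    device = random_device()
    m.to(device)
    x = random_pytorch_tensor(ndim=2).to(device)
    # the returned tensor is computed by both frameworks and compared,
    # gradients included unless auto_backward=False is passed
    return m(x)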
@@ -14,112 +14,34 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
g_test_samples = [
{
"input": np.array(
[
[-0.6980871, 0.4765042, -1.969919, 0.28965086, -0.53548324],
[-0.26332688, 0.27541, 0.30080616, 0.09914763, 0.53522176],
[0.7332028, 0.38375184, -0.2831992, -0.9833142, 0.387824],
]
),
"target": np.array([3, 3, 4], dtype=np.int32),
"ignore_index": 4,
"out": np.array([1.1380, 1.7332, 0.0], dtype=np.float32),
"out_sum": np.array([2.8711782], dtype=np.float32),
"out_mean": np.array([1.4355891], dtype=np.float32),
},
{
"input": np.array(
[[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]]
),
"target": np.array([[[1, 0], [0, 1]]], dtype=np.int32),
"ignore_index": 1,
"out": np.array([[[0.0, 0.6832], [0.8544, 0.0]]], dtype=np.float32),
"out_sum": np.array([1.5375525], dtype=np.float32),
"out_mean": np.array([0.76877624], dtype=np.float32),
},
{
"input": np.array(
[
[-0.6980871, 0.4765042, -1.969919, 0.28965086, -0.53548324],
[-0.26332688, 0.27541, 0.30080616, 0.09914763, 0.53522176],
[0.7332028, 0.38375184, -0.2831992, -0.9833142, 0.387824],
]
),
"target": np.array([3, 3, 4], dtype=np.int32),
"out": np.array([1.1380, 1.7332, 1.4287], dtype=np.float32),
"out_sum": np.array([4.2999], dtype=np.float32),
"out_mean": np.array([1.4333], dtype=np.float32),
},
{
"input": np.array(
[[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]]
),
"target": np.array([[[1, 0], [0, 1]]], dtype=np.int32),
"out": np.array([[[0.6882, 0.6832], [0.8544, 1.8006]]], dtype=np.float32),
"out_sum": np.array([4.0263], dtype=np.float32),
"out_mean": np.array([1.0066], dtype=np.float32),
},
{
"input": np.array(
[
[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]],
[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]],
]
),
"target": np.array([[[1, 0], [0, 1]], [[1, 0], [0, 1]]], dtype=np.int32),
"out": np.array(
[
[[0.6882, 0.6832], [0.8544, 1.8006]],
[[0.6882, 0.6832], [0.8544, 1.8006]],
],
dtype=np.float32,
),
"out_sum": np.array([8.0526], dtype=np.float32),
"out_mean": np.array([1.0066], dtype=np.float32),
},
{
"input": np.array([[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]]),
"target": np.array([[1, 0, 0, 1]], dtype=np.int32),
"out": np.array([[0.6882, 0.6832, 0.8544, 1.8006]], dtype=np.float32,),
"out_sum": np.array([4.0263], dtype=np.float32),
"out_mean": np.array([1.0066], dtype=np.float32),
},
]
from automated_test_util import *
@flow.unittest.skip_unless_1n1d()
class TestCrossEntropyLossModule(flow.unittest.TestCase):
def test_CrossEntropyLoss(test_case):
global g_test_samples
for sample in g_test_samples:
ignore_index = sample.get("ignore_index", None)
input = flow.Tensor(sample["input"], dtype=flow.float32)
target = flow.Tensor(sample["target"], dtype=flow.int32)
loss = flow.nn.CrossEntropyLoss(reduction=None, ignore_index=ignore_index)
of_out = loss(input, target)
assert np.allclose(of_out.numpy(), sample["out"], 1e-4, 1e-4)
loss_sum = flow.nn.CrossEntropyLoss(
reduction="sum", ignore_index=ignore_index
)
of_out_sum = loss_sum(input, target)
assert np.allclose(of_out_sum.numpy(), sample["out_sum"], 1e-4, 1e-4)
loss_mean = flow.nn.CrossEntropyLoss(
reduction="mean", ignore_index=ignore_index
)
of_out_mean = loss_mean(input, target)
assert np.allclose(of_out_mean.numpy(), sample["out_mean"], 1e-4, 1e-4)
@unittest.skip("nn.CrossEntropyLoss has bug")
@autotest(n=200)
def test_CrossEntropyLoss_with_random_data(test_case):
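        # draw num_classes and one concrete shape up front so that the
        # module, the input, and the target all agree on them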
num_classes = random()
shape = random_tensor(ndim=random(2, 5), dim1=num_classes).value().shape
m = torch.nn.CrossEntropyLoss(
reduction=oneof("none", "sum", "mean", nothing()),
ignore_index=random(0, num_classes) | nothing(),
)
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor(len(shape), *shape).to(device)
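        # the target omits the class axis (dim 1): shape (N, d1, ..., dK),
        # with integer labels drawn from [0, num_classes)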
target = random_pytorch_tensor(
len(shape) - 1, *shape[:1] + shape[2:], low=0, high=num_classes, dtype=int
).to(device)
y = m(x, target)
return y
if __name__ == "__main__":
    unittest.main()
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
def _gen_random_input():
return np.array(
[
[1.1909, -1.5726, 0.9973, -0.7698, -1.1273],
[1.1354, -1.1815, -1.0553, -0.6178, -2.1103],
]
)
def _test_CrossEntropyLoss_mean(test_case, device):
label = flow.Tensor(np.array([0, 1]), dtype=flow.int32, device=flow.device(device))
predict = flow.Tensor(
np.ones([2, 5]), requires_grad=True, device=flow.device(device)
)
CrossEntropyLoss = flow.nn.CrossEntropyLoss(reduction="mean")
CrossEntropyLoss = CrossEntropyLoss.to(device)
loss = CrossEntropyLoss(predict, label)
loss.backward()
target = np.array(
[
[-0.4000, 0.1000, 0.1000, 0.1000, 0.1000],
[0.1000, -0.4000, 0.1000, 0.1000, 0.1000],
]
)
test_case.assertTrue(predict.grad is not None)
test_case.assertTrue(
np.allclose(predict.grad.numpy(), target, rtol=1e-4, atol=1e-8)
)
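The hard-coded `target` gradients above (and in the helpers below) follow from the softmax cross-entropy derivative, dL/dx = softmax(x) - onehot(label), divided by the batch size under mean reduction. A small reference sketch using the `np` imported above (the helper name is illustrative, not part of this file):

def _np_cross_entropy_grad(logits, labels, reduction="mean"):
    # dL/dlogits = softmax(logits) - onehot(labels); mean reduction
    # additionally divides by the batch size
    exp = np.exp(logits - logits.max(axis=1, keepdims=True))
    probs = exp / exp.sum(axis=1, keepdims=True)
    grad = probs - np.eye(logits.shape[1])[labels]
    return grad / logits.shape[0] if reduction == "mean" else grad

# softmax(np.ones([2, 5])) is 0.2 per class, so with labels [0, 1] the rows
# are [-0.4, 0.1, 0.1, 0.1, 0.1] and [0.1, -0.4, 0.1, 0.1, 0.1] -- exactly
# the `target` checked in _test_CrossEntropyLoss_mean above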
def _test_CrossEntropyLoss_sum(test_case, device):
label = flow.Tensor(np.array([0, 1]), dtype=flow.int32, device=flow.device(device))
predict = flow.Tensor(
np.ones([2, 5]), requires_grad=True, device=flow.device(device)
)
CrossEntropyLoss = flow.nn.CrossEntropyLoss(reduction="sum")
CrossEntropyLoss = CrossEntropyLoss.to(device)
loss = CrossEntropyLoss(predict, label)
loss.backward()
target = np.array(
[
[-0.8000, 0.2000, 0.2000, 0.2000, 0.2000],
[0.2000, -0.8000, 0.2000, 0.2000, 0.2000],
]
)
test_case.assertTrue(predict.grad is not None)
test_case.assertTrue(
np.allclose(predict.grad.numpy(), target, rtol=1e-4, atol=1e-8)
)
def _test_CrossEntropyLoss_none(test_case, device):
label = flow.Tensor(np.array([0, 1]), dtype=flow.int32, device=flow.device(device))
predict = flow.Tensor(
np.ones([2, 5]), requires_grad=True, device=flow.device(device)
)
CrossEntropyLoss = flow.nn.CrossEntropyLoss(reduction="none")
CrossEntropyLoss = CrossEntropyLoss.to(device)
loss = CrossEntropyLoss(predict, label)
loss = loss.sum()
loss.backward()
target = np.array(
[
[-0.8000, 0.2000, 0.2000, 0.2000, 0.2000],
[0.2000, -0.8000, 0.2000, 0.2000, 0.2000],
]
)
test_case.assertTrue(predict.grad is not None)
test_case.assertTrue(
np.allclose(predict.grad.numpy(), target, rtol=1e-4, atol=1e-8)
)
def _test_CrossEntropyLoss_mean_with_random_input(test_case, device):
label = flow.Tensor(np.array([0, 1]), dtype=flow.int32, device=flow.device(device))
predict = flow.Tensor(
_gen_random_input(), requires_grad=True, device=flow.device(device)
)
CrossEntropyLoss = flow.nn.CrossEntropyLoss(reduction="mean")
CrossEntropyLoss = CrossEntropyLoss.to(device)
loss = CrossEntropyLoss(predict, label)
loss.backward()
target = np.array(
[
[-0.2648, 0.0148, 0.1938, 0.0331, 0.0232],
[0.3515, -0.4654, 0.0393, 0.0609, 0.0137],
]
)
test_case.assertTrue(predict.grad is not None)
test_case.assertTrue(
np.allclose(predict.grad.numpy(), target, rtol=1e-2, atol=1e-8)
)
def _test_CrossEntropyLoss_sum_with_random_input(test_case, device):
label = flow.Tensor(np.array([0, 1]), dtype=flow.int32, device=flow.device(device))
predict = flow.Tensor(
_gen_random_input(), requires_grad=True, device=flow.device(device)
)
CrossEntropyLoss = flow.nn.CrossEntropyLoss(reduction="sum")
CrossEntropyLoss = CrossEntropyLoss.to(device)
loss = CrossEntropyLoss(predict, label)
loss.backward()
target = np.array(
[
[-0.5297, 0.0297, 0.3875, 0.0662, 0.0463],
[0.7029, -0.9307, 0.0786, 0.1218, 0.0274],
]
)
test_case.assertTrue(predict.grad is not None)
test_case.assertTrue(
np.allclose(predict.grad.numpy(), target, rtol=1e-2, atol=1e-8)
)
def _test_CrossEntropyLoss_none_with_random_input(test_case, device):
label = flow.Tensor(np.array([0, 1]), dtype=flow.int32, device=flow.device(device))
predict = flow.Tensor(
_gen_random_input(), requires_grad=True, device=flow.device(device)
)
CrossEntropyLoss = flow.nn.CrossEntropyLoss(reduction="none")
CrossEntropyLoss = CrossEntropyLoss.to(device)
loss = CrossEntropyLoss(predict, label)
loss = loss.sum()
loss.backward()
target = np.array(
[
[-0.5297, 0.0297, 0.3875, 0.0662, 0.0463],
[0.7029, -0.9307, 0.0786, 0.1218, 0.0274],
]
)
test_case.assertTrue(predict.grad is not None)
test_case.assertTrue(
np.allclose(predict.grad.numpy(), target, rtol=1e-2, atol=1e-8)
)
def _test_CrossEntropyLoss_none_with_ignore_index(test_case, device):
label = flow.Tensor(np.array([0, 1]), dtype=flow.int32, device=flow.device(device))
predict = flow.Tensor(
np.ones([2, 5]), requires_grad=True, device=flow.device(device)
)
CrossEntropyLoss = flow.nn.CrossEntropyLoss(reduction="none", ignore_index=1)
CrossEntropyLoss = CrossEntropyLoss.to(device)
loss = CrossEntropyLoss(predict, label)
loss = loss.sum()
loss.backward()
target = np.array(
[[-0.8000, 0.2000, 0.2000, 0.2000, 0.2000], [0.0, 0.0, 0.0, 0.0, 0.0],]
)
test_case.assertTrue(predict.grad is not None)
test_case.assertTrue(
np.allclose(predict.grad.numpy(), target, rtol=1e-4, atol=1e-8)
)
def _test_CrossEntropyLoss_mean_with_random_input_with_ignore_index(test_case, device):
label = flow.Tensor(np.array([0, 1]), dtype=flow.int32, device=flow.device(device))
predict = flow.Tensor(
_gen_random_input(), requires_grad=True, device=flow.device(device)
)
CrossEntropyLoss = flow.nn.CrossEntropyLoss(reduction="mean", ignore_index=0)
CrossEntropyLoss = CrossEntropyLoss.to(device)
loss = CrossEntropyLoss(predict, label)
loss.backward()
target = np.array(
[[0.0, 0.0, 0.0, 0.0, 0.0], [0.7030, -0.9307, 0.0786, 0.1218, 0.0274],]
)
test_case.assertTrue(predict.grad is not None)
test_case.assertTrue(
np.allclose(predict.grad.numpy(), target, rtol=1e-2, atol=1e-8)
)
@flow.unittest.skip_unless_1n1d()
class TestCrossEntropyLossModuleGrad(flow.unittest.TestCase):
def test_crossentropyloss_grad(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_CrossEntropyLoss_mean,
_test_CrossEntropyLoss_sum,
_test_CrossEntropyLoss_none,
_test_CrossEntropyLoss_mean_with_random_input,
_test_CrossEntropyLoss_sum_with_random_input,
_test_CrossEntropyLoss_none_with_random_input,
_test_CrossEntropyLoss_none_with_ignore_index,
_test_CrossEntropyLoss_mean_with_random_input_with_ignore_index,
]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
if __name__ == "__main__":
unittest.main()
@@ -82,6 +82,16 @@ class TestFlattenModule(flow.unittest.TestCase):
y = m(x)
return y
@autotest(auto_backward=False)
def test_tensor_against_pytorch(test_case):
device = random_device()
x = random_pytorch_tensor().to(device)
y = x.flatten(
start_dim=random(1, 6).to(int) | nothing(),
end_dim=random(1, 6).to(int) | nothing(),
)
return y
if __name__ == "__main__":
unittest.main()
@@ -20,6 +20,7 @@ import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
from automated_test_util import *
def _test_greater_normal(test_case, device):
@@ -63,7 +64,7 @@ def _test_greater_int_scalar(test_case, device):
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_greater_int_tensor_int_scalr(test_case, device):
def _test_greater_int_tensor_int_scalar(test_case, device):
np_arr = np.random.randint(2, size=(2, 3, 4, 5))
input1 = flow.Tensor(np_arr, dtype=flow.int, device=flow.device(device))
input2 = 1
@@ -83,19 +84,38 @@ def _test_greater_float_scalar(test_case, device):
@flow.unittest.skip_unless_1n1d()
class TestGreater(flow.unittest.TestCase):
def test_greter(test_case):
def test_greater(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_greater_normal,
_test_greater_symbol,
_test_greater_int_scalar,
_test_greater_int_tensor_int_scalr,
_test_greater_int_tensor_int_scalar,
_test_greater_float_scalar,
]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
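
    # comparison ops return bool tensors with no gradients, so the random
    # tests below disable automatic backward checking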
@autotest(n=60, auto_backward=False)
def test_greater_with_random_data(test_case):
device = random_device()
shape = random_tensor().value().shape
x1 = random_pytorch_tensor(len(shape), *shape, requires_grad=False).to(device)
x2 = random_pytorch_tensor(len(shape), *shape, requires_grad=False).to(device)
y = torch.gt(x1, oneof(x2, random().to(int), random().to(float)))
return y
@autotest(n=60, auto_backward=False)
def test_tensor_greater_with_random_data(test_case):
device = random_device()
shape = random_tensor().value().shape
x1 = random_pytorch_tensor(len(shape), *shape, requires_grad=False).to(device)
x2 = random_pytorch_tensor(len(shape), *shape, requires_grad=False).to(device)
y1 = x1.gt(oneof(x2, random().to(int), random().to(float)))
y2 = x1 > x2
return y1, y2
if __name__ == "__main__":
unittest.main()
@@ -20,6 +20,7 @@ import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
from automated_test_util import *
def _test_less_normal(test_case, device):
@@ -92,6 +93,25 @@ class TestLess(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest(n=60, auto_backward=False)
def test_less_with_random_data(test_case):
device = random_device()
shape = random_tensor().value().shape
x1 = random_pytorch_tensor(len(shape), *shape, requires_grad=False).to(device)
x2 = random_pytorch_tensor(len(shape), *shape, requires_grad=False).to(device)
        y = torch.lt(x1, oneof(x2, random().to(int), random().to(float)))
return y
@autotest(n=60, auto_backward=False)
def test_tensor_less_with_random_data(test_case):
device = random_device()
shape = random_tensor().value().shape
x1 = random_pytorch_tensor(len(shape), *shape, requires_grad=False).to(device)
x2 = random_pytorch_tensor(len(shape), *shape, requires_grad=False).to(device)
y1 = x1.lt(oneof(x2, random().to(int), random().to(float)))
y2 = x1 < x2
return y1, y2
if __name__ == "__main__":
unittest.main()
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
def _test_linear_no_bias(test_case, device):
linear = flow.nn.Linear(3, 8, False)
linear = linear.to(device)
input_arr = np.array(
[
[-0.94630778, -0.83378579, -0.87060891],
[2.0289922, -0.28708987, -2.18369248],
[0.35217619, -0.67095644, -1.58943879],
[0.08086036, -1.81075924, 1.20752494],
[0.8901075, -0.49976737, -1.07153746],
[-0.44872912, -1.07275683, 0.06256855],
[-0.22556897, 0.74798368, 0.90416439],
[0.48339456, -2.32742195, -0.59321527],
],
dtype=np.float32,
)
np_weight = np.ones((3, 8)).astype(np.float32)
np_weight.fill(2.3)
x = flow.Tensor(input_arr, device=flow.device(device))
flow.nn.init.constant_(linear.weight, 2.3)
of_out = linear(x)
np_out = np.matmul(input_arr, np_weight)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_linear_with_bias(test_case, device):
linear = flow.nn.Linear(3, 8)
linear = linear.to(device)
input_arr = np.array(
[
[-0.94630778, -0.83378579, -0.87060891],
[2.0289922, -0.28708987, -2.18369248],
[0.35217619, -0.67095644, -1.58943879],
[0.08086036, -1.81075924, 1.20752494],
[0.8901075, -0.49976737, -1.07153746],
[-0.44872912, -1.07275683, 0.06256855],
[-0.22556897, 0.74798368, 0.90416439],
[0.48339456, -2.32742195, -0.59321527],
],
dtype=np.float32,
)
np_weight = np.ones((3, 8)).astype(np.float32)
np_weight.fill(2.068758)
np_bias = np.ones((8))
np_bias.fill(0.23)
x = flow.Tensor(input_arr, device=flow.device(device))
flow.nn.init.constant_(linear.weight, 2.068758)
flow.nn.init.constant_(linear.bias, 0.23)
of_out = linear(x)
np_out = np.matmul(input_arr, np_weight)
np_out += np_bias
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_linear_3_dimension_input(test_case, device):
input_arr = np.random.randn(2, 3, 4)
x = flow.Tensor(input_arr, device=flow.device(device))
linear = flow.nn.Linear(4, 5, True)
linear = linear.to(device)
flow.nn.init.constant_(linear.weight, 5.6)
flow.nn.init.constant_(linear.bias, 0.78)
of_out = linear(x)
np_weight = np.ones((4, 5)).astype(np.float32)
np_weight.fill(5.6)
np_bias = np.ones((5))
np_bias.fill(0.78)
np_out = np.matmul(input_arr, np_weight)
np_out += np_bias
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_linear_4_dimension_input(test_case, device):
input_arr = np.random.randn(4, 5, 6, 7)
x = flow.Tensor(input_arr, device=flow.device(device))
linear = flow.nn.Linear(7, 3, False)
linear = linear.to(device)
flow.nn.init.constant_(linear.weight, 11.3)
of_out = linear(x)
np_weight = np.ones((7, 3)).astype(np.float32)
np_weight.fill(11.3)
np_out = np.matmul(input_arr, np_weight)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_identity(test_case, device):
linear = flow.nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
linear = linear.to(device)
x = flow.Tensor(np.random.rand(2, 3, 4, 5), device=flow.device(device))
y = linear(x)
test_case.assertTrue(np.array_equal(x.numpy(), y.numpy()))
def _test_linear_backward_with_bias(test_case, device):
linear = flow.nn.Linear(3, 8)
linear = linear.to(device)
x = flow.Tensor(
[
[-0.94630778, -0.83378579, -0.87060891],
[2.0289922, -0.28708987, -2.18369248],
[0.35217619, -0.67095644, -1.58943879],
[0.08086036, -1.81075924, 1.20752494],
[0.8901075, -0.49976737, -1.07153746],
[-0.44872912, -1.07275683, 0.06256855],
[-0.22556897, 0.74798368, 0.90416439],
[0.48339456, -2.32742195, -0.59321527],
],
device=flow.device(device),
requires_grad=True,
)
flow.nn.init.constant_(linear.weight, 2.068758)
flow.nn.init.constant_(linear.bias, 0.23)
of_out = linear(x)
of_out = of_out.sum()
of_out.backward()
np_grad = np.array(
[
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
]
)
test_case.assertTrue(np.allclose(np_grad, x.grad.numpy(), 1e-4, 1e-4))
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
from automated_test_util import *
def _test_linear_no_bias(test_case, device):
linear = flow.nn.Linear(3, 8, False)
linear = linear.to(device)
input_arr = np.array(
[
[-0.94630778, -0.83378579, -0.87060891],
[2.0289922, -0.28708987, -2.18369248],
[0.35217619, -0.67095644, -1.58943879],
[0.08086036, -1.81075924, 1.20752494],
[0.8901075, -0.49976737, -1.07153746],
[-0.44872912, -1.07275683, 0.06256855],
[-0.22556897, 0.74798368, 0.90416439],
[0.48339456, -2.32742195, -0.59321527],
],
dtype=np.float32,
)
np_weight = np.ones((3, 8)).astype(np.float32)
np_weight.fill(2.3)
x = flow.Tensor(input_arr, device=flow.device(device))
flow.nn.init.constant_(linear.weight, 2.3)
of_out = linear(x)
np_out = np.matmul(input_arr, np_weight)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_linear_with_bias(test_case, device):
linear = flow.nn.Linear(3, 8)
linear = linear.to(device)
input_arr = np.array(
[
[-0.94630778, -0.83378579, -0.87060891],
[2.0289922, -0.28708987, -2.18369248],
[0.35217619, -0.67095644, -1.58943879],
[0.08086036, -1.81075924, 1.20752494],
[0.8901075, -0.49976737, -1.07153746],
[-0.44872912, -1.07275683, 0.06256855],
[-0.22556897, 0.74798368, 0.90416439],
[0.48339456, -2.32742195, -0.59321527],
],
dtype=np.float32,
)
np_weight = np.ones((3, 8)).astype(np.float32)
np_weight.fill(2.068758)
np_bias = np.ones((8))
np_bias.fill(0.23)
x = flow.Tensor(input_arr, device=flow.device(device))
flow.nn.init.constant_(linear.weight, 2.068758)
flow.nn.init.constant_(linear.bias, 0.23)
of_out = linear(x)
np_out = np.matmul(input_arr, np_weight)
np_out += np_bias
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_linear_3_dimension_input(test_case, device):
input_arr = np.random.randn(2, 3, 4)
x = flow.Tensor(input_arr, device=flow.device(device))
linear = flow.nn.Linear(4, 5, True)
linear = linear.to(device)
flow.nn.init.constant_(linear.weight, 5.6)
flow.nn.init.constant_(linear.bias, 0.78)
of_out = linear(x)
np_weight = np.ones((4, 5)).astype(np.float32)
np_weight.fill(5.6)
np_bias = np.ones((5))
np_bias.fill(0.78)
np_out = np.matmul(input_arr, np_weight)
np_out += np_bias
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_linear_4_dimension_input(test_case, device):
input_arr = np.random.randn(4, 5, 6, 7)
x = flow.Tensor(input_arr, device=flow.device(device))
linear = flow.nn.Linear(7, 3, False)
linear = linear.to(device)
flow.nn.init.constant_(linear.weight, 11.3)
of_out = linear(x)
np_weight = np.ones((7, 3)).astype(np.float32)
np_weight.fill(11.3)
np_out = np.matmul(input_arr, np_weight)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_identity(test_case, device):
linear = flow.nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
linear = linear.to(device)
x = flow.Tensor(np.random.rand(2, 3, 4, 5), device=flow.device(device))
y = linear(x)
test_case.assertTrue(np.array_equal(x.numpy(), y.numpy()))
def _test_linear_backward_with_bias(test_case, device):
linear = flow.nn.Linear(3, 8)
linear = linear.to(device)
x = flow.Tensor(
[
[-0.94630778, -0.83378579, -0.87060891],
[2.0289922, -0.28708987, -2.18369248],
[0.35217619, -0.67095644, -1.58943879],
[0.08086036, -1.81075924, 1.20752494],
[0.8901075, -0.49976737, -1.07153746],
[-0.44872912, -1.07275683, 0.06256855],
[-0.22556897, 0.74798368, 0.90416439],
[0.48339456, -2.32742195, -0.59321527],
],
device=flow.device(device),
requires_grad=True,
)
flow.nn.init.constant_(linear.weight, 2.068758)
flow.nn.init.constant_(linear.bias, 0.23)
of_out = linear(x)
of_out = of_out.sum()
of_out.backward()
np_grad = np.array(
[
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
[16.5501, 16.5501, 16.5501],
]
)
test_case.assertTrue(np.allclose(np_grad, x.grad.numpy(), 1e-4, 1e-4))
@flow.unittest.skip_unless_1n1d()
class TestLinear(flow.unittest.TestCase):
def test_linear_forward(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_linear_no_bias,
_test_linear_with_bias,
_test_linear_3_dimension_input,
_test_linear_4_dimension_input,
_test_identity,
]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
def test_linear_backward(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_linear_backward_with_bias,
]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest()
def test_linear_with_random_data(test_case):
input_size = random()
m = torch.nn.Linear(
in_features=input_size, out_features=random(), bias=random() | nothing()
)
m.train(random())
device = random_device()
m.to(device)
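        # dim1 is tied to in_features so that m(x) is a valid matmul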
x = random_pytorch_tensor(ndim=2, dim1=input_size).to(device)
y = m(x)
return y
@autotest()
def test_identity_with_random_data(test_case):
m = torch.nn.Identity(
x=random().to(int),
unused_argument1=random().to(float),
unused_argument2=random().to(float),
)
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor().to(device)
y = m(x)
return y
if __name__ == "__main__":
unittest.main()
@@ -20,6 +20,7 @@ import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
from automated_test_util import *
def _np_pixel_shuffle(input, h_factor, w_factor):
@@ -89,6 +90,18 @@ class TestPixelShuffleModule(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest()
def test_pixel_shuffle_with_random_data(test_case):
upscale_factor = random().to(int)
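        # PixelShuffle requires the channel count to be divisible by
        # upscale_factor ** 2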
num_channels = upscale_factor * upscale_factor * random().to(int)
m = torch.nn.PixelShuffle(upscale_factor=upscale_factor)
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor(ndim=4, dim1=num_channels).to(device)
y = m(x)
return y
if __name__ == "__main__":
unittest.main()
@@ -121,6 +121,12 @@ class generator:
def __rsub__(self, other):
return neg(self - other)
def __mul__(self, other):
return mul(self, other)
def __rmul__(self, other):
return self * other
def to(self, annotation):
self._to(annotation)
for x in self.children:
@@ -141,6 +147,16 @@ class add(generator):
return self.a.value() + self.b.value()
class mul(generator):
def __init__(self, a, b):
self.a = pack(a)
self.b = pack(b)
super(mul, self).__init__([self.a, self.b])
def _calc_value(self):
return self.a.value() * self.b.value()
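The new `__mul__`/`__rmul__` overloads let a generator be scaled by a plain number, which is what the PixelShuffle test above relies on (`upscale_factor * upscale_factor * random().to(int)`). A tiny assumed usage, taking `random(low, high)` to draw an integer in [low, high):

# draw k in [1, 4) and scale it: the value is always a multiple of 4
channels = random(1, 4) * 4
assert channels.value() in (4, 8, 12)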
class neg(generator):
def __init__(self, a):
self.a = pack(a)
@@ -264,7 +280,18 @@ def random_or_nothing(low, high):
@data_generator(torch.Tensor)
class random_tensor(generator):
def __init__(self, ndim=None, dim0=1, dim1=None, dim2=None, dim3=None, dim4=None):
def __init__(
self,
ndim=None,
dim0=1,
dim1=None,
dim2=None,
dim3=None,
dim4=None,
low=0,
high=1,
dtype=float,
):
if ndim is None:
ndim = random(1, 6)
if dim0 is None:
@@ -283,8 +310,21 @@ class random_tensor(generator):
self.dim2 = pack(dim2).to(int)
self.dim3 = pack(dim3).to(int)
self.dim4 = pack(dim4).to(int)
self.low = pack(low).to(float)
self.high = pack(high).to(float)
self.dtype = pack(dtype)
super().__init__(
[self.ndim, self.dim0, self.dim1, self.dim2, self.dim3, self.dim4]
[
self.ndim,
self.dim0,
self.dim1,
self.dim2,
self.dim3,
self.dim4,
self.low,
self.high,
self.dtype,
]
)
def _calc_value(self):
@@ -294,6 +334,9 @@ class random_tensor(generator):
dim2 = self.dim2.value()
dim3 = self.dim3.value()
dim4 = self.dim4.value()
low = self.low.value()
high = self.high.value()
dtype = self.dtype.value()
shape = rng.integers(low=1, high=8, size=ndim)
if dim0 is not None:
shape[0] = dim0
@@ -305,8 +348,14 @@ class random_tensor(generator):
shape[3] = dim3
if ndim == 5:
shape[4] = dim4
np_arr = rng.random(shape)
return torch.Tensor(np_arr)
if dtype == float:
np_arr = rng.random(shape)
return torch.Tensor(np_arr)
elif dtype == int:
np_arr = rng.integers(low=low, high=high, size=shape)
return torch.tensor(np_arr, dtype=torch.int64)
else:
raise NotImplementedError(f"Not implemented dtype {dtype} in random")
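With the new `low`, `high`, and `dtype` parameters, integer-valued tensors (for example the class-label targets used by the CrossEntropyLoss test above) can be drawn directly. A usage sketch:

# a 3x5 int64 label tensor with entries in [0, 10)
labels = random_tensor(ndim=2, dim0=3, dim1=5, low=0, high=10, dtype=int).value()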
@data_generator(bool)
@@ -627,6 +676,7 @@ __all__ = [
"random_device",
"random",
"random_or_nothing",
"oneof",
"constant",
"nothing",
"test_module_against_pytorch",
......
@@ -301,14 +301,25 @@ def autotest(n=20, auto_backward=True, rtol=1e-4, atol=1e-5):
def random_pytorch_tensor(
ndim=None, dim0=1, dim1=None, dim2=None, dim3=None, dim4=None, requires_grad=True
ndim=None,
dim0=1,
dim1=None,
dim2=None,
dim3=None,
dim4=None,
low=0,
high=1,
dtype=float,
requires_grad=True,
):
if isinstance(requires_grad, generator):
requires_grad = requires_grad.value()
pytorch_tensor = (
random_tensor(ndim, dim0, dim1, dim2, dim3, dim4)
random_tensor(ndim, dim0, dim1, dim2, dim3, dim4, low, high, dtype)
.value()
.requires_grad_(requires_grad)
.requires_grad_(
requires_grad and dtype != int
) # Only Tensors of floating point dtype can require gradients
)
flow_tensor = flow.tensor(pytorch_tensor.detach().cpu().numpy(), requires_grad=True)
return GetDualObject("unused", pytorch_tensor, flow_tensor)
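The dual object returned here forwards every method call to both the PyTorch tensor and the OneFlow tensor, which is why the tests above can treat it as a single tensor. A sketch under that assumption:

@autotest(n=10, auto_backward=False)
def test_int_tensor_compare(test_case):
    # dtype=int tensors cannot require gradients (see the comment above),
    # so automatic backward checking is turned off
    x = random_pytorch_tensor(ndim=2, low=0, high=5, dtype=int)
    return x.to(random_device()).gt(2)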
......