Unverified Commit 975d9a96 authored by Xiaoyu Zhang, committed by GitHub

Add new autotest (#5617)


* fix upsample nearest bug

* fix upsample nearest bug (#5347)

Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>

* fix upsample bilinear bug

* fix export bug

* fix conflict

* auto format by CI

* fix license

* fix ci error

* Delete version.py

* auto format by CI

Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
Co-authored-by: oneflow-ci-bot <ci-bot@oneflow.org>
parent 8a203cd4
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/op_expr_grad_function.h"
#include "oneflow/core/functional/functional.h"
namespace oneflow {
namespace one {
struct ClipByScalarMaxInterpState : public OpExprInterpState {
bool requires_grad;
functional::Scalar max;
};
class ClipByScalarMax : public OpExprGradFunction<ClipByScalarMaxInterpState> {
public:
Maybe<void> Init(const OpExpr& op) override {
const auto* fw_op_expr = dynamic_cast<const UserOpExpr*>(&op);
CHECK_NOTNULL_OR_RETURN(fw_op_expr);
base_attrs_ = MakeAttrMapFromUserOpConf(fw_op_expr->proto());
return Maybe<void>::Ok();
}
Maybe<void> Capture(ClipByScalarMaxInterpState* ctx, const TensorTuple& inputs,
const TensorTuple& outputs, const AttrMap& attrs) const override {
CHECK_EQ_OR_RETURN(inputs.size(), 1);
ctx->requires_grad = inputs.at(0)->requires_grad();
if (!ctx->requires_grad) { return Maybe<void>::Ok(); }
ctx->SaveTensorForBackward(inputs.at(0));
ComposedAttrMap composed_attrs(attrs, base_attrs_);
if (IsFloatingDataType(inputs.at(0)->dtype())) {
ctx->max = functional::Scalar(JUST(composed_attrs.GetAttr<double>("floating_max")));
} else if (IsIntegralDataType(inputs.at(0)->dtype())) {
ctx->max = functional::Scalar(JUST(composed_attrs.GetAttr<int64_t>("integral_max")));
} else {
UNIMPLEMENTED_THEN_RETURN() << "Data type is not floating or integral type.";
}
return Maybe<void>::Ok();
}
Maybe<void> Apply(const ClipByScalarMaxInterpState* ctx, const TensorTuple& out_grads,
TensorTuple* in_grads) const override {
CHECK_EQ_OR_RETURN(out_grads.size(), 1);
in_grads->resize(1);
if (ctx->requires_grad) {
const auto& x = ctx->SavedTensors().at(0);
in_grads->at(0) = JUST(functional::ClipByScalarMaxGrad(out_grads.at(0), x, ctx->max));
}
return Maybe<void>::Ok();
}
private:
AttrMap base_attrs_;
};
REGISTER_OP_EXPR_GRAD_FUNCTION("clip_by_scalar_max", ClipByScalarMax);
} // namespace one
} // namespace oneflow
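For orientation, the backward registered above for clip_by_scalar_max is expected to pass the incoming gradient through wherever the input stayed below the max and zero it where the value was clipped. A rough NumPy sketch of that behaviour (illustrative only, not the OneFlow kernel; how x == max is treated may differ):

import numpy as np

def clip_by_scalar_max_grad(dy, x, max_value):
    # dy flows through where x was not clipped; clipped positions get zero gradient.
    return np.where(x < max_value, dy, 0.0)

x = np.array([-2.0, 0.5, 3.0])
dy = np.ones_like(x)
print(clip_by_scalar_max_grad(dy, x, 1.0))  # [1. 1. 0.]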
@@ -199,7 +199,7 @@ class Tanh(Module):
out = \\frac{e^x-e^{-x}}{e^x+e^{-x}}
Args:
x (oneflow.Tensor): A Tensor
input (oneflow.Tensor): A Tensor
Returns:
oneflow.Tensor: The result Tensor
@@ -223,12 +223,12 @@ class Tanh(Module):
def __init__(self):
super().__init__()
def forward(self, x):
return flow.F.tanh(x)
def forward(self, input):
return flow.F.tanh(input)
@register_tensor_op("tanh")
def tanh_op(x):
def tanh_op(input):
"""This operator computes the hyperbolic tangent value of Tensor.
The equation is:
@@ -258,7 +258,7 @@ def tanh_op(x):
tensor([-0.7616, 0. , 0.7616], dtype=oneflow.float32)
"""
return Tanh()(x)
return Tanh()(input)
class ELU(Module):
......
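The Tanh docstring above reports tanh([-1, 0, 1]) ≈ [-0.7616, 0, 0.7616]; a quick NumPy check of the formula out = (e^x - e^(-x)) / (e^x + e^(-x)):

import numpy as np

x = np.array([-1.0, 0.0, 1.0])
out = (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))
print(out)                            # approximately [-0.7616  0.      0.7616]
print(np.allclose(out, np.tanh(x)))   # True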
@@ -60,7 +60,7 @@ class TestReLUModule(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
_test_relu_impl(test_case, *arg)
@autotest
@autotest()
def test_relu_module_with_random_data(test_case):
m = torch.nn.ReLU()
m.train(random())
@@ -101,7 +101,7 @@ class TestReLU6Module(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
_test_relu6_impl(test_case, *arg)
@autotest
@autotest()
def test_relu6_module_with_random_data(test_case):
m = torch.nn.ReLU6()
m.train(random())
@@ -153,7 +153,7 @@ class TestTanh(flow.unittest.TestCase):
_test_tanh_nn_impl(test_case, *arg)
_test_tanh_function_impl(test_case, *arg)
@autotest
@autotest()
def test_tanh_module_with_random_data(test_case):
m = torch.nn.Tanh()
m.train(random())
@@ -163,11 +163,11 @@ class TestTanh(flow.unittest.TestCase):
y = m(x)
return y
@autotest
@autotest()
def test_flow_tanh_with_random_data(test_case):
device = random_device()
x = random_pytorch_tensor().to(device)
y = flow.tanh(x)
y = torch.tanh(x)
return y
@@ -199,7 +199,7 @@ class TestELUModule(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
_test_elu_function_impl(test_case, *arg)
@autotest
@autotest()
def test_elu_module_with_random_data(test_case):
m = torch.nn.ELU(alpha=random() | nothing())
m.train(random())
@@ -678,7 +678,8 @@ class TestSoftplusModule(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest
@unittest.skip("pytorch softplus backward has bug")
@autotest()
def test_softplus_module_with_random_data(test_case):
m = torch.nn.Softplus(beta=random() | nothing(), threshold=random() | nothing())
m.train(random())
@@ -782,7 +783,7 @@ class TestLeakyReLUModule(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
_test_leakyrelu_impl(test_case, *arg)
@autotest
@autotest()
def test_leakyrelu_module_with_random_data(test_case):
m = torch.nn.LeakyReLU(negative_slope=random() | nothing())
m.train(random())
......
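The recurring change in the activation tests above replaces the bare @autotest with @autotest(). In automated_test_util the autotest helper appears to be a decorator factory (it accepts arguments such as n and auto_backward, as in @autotest(n=1, auto_backward=False) further down in this commit), so it must be called to produce the actual decorator. A simplified, hypothetical stand-in showing why the bare form misbehaves:

# Hypothetical sketch only; the real autotest in automated_test_util differs in detail.
def autotest(n=1, auto_backward=True):
    def decorator(test_fn):
        def wrapper(*args, **kwargs):
            for _ in range(n):            # run the OneFlow-vs-PyTorch comparison n times
                test_fn(*args, **kwargs)
        return wrapper
    return decorator

# With the bare form, @autotest calls the factory with the test function as its
# first argument and binds the returned decorator as the test method, so the test
# body never actually runs; @autotest() builds the decorator first and wraps the
# test as intended.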
@@ -22,6 +22,7 @@ from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from automated_test_util import *
def _test_addmm(test_case, shape, alpha, beta, device):
@@ -65,6 +66,36 @@ class TestAddmm(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest()
def test_addmm_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor(ndim=2, dim0=2, dim1=3).to(device)
mat1 = random_pytorch_tensor(ndim=2, dim0=2, dim1=4).to(device)
mat2 = random_pytorch_tensor(ndim=2, dim0=4, dim1=3).to(device)
y = torch.addmm(
input,
mat1,
mat2,
beta=random().to(float) | nothing(),
alpha=random().to(float) | nothing(),
)
return y
@autotest()
def test_addmm_broadcast_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor(ndim=2, dim0=1, dim1=1).to(device)
mat1 = random_pytorch_tensor(ndim=2, dim0=2, dim1=4).to(device)
mat2 = random_pytorch_tensor(ndim=2, dim0=4, dim1=3).to(device)
y = torch.addmm(
input,
mat1,
mat2,
beta=random().to(float) | nothing(),
alpha=random().to(float) | nothing(),
)
return y
if __name__ == "__main__":
unittest.main()
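Both new tests exercise torch.addmm, which computes beta * input + alpha * (mat1 @ mat2); in the broadcast variant the (1, 1) input is broadcast against the (2, 3) matmul result. A minimal NumPy reference for the shapes used above:

import numpy as np

mat1 = np.random.randn(2, 4)
mat2 = np.random.randn(4, 3)
alpha, beta = 1.5, 0.5
input_full = np.random.randn(2, 3)                    # first test
input_bc = np.random.randn(1, 1)                      # broadcast test
out_full = beta * input_full + alpha * (mat1 @ mat2)  # shape (2, 3)
out_bc = beta * input_bc + alpha * (mat1 @ mat2)      # also (2, 3) after broadcasting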
@@ -516,6 +516,17 @@ class TestBatchNorm(flow.unittest.TestCase):
n=10,
)
@autotest(n=1, auto_backward=False)
def test_batchnorm3d_module_with_random_data(test_case):
channel = random().to(int)
m = torch.nn.BatchNorm2d(num_features=channel, track_running_stats=False)
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor(ndim=4, dim1=channel, requires_grad=False).to(device)
y = m(x)
return y
if __name__ == "__main__":
unittest.main()
@@ -22,6 +22,7 @@ from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from automated_test_util import *
def _test_ceil_impl(test_case, device, shape):
@@ -46,6 +47,13 @@ class TestCeilModule(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest()
def test_ceil_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.ceil(input)
return y
if __name__ == "__main__":
unittest.main()
@@ -22,6 +22,7 @@ from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from automated_test_util import *
def _test_clamp(test_case, shape, device):
@@ -106,6 +107,52 @@ class TestClampModule(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest()
def test_clamp_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.clamp(input, min=random().to(float), max=random().to(float))
return y
@autotest()
def test_clamp_min_none_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.clamp(input, min=random().to(float), max=random().to(float))
return y
@autotest()
def test_clamp_max_none_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.clamp(
input, min=random().to(float), max=random().to(float) | nothing()
)
return y
@autotest()
def test_clip_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.clip(input, min=random().to(float), max=random().to(float))
return y
@autotest()
def test_clip_min_none_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.clip(input, min=random().to(float), max=random().to(float))
return y
@autotest()
def test_clip_max_none_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.clip(
input, min=random().to(float), max=random().to(float) | nothing()
)
return y
if __name__ == "__main__":
unittest.main()
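torch.clip is an alias of torch.clamp; both bound the input between min and max, and omitting one bound (the | nothing() cases above) leaves that side unbounded. The forward computation in NumPy terms:

import numpy as np

x = np.array([-3.0, 0.2, 5.0])
lo, hi = -1.0, 1.0
out = np.minimum(np.maximum(x, lo), hi)  # clamp(x, min=lo, max=hi)
print(out)                               # [-1.   0.2  1. ]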
@@ -22,6 +22,7 @@ from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from automated_test_util import *
def _test_expm1_impl(test_case, device, shape):
@@ -46,6 +47,13 @@ class TestExpm1Module(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest()
def test_expm1_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.expm1(input)
return y
if __name__ == "__main__":
unittest.main()
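expm1(x) computes e^x - 1 while staying accurate for small x, where evaluating exp(x) and then subtracting 1 loses digits to cancellation; its derivative is e^x. A quick NumPy illustration:

import numpy as np

x = 1e-10
print(np.exp(x) - 1.0)  # suffers from cancellation; several digits of error
print(np.expm1(x))      # stays accurate for small x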
@@ -14,7 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import unittest
from collections import OrderedDict
@@ -714,13 +713,89 @@ class TestTensor(flow.unittest.TestCase):
np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05, equal_nan=True)
)
def test_tensor_addmm_(test_case):
input = flow.Tensor(np.random.randn(2, 6), dtype=flow.float32)
mat1 = flow.Tensor(np.random.randn(2, 3), dtype=flow.float32)
mat2 = flow.Tensor(np.random.randn(3, 6), dtype=flow.float32)
of_out = input.addmm(mat1, mat2, alpha=1, beta=2)
np_out = np.add(2 * input.numpy(), 1 * np.matmul(mat1.numpy(), mat2.numpy()))
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
@autotest()
def test_addmm_tensor_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor(ndim=2, dim0=2, dim1=3).to(device)
mat1 = random_pytorch_tensor(ndim=2, dim0=2, dim1=4).to(device)
mat2 = random_pytorch_tensor(ndim=2, dim0=4, dim1=3).to(device)
y = input.addmm(
mat1,
mat2,
beta=random().to(float) | nothing(),
alpha=random().to(float) | nothing(),
)
return y
@autotest()
def test_addmm_broadcast_tensor_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor(ndim=2, dim0=1, dim1=1).to(device)
mat1 = random_pytorch_tensor(ndim=2, dim0=2, dim1=4).to(device)
mat2 = random_pytorch_tensor(ndim=2, dim0=4, dim1=3).to(device)
y = input.addmm(
mat1,
mat2,
beta=random().to(float) | nothing(),
alpha=random().to(float) | nothing(),
)
return y
@autotest()
def test_clamp_tensor_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = input.clamp(min=random().to(float), max=random().to(float))
return y
@autotest()
def test_clamp_minnone_tensor_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = input.clamp(min=random().to(float) | nothing(), max=random().to(float))
return y
@autotest()
def test_clamp_maxnone_tensor_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = input.clamp(min=random().to(float), max=random().to(float) | nothing())
return y
@autotest()
def test_clip_tensor_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = input.clip(min=random().to(float), max=random().to(float))
return y
@autotest()
def test_clip_minnone_tensor_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = input.clip(min=random().to(float) | nothing(), max=random().to(float))
return y
@autotest()
def test_clip_maxnone_tensor_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = input.clip(min=random().to(float), max=random().to(float) | nothing())
return y
@autotest()
def test_ceil_tensor_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = input.ceil()
return y
@autotest()
def test_expm1_tensor_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = input.expm1()
return y
def test_norm_tensor_function(test_case):
input = flow.Tensor(
@@ -818,7 +893,7 @@
)
def test_tensor_fmod(test_case):
x = flow.Tensor(np.random.uniform(-100, 100, (5, 5)), requires_grad=True)
y = random.uniform(-10, 10)
y = np.random.uniform(-10, 10)
of_out = x.fmod(y)
np_out = np.sign(x.numpy()) * np.abs(np.fmod(x.numpy(), y))
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 0.0001, 0.0001))
@@ -834,7 +909,7 @@ class TestTensor(flow.unittest.TestCase):
)
def test_magic_fmod(test_case):
x = flow.Tensor(np.random.uniform(-100, 100, (5, 5)), requires_grad=True)
y = random.uniform(-10, 10)
y = np.random.uniform(-10, 10)
of_out = x % y
np_out = np.sign(x.numpy()) * np.abs(np.fmod(x.numpy(), y))
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 0.0001, 0.0001))
@@ -844,32 +919,6 @@ class TestTensor(flow.unittest.TestCase):
np.allclose(x.grad.numpy(), np.ones((5, 5)), 0.0001, 0.0001)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_ceil(test_case):
x = flow.Tensor(np.random.randn(2, 3), requires_grad=True)
of_out = x.ceil()
np_out = np.ceil(x.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 0.0001, 0.0001))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(
np.allclose(x.grad.numpy(), np.zeros((2, 3)), 0.0001, 0.0001)
)
def test_tensor_expm1(test_case):
x = flow.Tensor(np.random.randn(2, 3), requires_grad=True)
of_out = x.expm1()
np_out = np.expm1(x.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 0.0001, 0.0001))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(
np.allclose(x.grad.numpy(), np.exp(x.numpy()), 0.0001, 0.0001)
)
def test_tensor_mish(test_case):
def np_mish(x):
f = 1 + np.exp(x)
......