From 1e351cf84d2842bc10d1773ab981744609b6412e Mon Sep 17 00:00:00 2001
From: liufengwei0103 <2472937968@qq.com>
Date: Mon, 19 Jul 2021 09:53:38 +0800
Subject: [PATCH] registry_callback_fn return maybe (#5456)

* modified SetInputArgModifyFn
* Delete the CHECK changes in the assign_op.cpp file
* Format
* Modified the OutputArgModifyFn interface
* add return
* maybe error stack from CheckAndConstructOp to OutputArgModifier callback function
* maybe error stack from CheckAndConstructOp to OutputArgModifier callback function
* OutputArgModifier return maybe part_1
* maybe error stack from CheckAndConstructOp to OutputArgModifier callback function
* input_arg_modifier return maybe
* gen_bw_fn return maybe
* bw_gen_fn return maybe
* registry_callback_fn return maybe
* fix bug after merge master
* fix bug

Co-authored-by: aishangjj <702572275@qq.com>
---
 oneflow/user/kernels/same_padding_kernel.cpp  | 10 ++--
 oneflow/user/ops/cast_to_tick_op.cpp          |  2 +-
 oneflow/user/ops/conv_op.cpp                  |  6 +-
 oneflow/user/ops/deconv_op.cpp                |  4 +-
 oneflow/user/ops/elu_op.cpp                   |  2 +-
 ...fused_scale_tril_softmax_mask_scale_op.cpp |  8 +--
 oneflow/user/ops/gelu_op.cpp                  |  2 +-
 oneflow/user/ops/hardsigmoid_op.cpp           |  2 +-
 oneflow/user/ops/hardswish_op.cpp             |  2 +-
 oneflow/user/ops/hardtanh_op.cpp              |  6 +-
 oneflow/user/ops/layer_norm_op.cpp            |  2 +-
 oneflow/user/ops/leaky_relu_op.cpp            |  2 +-
 oneflow/user/ops/nn_util.cpp                  | 33 ++++++-----
 oneflow/user/ops/nn_util.h                    | 14 ++---
 oneflow/user/ops/normalization_op.cpp         |  4 +-
 oneflow/user/ops/pack_op.cpp                  |  2 +-
 oneflow/user/ops/pad_op.cpp                   |  8 +--
 oneflow/user/ops/parallel_cast_op.cpp         |  2 +-
 oneflow/user/ops/relu_op.cpp                  |  2 +-
 oneflow/user/ops/reshape_user_op_util.cpp     |  2 +-
 oneflow/user/ops/same_padding_op.cpp          | 55 ++++++++++---------
 oneflow/user/ops/sigmoid_op.cpp               |  2 +-
 oneflow/user/ops/softmax_op.cpp               |  2 +-
 oneflow/user/ops/squeeze_op.cpp               |  4 +-
 oneflow/user/ops/test_ops.cpp                 |  8 +--
 oneflow/user/ops/transpose_ops.cpp            |  6 +-
 oneflow/user/ops/unpack_op.cpp                |  4 +-
 27 files changed, 102 insertions(+), 94 deletions(-)

diff --git a/oneflow/user/kernels/same_padding_kernel.cpp b/oneflow/user/kernels/same_padding_kernel.cpp
index 138fe30d3..241f30d7e 100644
--- a/oneflow/user/kernels/same_padding_kernel.cpp
+++ b/oneflow/user/kernels/same_padding_kernel.cpp
@@ -42,8 +42,9 @@ class SamePaddingKernel final : public user_op::OpKernel {
     for (int32_t i = 0; i < num_spatial_dims; ++i) {
       int32_t padding_small = 0;
       int32_t padding_large = 0;
-      CalcSamePadding(x->shape().At(idx_offset + i), kernel_size.at(i), dilation_rate.at(i),
-                      strides.at(i), &padding_small, &padding_large);
+      CHECK_JUST(CalcSamePadding(x->shape().At(idx_offset + i), kernel_size.at(i),
+                                 dilation_rate.at(i), strides.at(i), &padding_small,
+                                 &padding_large));
       if (padding == "same_lower") {
         padding_before[idx_offset + i] = padding_large;
       } else if (padding == "same_upper") {
@@ -123,8 +124,9 @@ class SamePaddingGradKernel final : public user_op::OpKernel {
     for (int32_t i = 0; i < num_spatial_dims; ++i) {
       int32_t padding_small = 0;
       int32_t padding_large = 0;
-      CalcSamePadding(dx->shape().At(idx_offset + i), kernel_size.at(i), dilation_rate.at(i),
-                      strides.at(i), &padding_small, &padding_large);
+      CHECK_JUST(CalcSamePadding(dx->shape().At(idx_offset + i), kernel_size.at(i),
+                                 dilation_rate.at(i), strides.at(i), &padding_small,
+                                 &padding_large));
       if (padding == "same_lower") {
         padding_before[idx_offset + i] = padding_large;
       } else if (padding == "same_upper") {
diff --git a/oneflow/user/ops/cast_to_tick_op.cpp b/oneflow/user/ops/cast_to_tick_op.cpp
index c5240419f..36ed5c3b5 100644
--- a/oneflow/user/ops/cast_to_tick_op.cpp
+++ b/oneflow/user/ops/cast_to_tick_op.cpp
@@ -37,7 +37,7 @@ REGISTER_NO_GRAD_USER_OP("cast_to_tick")
       const cfg::ParallelDistribution& in_dis_hint =
           ctx->ParallelDistributionHint4InputArgNameAndIndex("in", 0);
       const Shape& parallel_hierarchy = ctx->parallel_hierarchy();
-      CHECK_EQ(in_dis_hint.sbp_parallel_size(), parallel_hierarchy.NumAxes());
+      CHECK_EQ_OR_RETURN(in_dis_hint.sbp_parallel_size(), parallel_hierarchy.NumAxes());
       cfg::ParallelDistribution* in_distribution =
           ctx->ParallelDistribution4ArgNameAndIndex("in", 0);
diff --git a/oneflow/user/ops/conv_op.cpp b/oneflow/user/ops/conv_op.cpp
index e29c110ee..6012a53ef 100644
--- a/oneflow/user/ops/conv_op.cpp
+++ b/oneflow/user/ops/conv_op.cpp
@@ -23,7 +23,7 @@ namespace {
 template<size_t NDims>
 Maybe<void> InferTensorDesc4Conv(user_op::InferContext* ctx) {
   const user_op::TensorDesc& in = ctx->InputTensorDesc("in", 0);
-  CHECK_EQ(NDims + 2, in.shape().NumAxes());
+  CHECK_EQ_OR_RETURN(NDims + 2, in.shape().NumAxes());
   auto data_format = ctx->Attr<std::string>("data_format");
   auto kernel_size = ctx->Attr<std::vector<int32_t>>("kernel_size");
@@ -44,8 +44,8 @@ Maybe<void> InferTensorDesc4Conv(user_op::InferContext* ctx) {
   const size_t c_dim = data_format == "channels_first" ? 1 : NDims + 1;
   out_shape.at(c_dim) = filters;
   for (int32_t i = 0; i < NDims; ++i) {
-    CalcConvOut(in.shape().At(idx_offset + i), kernel_size.at(i), dilation_rate.at(i),
-                strides.at(i), padding_before.at(i), &out_shape.at(idx_offset + i));
+    JUST(CalcConvOut(in.shape().At(idx_offset + i), kernel_size.at(i), dilation_rate.at(i),
+                     strides.at(i), padding_before.at(i), &out_shape.at(idx_offset + i)));
   }
   *out->mut_is_dynamic() = in.is_dynamic();
   *out->mut_shape() = Shape(out_shape);
diff --git a/oneflow/user/ops/deconv_op.cpp b/oneflow/user/ops/deconv_op.cpp
index bf0532b6d..387b3da2f 100644
--- a/oneflow/user/ops/deconv_op.cpp
+++ b/oneflow/user/ops/deconv_op.cpp
@@ -23,7 +23,7 @@ namespace {
 template<size_t NDims>
 Maybe<void> InferTensorDesc4DeConv(user_op::InferContext* ctx) {
   const user_op::TensorDesc& in = ctx->InputTensorDesc("in", 0);
-  CHECK_EQ(NDims + 2, in.shape().NumAxes());
+  CHECK_EQ_OR_RETURN(NDims + 2, in.shape().NumAxes());
   const std::string& data_format = ctx->Attr<std::string>("data_format");
   const auto& kernel_size = ctx->Attr<std::vector<int32_t>>("kernel_size");
@@ -69,7 +69,7 @@ Maybe<void> InferTensorDesc4DeConv(user_op::InferContext* ctx) {
     for (size_t i = 0; i < NDims; ++i) { weight_shape.at(idx_offset + i) = kernel_size.at(i); }
     const user_op::TensorDesc& weight = ctx->InputTensorDesc("weight", 0);
-    CHECK_EQ(weight.shape(), Shape(weight_shape));
+    CHECK_EQ_OR_RETURN(weight.shape(), Shape(weight_shape));
   }
   return Maybe<void>::Ok();
diff --git a/oneflow/user/ops/elu_op.cpp b/oneflow/user/ops/elu_op.cpp
index fd3ab133d..13cf0de77 100644
--- a/oneflow/user/ops/elu_op.cpp
+++ b/oneflow/user/ops/elu_op.cpp
@@ -51,7 +51,7 @@ REGISTER_USER_OP("elu_grad")
       const Shape& x_shape = ctx->InputShape("x", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == x_shape);
+      CHECK_OR_RETURN(dy_shape == x_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/fused_scale_tril_softmax_mask_scale_op.cpp b/oneflow/user/ops/fused_scale_tril_softmax_mask_scale_op.cpp
index 88e3fc591..f0905fd8f 100644
--- a/oneflow/user/ops/fused_scale_tril_softmax_mask_scale_op.cpp
+++ b/oneflow/user/ops/fused_scale_tril_softmax_mask_scale_op.cpp
@@ -51,7 +51,7 @@ REGISTER_USER_OP("fused_tril_scale_softmax_mask_scale")
     })
     .SetGetSbpFn([](user_op::SbpContext* ctx) -> Maybe<void> {
       const user_op::TensorDesc& x_tensor = ctx->LogicalTensorDesc4InputArgNameAndIndex("x", 0);
-      CHECK_GE(x_tensor.shape().NumAxes(), 2);
+      CHECK_GE_OR_RETURN(x_tensor.shape().NumAxes(), 2);
       FOR_RANGE(int64_t, axis, 0, x_tensor.shape().NumAxes() - 2) {
         ctx->NewBuilder()
             .Split(user_op::OpArg("x", 0), axis)
@@ -75,7 +75,7 @@ REGISTER_USER_OP("fused_tril_scale_softmax_mask_scale_grad")
       const user_op::TensorDesc& softmax_y_desc = ctx->InputTensorDesc("softmax_y", 0);
       const user_op::TensorDesc& dy_desc = ctx->InputTensorDesc("dy", 0);
       user_op::TensorDesc* dx_desc = ctx->OutputTensorDesc("dx", 0);
-      CHECK(dy_desc.shape() == softmax_y_desc.shape());
+      CHECK_OR_RETURN(dy_desc.shape() == softmax_y_desc.shape());
       *dx_desc->mut_shape() = dy_desc.shape();
       *dx_desc->mut_is_dynamic() = dy_desc.is_dynamic();
       return Maybe<void>::Ok();
@@ -84,13 +84,13 @@ REGISTER_USER_OP("fused_tril_scale_softmax_mask_scale_grad")
       const user_op::TensorDesc& softmax_y_desc = ctx->InputTensorDesc("softmax_y", 0);
       const user_op::TensorDesc& dy_desc = ctx->InputTensorDesc("dy", 0);
       user_op::TensorDesc* dx_desc = ctx->OutputTensorDesc("dx", 0);
-      CHECK(dy_desc.data_type() == softmax_y_desc.data_type());
+      CHECK_OR_RETURN(dy_desc.data_type() == softmax_y_desc.data_type());
       *dx_desc->mut_data_type() = dy_desc.data_type();
       return Maybe<void>::Ok();
     })
     .SetGetSbpFn([](user_op::SbpContext* ctx) -> Maybe<void> {
       const user_op::TensorDesc& dy_tensor = ctx->LogicalTensorDesc4InputArgNameAndIndex("dy", 0);
-      CHECK_GE(dy_tensor.shape().NumAxes(), 2);
+      CHECK_GE_OR_RETURN(dy_tensor.shape().NumAxes(), 2);
       FOR_RANGE(int64_t, axis, 0, dy_tensor.shape().NumAxes() - 2) {
         ctx->NewBuilder()
             .Split(user_op::OpArg("softmax_y", 0), axis)
diff --git a/oneflow/user/ops/gelu_op.cpp b/oneflow/user/ops/gelu_op.cpp
index 902fadbde..d2f374052 100644
--- a/oneflow/user/ops/gelu_op.cpp
+++ b/oneflow/user/ops/gelu_op.cpp
@@ -49,7 +49,7 @@ REGISTER_USER_OP("gelu_grad")
       const Shape& x_shape = ctx->InputShape("x", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == x_shape);
+      CHECK_OR_RETURN(dy_shape == x_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/hardsigmoid_op.cpp b/oneflow/user/ops/hardsigmoid_op.cpp
index 1ec34740e..cdf43671a 100644
--- a/oneflow/user/ops/hardsigmoid_op.cpp
+++ b/oneflow/user/ops/hardsigmoid_op.cpp
@@ -51,7 +51,7 @@ REGISTER_USER_OP("hardsigmoid_grad")
       const Shape& x_shape = ctx->InputShape("x", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == x_shape);
+      CHECK_OR_RETURN(dy_shape == x_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/hardswish_op.cpp b/oneflow/user/ops/hardswish_op.cpp
index ffef66123..45d0ebe23 100644
--- a/oneflow/user/ops/hardswish_op.cpp
+++ b/oneflow/user/ops/hardswish_op.cpp
@@ -49,7 +49,7 @@ REGISTER_USER_OP("hardswish_grad")
       const Shape& x_shape = ctx->InputShape("x", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == x_shape);
+      CHECK_OR_RETURN(dy_shape == x_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/hardtanh_op.cpp b/oneflow/user/ops/hardtanh_op.cpp
index 32e0a69fb..2962c49e9 100644
--- a/oneflow/user/ops/hardtanh_op.cpp
+++ b/oneflow/user/ops/hardtanh_op.cpp
@@ -30,7 +30,7 @@ REGISTER_USER_OP("hardtanh")
       *out_shape = in_shape;
       double min_val = ctx->Attr<double>("min_val");
       double max_val = ctx->Attr<double>("max_val");
-      CHECK_LE(min_val, max_val);
+      CHECK_LE_OR_RETURN(min_val, max_val);
       return Maybe<void>::Ok();
     })
     .SetGetSbpFn([](user_op::SbpContext* ctx) -> Maybe<void> {
@@ -58,11 +58,11 @@ REGISTER_USER_OP("hardtanh_grad")
       const Shape& y_shape = ctx->InputShape("y", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == y_shape);
+      CHECK_OR_RETURN(dy_shape == y_shape);
       *dx_shape = dy_shape;
       double min_val = ctx->Attr<double>("min_val");
       double max_val = ctx->Attr<double>("max_val");
-      CHECK_LE(min_val, max_val);
+      CHECK_LE_OR_RETURN(min_val, max_val);
       return Maybe<void>::Ok();
     })
     .SetGetSbpFn([](user_op::SbpContext* ctx) -> Maybe<void> {
diff --git a/oneflow/user/ops/layer_norm_op.cpp b/oneflow/user/ops/layer_norm_op.cpp
index f797d4eea..ded63f98a 100644
--- a/oneflow/user/ops/layer_norm_op.cpp
+++ b/oneflow/user/ops/layer_norm_op.cpp
@@ -140,7 +140,7 @@ REGISTER_USER_OP("layer_norm_grad")
       user_op::TensorDesc* dx = ctx->OutputTensorDesc("dx", 0);
       CHECK_EQ_OR_RETURN(dy.shape(), x.shape());
       const int64_t begin_norm_axis = ctx->Attr<int64_t>("begin_norm_axis");
-      CHECK_GT(begin_norm_axis, 0);
+      CHECK_GT_OR_RETURN(begin_norm_axis, 0);
       const Shape& bn_param_shape = InferBnParamShape(x.shape(), begin_norm_axis);
       CHECK_EQ_OR_RETURN(mean.shape(), bn_param_shape);
       CHECK_EQ_OR_RETURN(inv_variance.shape(), bn_param_shape);
diff --git a/oneflow/user/ops/leaky_relu_op.cpp b/oneflow/user/ops/leaky_relu_op.cpp
index a0d65badd..f48b34aad 100644
--- a/oneflow/user/ops/leaky_relu_op.cpp
+++ b/oneflow/user/ops/leaky_relu_op.cpp
@@ -48,7 +48,7 @@ REGISTER_USER_OP("leaky_relu_grad")
      const Shape& x_shape = ctx->InputShape("x", 0);
      const Shape& dy_shape = ctx->InputShape("dy", 0);
      Shape* dx_shape = ctx->OutputShape("dx", 0);
-     CHECK(dy_shape == x_shape);
+     CHECK_OR_RETURN(dy_shape == x_shape);
      *dx_shape = dy_shape;
      return Maybe<void>::Ok();
    })
diff --git a/oneflow/user/ops/nn_util.cpp b/oneflow/user/ops/nn_util.cpp
index 52d031d6c..a9c212346 100644
--- a/oneflow/user/ops/nn_util.cpp
+++ b/oneflow/user/ops/nn_util.cpp
@@ -17,11 +17,11 @@ limitations under the License.
 namespace oneflow {
 
-void CalcOutAndPadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
-                       int32_t stride, const std::string& padding_type, int64_t* output_size,
-                       int32_t* padding_before, int32_t* padding_after) {
-  CHECK_GT(stride, 0);
-  CHECK_GE(dilation_rate, 1);
+Maybe<void> CalcOutAndPadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
+                              int32_t stride, const std::string& padding_type, int64_t* output_size,
+                              int32_t* padding_before, int32_t* padding_after) {
+  CHECK_GT_OR_RETURN(stride, 0);
+  CHECK_GE_OR_RETURN(dilation_rate, 1);
   int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
   if (padding_type == "valid") {
@@ -41,13 +41,14 @@
   } else {
     UNIMPLEMENTED();
   }
-  if (output_size) { CHECK_GE((*output_size), 0); }
+  if (output_size) { CHECK_GE_OR_RETURN((*output_size), 0); }
+  return Maybe<void>::Ok();
 }
 
-void CalcSamePadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate, int32_t stride,
-                     int32_t* padding_small, int32_t* padding_large) {
-  CHECK_GT(stride, 0);
-  CHECK_GE(dilation_rate, 1);
+Maybe<void> CalcSamePadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
+                            int32_t stride, int32_t* padding_small, int32_t* padding_large) {
+  CHECK_GT_OR_RETURN(stride, 0);
+  CHECK_GE_OR_RETURN(dilation_rate, 1);
   int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
   int64_t tmp_output_size = (input_size + stride - 1) / stride;
@@ -55,18 +56,20 @@ void CalcSamePadding(int64_t input_size, int32_t filter_size, int32_t dilation_r
       0, static_cast<int32_t>((tmp_output_size - 1) * stride + effective_filter_size - input_size));
   if (padding_small) { *padding_small = padding_needed / 2; }
   if (padding_large) { *padding_large = padding_needed - padding_needed / 2; }
+  return Maybe<void>::Ok();
 }
 
-void CalcConvOut(int64_t input_size, int32_t filter_size, int32_t dilation_rate, int32_t stride,
-                 int32_t padding_before, int64_t* output_size) {
-  CHECK_GT(stride, 0);
-  CHECK_GE(dilation_rate, 1);
+Maybe<void> CalcConvOut(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
+                        int32_t stride, int32_t padding_before, int64_t* output_size) {
+  CHECK_GT_OR_RETURN(stride, 0);
+  CHECK_GE_OR_RETURN(dilation_rate, 1);
   int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
   if (output_size) {
     *output_size = (input_size + 2 * padding_before - effective_filter_size + stride) / stride;
-    CHECK_GE((*output_size), 0);
+    CHECK_GE_OR_RETURN((*output_size), 0);
   }
+  return Maybe<void>::Ok();
 }
 
 const size_t IdxOffset(const std::string& data_format) {
diff --git a/oneflow/user/ops/nn_util.h b/oneflow/user/ops/nn_util.h
index 002a82938..c6bd15c31 100644
--- a/oneflow/user/ops/nn_util.h
+++ b/oneflow/user/ops/nn_util.h
@@ -20,15 +20,15 @@ limitations under the License.
 namespace oneflow {
 
-void CalcOutAndPadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
-                       int32_t stride, const std::string& padding_type, int64_t* output_size,
-                       int32_t* padding_before, int32_t* padding_after);
+Maybe<void> CalcOutAndPadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
+                              int32_t stride, const std::string& padding_type, int64_t* output_size,
+                              int32_t* padding_before, int32_t* padding_after);
 
-void CalcSamePadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate, int32_t stride,
-                     int32_t* padding_small, int32_t* padding_large);
+Maybe<void> CalcSamePadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
+                            int32_t stride, int32_t* padding_small, int32_t* padding_large);
 
-void CalcConvOut(int64_t input_size, int32_t filter_size, int32_t dilation_rate, int32_t stride,
-                 int32_t padding_before, int64_t* output_size);
+Maybe<void> CalcConvOut(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
+                        int32_t stride, int32_t padding_before, int64_t* output_size);
 
 const size_t IdxOffset(const std::string& data_format);
 const int32_t ChannelIdx(const std::string& data_format, int32_t num_axes);
diff --git a/oneflow/user/ops/normalization_op.cpp b/oneflow/user/ops/normalization_op.cpp
index e31d8fce0..e61e65e87 100644
--- a/oneflow/user/ops/normalization_op.cpp
+++ b/oneflow/user/ops/normalization_op.cpp
@@ -146,7 +146,7 @@ user_op::TensorDescInferFn MakeFwTensorDescInferFn(
     JUST(SetParamTensorDesc("mean"));
     JUST(SetParamTensorDesc("inv_variance"));
     if (ctx->has_output("reserve_space", 0)) {
-      CHECK(reserve_space_infer_fn);
+      CHECK_OR_RETURN(reserve_space_infer_fn);
       reserve_space_infer_fn(ctx, &x, ctx->OutputTensorDesc("reserve_space", 0));
     }
     return Maybe<void>::Ok();
@@ -178,7 +178,7 @@ user_op::DataTypeInferFn MakeFwDataTypeInferFn(
     JUST(SetParamDataType("mean"));
     JUST(SetParamDataType("inv_variance"));
     if (ctx->has_output("reserve_space", 0)) {
-      CHECK(reserve_space_infer_fn);
+      CHECK_OR_RETURN(reserve_space_infer_fn);
       reserve_space_infer_fn(ctx, &x, ctx->OutputTensorDesc("reserve_space", 0));
     }
     return Maybe<void>::Ok();
diff --git a/oneflow/user/ops/pack_op.cpp b/oneflow/user/ops/pack_op.cpp
index 62f8d04b2..97920bf52 100644
--- a/oneflow/user/ops/pack_op.cpp
+++ b/oneflow/user/ops/pack_op.cpp
@@ -26,7 +26,7 @@ REGISTER_USER_OP("pack")
     .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
       const user_op::TensorDesc& in_desc = ctx->InputTensorDesc("in", 0);
       const Shape& in_shape = in_desc.shape();
-      CHECK_GT(in_shape.NumAxes(), 0);
+      CHECK_GT_OR_RETURN(in_shape.NumAxes(), 0);
       user_op::TensorDesc* out_desc = ctx->OutputTensorDesc("out", 0);
       *out_desc->mut_is_dynamic() = in_desc.is_dynamic();
       *out_desc->mut_shape() = in_desc.shape();
diff --git a/oneflow/user/ops/pad_op.cpp b/oneflow/user/ops/pad_op.cpp
index 27604575e..a446b29ce 100644
--- a/oneflow/user/ops/pad_op.cpp
+++ b/oneflow/user/ops/pad_op.cpp
@@ -29,8 +29,8 @@ REGISTER_USER_OP("pad")
       const Shape& x_shape = ctx->InputShape("x", 0);
       const auto& padding_before = ctx->Attr<std::vector<int64_t>>("padding_before");
       const auto& padding_after = ctx->Attr<std::vector<int64_t>>("padding_after");
-      CHECK_EQ(padding_before.size(), x_shape.NumAxes());
-      CHECK_EQ(padding_after.size(), x_shape.NumAxes());
+      CHECK_EQ_OR_RETURN(padding_before.size(), x_shape.NumAxes());
+      CHECK_EQ_OR_RETURN(padding_after.size(), x_shape.NumAxes());
       DimVector y_dim_vec(x_shape.NumAxes());
       FOR_RANGE(int64_t, i, 0, x_shape.NumAxes()) {
         y_dim_vec[i] = x_shape.At(i) + padding_before[i] + padding_after[i];
@@ -68,8 +68,8 @@ REGISTER_USER_OP("pad_grad")
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       const auto& padding_before = ctx->Attr<std::vector<int64_t>>("padding_before");
       const auto& padding_after = ctx->Attr<std::vector<int64_t>>("padding_after");
-      CHECK_EQ(padding_before.size(), dy_shape.NumAxes());
-      CHECK_EQ(padding_after.size(), dy_shape.NumAxes());
+      CHECK_EQ_OR_RETURN(padding_before.size(), dy_shape.NumAxes());
+      CHECK_EQ_OR_RETURN(padding_after.size(), dy_shape.NumAxes());
       DimVector dx_dim_vec(dy_shape.NumAxes());
       FOR_RANGE(int64_t, i, 0, dy_shape.NumAxes()) {
         dx_dim_vec[i] = dy_shape.At(i) - padding_before[i] - padding_after[i];
diff --git a/oneflow/user/ops/parallel_cast_op.cpp b/oneflow/user/ops/parallel_cast_op.cpp
index 70fcc6a11..b31cf919f 100644
--- a/oneflow/user/ops/parallel_cast_op.cpp
+++ b/oneflow/user/ops/parallel_cast_op.cpp
@@ -67,7 +67,7 @@ REGISTER_USER_OP_GRAD("parallel_cast")
         ctx->FwOp().BindGradTensorWithOpInput(ctx->FwOp().GetGradTensorWithOpOutput("out", 0),
                                               "in", 0);
       } else {
-        CHECK(IsValidSbpParallelString(grad_sbp_parallel_str));
+        CHECK_OR_RETURN(IsValidSbpParallelString(grad_sbp_parallel_str));
         const std::string grad_op_name = "System-AutoGrad-" + ctx->FwOp().op_name();
         ctx->DefineOp(grad_op_name, [&](user_op::BackwardOpBuilder& builder) {
           return builder.OpTypeName("parallel_cast")
diff --git a/oneflow/user/ops/relu_op.cpp b/oneflow/user/ops/relu_op.cpp
index f9c4d7582..d2ae5b6bf 100644
--- a/oneflow/user/ops/relu_op.cpp
+++ b/oneflow/user/ops/relu_op.cpp
@@ -51,7 +51,7 @@ REGISTER_USER_OP("relu_grad")
       const Shape& y_shape = ctx->InputShape("y", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == y_shape);
+      CHECK_OR_RETURN(dy_shape == y_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/reshape_user_op_util.cpp b/oneflow/user/ops/reshape_user_op_util.cpp
index f570b5d88..5ac9a1ddd 100644
--- a/oneflow/user/ops/reshape_user_op_util.cpp
+++ b/oneflow/user/ops/reshape_user_op_util.cpp
@@ -70,7 +70,7 @@ Maybe<void> ReshapeUserOpUtil::GetGroupStartInAxis2OutAxis(
     HashMap<int, int>* group_start_in_axis2out_axis) {
   CHECK_NE_OR_RETURN(in_shape.NumAxes(), 0);
   CHECK_NE_OR_RETURN(out_shape.NumAxes(), 0);
-  CHECK_EQ(in_shape.elem_cnt(), out_shape.elem_cnt());
+  CHECK_EQ_OR_RETURN(in_shape.elem_cnt(), out_shape.elem_cnt());
   int in_axis = in_shape.NumAxes() - 1;
   int out_axis = out_shape.NumAxes() - 1;
   while (in_axis >= 0 && out_axis >= 0) {
diff --git a/oneflow/user/ops/same_padding_op.cpp b/oneflow/user/ops/same_padding_op.cpp
index ab516f0bb..b54d705df 100644
--- a/oneflow/user/ops/same_padding_op.cpp
+++ b/oneflow/user/ops/same_padding_op.cpp
@@ -20,6 +20,34 @@ limitations under the License.
 namespace oneflow {
 namespace user_op {
 
+namespace {
+Maybe<void> SamePaddingTensorDescInferFn(user_op::InferContext* ctx) {
+  const TensorDesc& x_desc = ctx->InputTensorDesc("x", 0);
+  TensorDesc* y_desc = ctx->OutputTensorDesc("y", 0);
+  *y_desc->mut_shape() = x_desc.shape();
+  *y_desc->mut_is_dynamic() = x_desc.is_dynamic();
+  const std::string& data_format = ctx->Attr<std::string>("data_format");
+  const auto& kernel_size = ctx->Attr<std::vector<int32_t>>("kernel_size");
+  const auto& strides = ctx->Attr<std::vector<int32_t>>("strides");
+  const auto& dilation_rate = ctx->Attr<std::vector<int32_t>>("dilation_rate");
+  const size_t idx_offset = IdxOffset(data_format);
+  const int32_t num_spatial_dims = x_desc.shape().NumAxes() - 2;
+  CHECK_EQ_OR_RETURN(num_spatial_dims, kernel_size.size());
+  CHECK_EQ_OR_RETURN(num_spatial_dims, strides.size());
+  CHECK_EQ_OR_RETURN(num_spatial_dims, dilation_rate.size());
+  DimVector y_dim_vec(x_desc.shape().dim_vec());
+  for (int32_t i = 0; i < num_spatial_dims; ++i) {
+    int32_t padding_small = 0;
+    int32_t padding_large = 0;
+    JUST(CalcSamePadding(x_desc.shape().At(idx_offset + i), kernel_size.at(i), dilation_rate.at(i),
+                         strides.at(i), &padding_small, &padding_large));
+    y_dim_vec[idx_offset + i] = x_desc.shape().At(idx_offset + i) + padding_small + padding_large;
+  }
+  *y_desc->mut_shape() = Shape(y_dim_vec);
+  return Maybe<void>::Ok();
+}
+}  // namespace
+
 REGISTER_USER_OP("same_padding")
     .Input("x")
     .Output("y")
@@ -28,32 +56,7 @@ REGISTER_USER_OP("same_padding")
     .Attr<std::vector<int32_t>>("kernel_size")
     .Attr<std::vector<int32_t>>("strides")
     .Attr<std::vector<int32_t>>("dilation_rate")
-    .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
-      const TensorDesc& x_desc = ctx->InputTensorDesc("x", 0);
-      TensorDesc* y_desc = ctx->OutputTensorDesc("y", 0);
-      *y_desc->mut_shape() = x_desc.shape();
-      *y_desc->mut_is_dynamic() = x_desc.is_dynamic();
-      const std::string& data_format = ctx->Attr<std::string>("data_format");
-      const auto& kernel_size = ctx->Attr<std::vector<int32_t>>("kernel_size");
-      const auto& strides = ctx->Attr<std::vector<int32_t>>("strides");
-      const auto& dilation_rate = ctx->Attr<std::vector<int32_t>>("dilation_rate");
-      const size_t idx_offset = IdxOffset(data_format);
-      const int32_t num_spatial_dims = x_desc.shape().NumAxes() - 2;
-      CHECK_EQ_OR_RETURN(num_spatial_dims, kernel_size.size());
-      CHECK_EQ_OR_RETURN(num_spatial_dims, strides.size());
-      CHECK_EQ_OR_RETURN(num_spatial_dims, dilation_rate.size());
-      DimVector y_dim_vec(x_desc.shape().dim_vec());
-      for (int32_t i = 0; i < num_spatial_dims; ++i) {
-        int32_t padding_small = 0;
-        int32_t padding_large = 0;
-        CalcSamePadding(x_desc.shape().At(idx_offset + i), kernel_size.at(i), dilation_rate.at(i),
-                        strides.at(i), &padding_small, &padding_large);
-        y_dim_vec[idx_offset + i] =
-            x_desc.shape().At(idx_offset + i) + padding_small + padding_large;
-      }
-      *y_desc->mut_shape() = Shape(y_dim_vec);
-      return Maybe<void>::Ok();
-    })
+    .SetTensorDescInferFn(SamePaddingTensorDescInferFn)
     .SetGetSbpFn([](user_op::SbpContext* ctx) -> Maybe<void> {
       const int32_t num_axes =
           ctx->LogicalTensorDesc4InputArgNameAndIndex("x_like", 0).shape().NumAxes();
diff --git a/oneflow/user/ops/sigmoid_op.cpp b/oneflow/user/ops/sigmoid_op.cpp
index 6d0e393c7..3af60af64 100644
--- a/oneflow/user/ops/sigmoid_op.cpp
+++ b/oneflow/user/ops/sigmoid_op.cpp
@@ -51,7 +51,7 @@ REGISTER_USER_OP("sigmoid_grad")
       const Shape& y_shape = ctx->InputShape("y", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == y_shape);
+      CHECK_OR_RETURN(dy_shape == y_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/softmax_op.cpp b/oneflow/user/ops/softmax_op.cpp
index 67491a6cc..e4c8ad8b7 100644
--- a/oneflow/user/ops/softmax_op.cpp
+++ b/oneflow/user/ops/softmax_op.cpp
@@ -49,7 +49,7 @@ REGISTER_USER_OP("softmax_grad")
       const Shape& y_shape = ctx->InputShape("y", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == y_shape);
+      CHECK_OR_RETURN(dy_shape == y_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/squeeze_op.cpp b/oneflow/user/ops/squeeze_op.cpp
index 9cc78594c..33691aab8 100644
--- a/oneflow/user/ops/squeeze_op.cpp
+++ b/oneflow/user/ops/squeeze_op.cpp
@@ -23,8 +23,8 @@ Maybe<void> TransformNegativeAxesToPositive(const std::vector<int32_t>& axes_vec
                                             const int32_t num_axes, AxisVector* fixed_axes_vec) {
   fixed_axes_vec->resize(axes_vec.size());
   FOR_RANGE(size_t, i, 0, fixed_axes_vec->size()) {
-    CHECK_GE(axes_vec[i], -num_axes);
-    CHECK_LT(axes_vec[i], num_axes);
+    CHECK_GE_OR_RETURN(axes_vec[i], -num_axes);
+    CHECK_LT_OR_RETURN(axes_vec[i], num_axes);
     fixed_axes_vec->at(i) = axes_vec[i] >= 0 ? axes_vec[i] : axes_vec[i] + num_axes;
   }
   return Maybe<void>::Ok();
diff --git a/oneflow/user/ops/test_ops.cpp b/oneflow/user/ops/test_ops.cpp
index d44cdc73d..efbff608f 100644
--- a/oneflow/user/ops/test_ops.cpp
+++ b/oneflow/user/ops/test_ops.cpp
@@ -44,7 +44,7 @@ REGISTER_USER_OP("ccrelu_grad")
       const Shape& y_shape = ctx->InputShape("y", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == y_shape);
+      CHECK_OR_RETURN(dy_shape == y_shape);
       *dx_shape = y_shape;
       return Maybe<void>::Ok();
     })
@@ -85,7 +85,7 @@ REGISTER_USER_OP("TestReshape")
       const Shape& in_shape = ctx->InputShape("in", 0);
       Shape* out_shape = ctx->OutputShape("out", 0);
       const Shape& conf_shape = ctx->Attr<Shape>("shape");
-      CHECK_EQ(in_shape.NumAxes(), conf_shape.NumAxes());
+      CHECK_EQ_OR_RETURN(in_shape.NumAxes(), conf_shape.NumAxes());
       *out_shape = conf_shape;
       return Maybe<void>::Ok();
     })
@@ -152,7 +152,7 @@ REGISTER_USER_OP("TestSourceMultiGpuFixedOutNum")
       *out_shape = Shape({bs.At(parallel_ctx.parallel_id()).size()});
       const cfg::SbpParallel& out_sbp = ctx->SbpParallel4ArgNameAndIndex("out", 0);
-      CHECK(out_sbp.has_split_parallel() && out_sbp.split_parallel().axis() == 0);
+      CHECK_OR_RETURN(out_sbp.has_split_parallel() && out_sbp.split_parallel().axis() == 0);
       return Maybe<void>::Ok();
     })
     .SetDataTypeInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
@@ -176,7 +176,7 @@ REGISTER_USER_OP("TestMultiInput")
       const Shape& x1_shape = ctx->InputShape("x1", 0);
       const Shape& x2_shape = ctx->InputShape("x2", 0);
       Shape* y_shape = ctx->OutputShape("y", 0);
-      CHECK(x1_shape == x2_shape);
+      CHECK_OR_RETURN(x1_shape == x2_shape);
       *y_shape = x1_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/transpose_ops.cpp b/oneflow/user/ops/transpose_ops.cpp
index 05ed4bc1a..8a3b849ef 100644
--- a/oneflow/user/ops/transpose_ops.cpp
+++ b/oneflow/user/ops/transpose_ops.cpp
@@ -55,12 +55,12 @@ REGISTER_USER_OP("transpose")
       const user_op::TensorDesc& input_tensor =
           ctx->LogicalTensorDesc4InputArgNameAndIndex("input", 0);
       const auto& perm = ctx->Attr<std::vector<int32_t>>("perm");
-      CHECK_EQ(perm.size(), input_tensor.shape().NumAxes());
+      CHECK_EQ_OR_RETURN(perm.size(), input_tensor.shape().NumAxes());
       FOR_RANGE(int32_t, i, 0, perm.size()) {
         int32_t axis = perm.at(i);
         if (axis < 0) { axis += perm.size(); }
-        CHECK_GE(axis, 0);
-        CHECK_LT(axis, perm.size());
+        CHECK_GE_OR_RETURN(axis, 0);
+        CHECK_LT_OR_RETURN(axis, perm.size());
         ctx->NewBuilder().Split(ctx->inputs(), axis).Split(ctx->outputs(), i).Build();
       }
       ctx->NewBuilder().PartialSum(ctx->inputs()).PartialSum(ctx->outputs()).Build();
diff --git a/oneflow/user/ops/unpack_op.cpp b/oneflow/user/ops/unpack_op.cpp
index e8a34bf6f..7c5dba7ea 100644
--- a/oneflow/user/ops/unpack_op.cpp
+++ b/oneflow/user/ops/unpack_op.cpp
@@ -26,9 +26,9 @@ REGISTER_USER_OP("unpack")
     .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
       const user_op::TensorDesc& in_desc = ctx->InputTensorDesc("in", 0);
       const Shape& in_shape = in_desc.shape();
-      CHECK_GT(in_shape.NumAxes(), 0);
+      CHECK_GT_OR_RETURN(in_shape.NumAxes(), 0);
       const auto unpack_num = ctx->Attr<int32_t>("unpack_num");
-      CHECK_EQ(in_shape.At(0) % unpack_num, 0);
+      CHECK_EQ_OR_RETURN(in_shape.At(0) % unpack_num, 0);
       user_op::TensorDesc* out_desc = ctx->OutputTensorDesc("out", 0);
       *out_desc->mut_shape() = in_desc.shape();
       out_desc->mut_shape()->Set(0, in_shape.At(0) / unpack_num);
--
GitLab
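
The pattern applied throughout this patch is uniform: helpers such as CalcOutAndPadding, CalcSamePadding, and CalcConvOut, along with the registry callbacks, now return Maybe<void> instead of void, replace CHECK/CHECK_EQ/CHECK_GT with CHECK_OR_RETURN/CHECK_EQ_OR_RETURN/CHECK_GT_OR_RETURN, and end with return Maybe<void>::Ok(); callers propagate a failure up the stack with JUST(...) or assert success at a non-Maybe boundary with CHECK_JUST(...). The sketch below illustrates only that control flow, using simplified stand-ins for Maybe<void> and the macros; the MaybeVoid type and the MY_* names are invented for this example and are not OneFlow's real definitions.

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

// Simplified stand-in for oneflow::Maybe<void>: success, or an error message.
struct MaybeVoid {
  std::optional<std::string> error;  // empty means success
  static MaybeVoid Ok() { return MaybeVoid{}; }
  static MaybeVoid Error(std::string msg) { return MaybeVoid{std::move(msg)}; }
  bool IsOk() const { return !error.has_value(); }
};

// Analogue of CHECK_GT_OR_RETURN: on failure, return an error instead of aborting the process.
#define MY_CHECK_GT_OR_RETURN(a, b)                                         \
  if (!((a) > (b))) {                                                       \
    return MaybeVoid::Error("check failed: " #a " > " #b " in " __FILE__);  \
  }

// Analogue of JUST: evaluate a Maybe-returning expression and propagate its error to the caller.
#define MY_JUST(expr)                              \
  do {                                             \
    MaybeVoid maybe_result_ = (expr);              \
    if (!maybe_result_.IsOk()) { return maybe_result_; } \
  } while (0)

// Mirrors the CalcConvOut change: bad arguments are reported through the return value.
MaybeVoid CalcOut(int64_t input_size, int32_t stride, int64_t* output_size) {
  MY_CHECK_GT_OR_RETURN(stride, 0);
  *output_size = (input_size + stride - 1) / stride;
  return MaybeVoid::Ok();
}

// Mirrors a tensor-desc infer callback: the helper's error stacks up instead of killing the job.
MaybeVoid InferOut(int64_t input_size, int32_t stride) {
  int64_t out = 0;
  MY_JUST(CalcOut(input_size, stride, &out));
  std::cout << "output size: " << out << "\n";
  return MaybeVoid::Ok();
}

int main() {
  InferOut(10, 2);                // prints "output size: 5"
  MaybeVoid m = InferOut(10, 0);  // stride check fails; the error is returned, not fatal
  if (!m.IsOk()) { std::cout << *m.error << "\n"; }
  return 0;
}

The design point of the conversion is visible in the diff itself: once the checker returns Maybe<void>, an invalid op configuration surfaces as an error message with call-site context at the registry layer rather than as a hard CHECK abort inside a utility function.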