diff --git a/oneflow/user/kernels/same_padding_kernel.cpp b/oneflow/user/kernels/same_padding_kernel.cpp
index 138fe30d3c520e05e5680c39ef8ccf8368a0a2ba..241f30d7e7a4449abfc502aa05a54c79191422a8 100644
--- a/oneflow/user/kernels/same_padding_kernel.cpp
+++ b/oneflow/user/kernels/same_padding_kernel.cpp
@@ -42,8 +42,9 @@ class SamePaddingKernel final : public user_op::OpKernel {
     for (int32_t i = 0; i < num_spatial_dims; ++i) {
       int32_t padding_small = 0;
       int32_t padding_large = 0;
-      CalcSamePadding(x->shape().At(idx_offset + i), kernel_size.at(i), dilation_rate.at(i),
-                      strides.at(i), &padding_small, &padding_large);
+      CHECK_JUST(CalcSamePadding(x->shape().At(idx_offset + i), kernel_size.at(i),
+                                 dilation_rate.at(i), strides.at(i), &padding_small,
+                                 &padding_large));
       if (padding == "same_lower") {
         padding_before[idx_offset + i] = padding_large;
       } else if (padding == "same_upper") {
@@ -123,8 +124,9 @@ class SamePaddingGradKernel final : public user_op::OpKernel {
     for (int32_t i = 0; i < num_spatial_dims; ++i) {
       int32_t padding_small = 0;
       int32_t padding_large = 0;
-      CalcSamePadding(dx->shape().At(idx_offset + i), kernel_size.at(i), dilation_rate.at(i),
-                      strides.at(i), &padding_small, &padding_large);
+      CHECK_JUST(CalcSamePadding(dx->shape().At(idx_offset + i), kernel_size.at(i),
+                                 dilation_rate.at(i), strides.at(i), &padding_small,
+                                 &padding_large));
       if (padding == "same_lower") {
         padding_before[idx_offset + i] = padding_large;
       } else if (padding == "same_upper") {
diff --git a/oneflow/user/ops/cast_to_tick_op.cpp b/oneflow/user/ops/cast_to_tick_op.cpp
index c5240419ffd35b35476e093f62d3a7773b717f6d..36ed5c3b5404dec12e1d92384df8c48a36ec8223 100644
--- a/oneflow/user/ops/cast_to_tick_op.cpp
+++ b/oneflow/user/ops/cast_to_tick_op.cpp
@@ -37,7 +37,7 @@ REGISTER_NO_GRAD_USER_OP("cast_to_tick")
           const cfg::ParallelDistribution& in_dis_hint =
               ctx->ParallelDistributionHint4InputArgNameAndIndex("in", 0);
           const Shape& parallel_hierarchy = ctx->parallel_hierarchy();
-          CHECK_EQ(in_dis_hint.sbp_parallel_size(), parallel_hierarchy.NumAxes());
+          CHECK_EQ_OR_RETURN(in_dis_hint.sbp_parallel_size(), parallel_hierarchy.NumAxes());
 
           cfg::ParallelDistribution* in_distribution =
               ctx->ParallelDistribution4ArgNameAndIndex("in", 0);
diff --git a/oneflow/user/ops/conv_op.cpp b/oneflow/user/ops/conv_op.cpp
index e29c110eee383e8cf05c10022a3fcc4489b16a6e..6012a53ef0198bf6d9f100dbb133506c678b4c38 100644
--- a/oneflow/user/ops/conv_op.cpp
+++ b/oneflow/user/ops/conv_op.cpp
@@ -23,7 +23,7 @@ namespace {
 template<size_t NDims>
 Maybe<void> InferTensorDesc4Conv(user_op::InferContext* ctx) {
   const user_op::TensorDesc& in = ctx->InputTensorDesc("in", 0);
-  CHECK_EQ(NDims + 2, in.shape().NumAxes());
+  CHECK_EQ_OR_RETURN(NDims + 2, in.shape().NumAxes());
 
   auto data_format = ctx->Attr<std::string>("data_format");
   auto kernel_size = ctx->Attr<std::vector<int32_t>>("kernel_size");
@@ -44,8 +44,8 @@ Maybe<void> InferTensorDesc4Conv(user_op::InferContext* ctx) {
     const size_t c_dim = data_format == "channels_first" ? 1 : NDims + 1;
     out_shape.at(c_dim) = filters;
     for (int32_t i = 0; i < NDims; ++i) {
-      CalcConvOut(in.shape().At(idx_offset + i), kernel_size.at(i), dilation_rate.at(i),
-                  strides.at(i), padding_before.at(i), &out_shape.at(idx_offset + i));
+      JUST(CalcConvOut(in.shape().At(idx_offset + i), kernel_size.at(i), dilation_rate.at(i),
+                       strides.at(i), padding_before.at(i), &out_shape.at(idx_offset + i)));
     }
     *out->mut_is_dynamic() = in.is_dynamic();
     *out->mut_shape() = Shape(out_shape);
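`JUST(...)` in the hunk above expands to an early return that forwards an error from `CalcConvOut` to the caller of `InferTensorDesc4Conv`. A toy sketch of that control flow, with hypothetical names (`JUST_SKETCH`, `CalcConvOutSketch`) and the same output-size formula the nn_util.cpp hunk below uses:

```cpp
#include <optional>
#include <string>

using MaybeVoid = std::optional<std::string>;  // empty == Ok (toy stand-in)

// Toy JUST: if the sub-call failed, return its error from the enclosing
// MaybeVoid function immediately. OneFlow's real JUST also forwards values
// and stack context; this mirrors only the control flow.
#define JUST_SKETCH(expr)               \
  do {                                  \
    if (MaybeVoid m = (expr)) return m; \
  } while (0)

MaybeVoid CalcConvOutSketch(long in, int k, int dilation, int stride,
                            int padding, long* out) {
  if (stride <= 0) return std::string("stride must be > 0");
  if (dilation < 1) return std::string("dilation_rate must be >= 1");
  const int effective_k = (k - 1) * dilation + 1;
  *out = (in + 2L * padding - effective_k + stride) / stride;
  if (*out < 0) return std::string("negative output extent");
  return std::nullopt;
}

MaybeVoid InferConvShapeSketch(long in, long* out) {
  // A failed CalcConvOutSketch aborts inference here and hands the error
  // upward, which is what JUST(CalcConvOut(...)) does in the hunk above.
  JUST_SKETCH(CalcConvOutSketch(in, /*k=*/3, /*dilation=*/1, /*stride=*/1,
                                /*padding=*/1, out));
  return std::nullopt;
}

int main() {
  long out = 0;
  return InferConvShapeSketch(224, &out) ? 1 : 0;  // 0 on success
}
```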
diff --git a/oneflow/user/ops/deconv_op.cpp b/oneflow/user/ops/deconv_op.cpp
index bf0532b6d38bb59ed8ea47ca33864ff0cc3dabff..387b3da2fe1edf79c4630e9dd83fec8bf42dc513 100644
--- a/oneflow/user/ops/deconv_op.cpp
+++ b/oneflow/user/ops/deconv_op.cpp
@@ -23,7 +23,7 @@ namespace {
 template<size_t NDims>
 Maybe<void> InferTensorDesc4DeConv(user_op::InferContext* ctx) {
   const user_op::TensorDesc& in = ctx->InputTensorDesc("in", 0);
-  CHECK_EQ(NDims + 2, in.shape().NumAxes());
+  CHECK_EQ_OR_RETURN(NDims + 2, in.shape().NumAxes());
 
   const std::string& data_format = ctx->Attr<std::string>("data_format");
   const auto& kernel_size = ctx->Attr<std::vector<int32_t>>("kernel_size");
@@ -69,7 +69,7 @@ Maybe<void> InferTensorDesc4DeConv(user_op::InferContext* ctx) {
     for (size_t i = 0; i < NDims; ++i) { weight_shape.at(idx_offset + i) = kernel_size.at(i); }
 
     const user_op::TensorDesc& weight = ctx->InputTensorDesc("weight", 0);
-    CHECK_EQ(weight.shape(), Shape(weight_shape));
+    CHECK_EQ_OR_RETURN(weight.shape(), Shape(weight_shape));
   }
 
   return Maybe<void>::Ok();
diff --git a/oneflow/user/ops/elu_op.cpp b/oneflow/user/ops/elu_op.cpp
index fd3ab133d114ddbd7e80c04802bf2174831322b9..13cf0de77ec858f1c333f36b21869e1f4f26c606 100644
--- a/oneflow/user/ops/elu_op.cpp
+++ b/oneflow/user/ops/elu_op.cpp
@@ -51,7 +51,7 @@ REGISTER_USER_OP("elu_grad")
       const Shape& x_shape = ctx->InputShape("x", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == x_shape);
+      CHECK_OR_RETURN(dy_shape == x_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/fused_scale_tril_softmax_mask_scale_op.cpp b/oneflow/user/ops/fused_scale_tril_softmax_mask_scale_op.cpp
index 88e3fc591632d3f1a078aac67c07780676432e24..f0905fd8f983a4b289ccbf001605abe1356a9cc9 100644
--- a/oneflow/user/ops/fused_scale_tril_softmax_mask_scale_op.cpp
+++ b/oneflow/user/ops/fused_scale_tril_softmax_mask_scale_op.cpp
@@ -51,7 +51,7 @@ REGISTER_USER_OP("fused_tril_scale_softmax_mask_scale")
     })
     .SetGetSbpFn([](user_op::SbpContext* ctx) -> Maybe<void> {
       const user_op::TensorDesc& x_tensor = ctx->LogicalTensorDesc4InputArgNameAndIndex("x", 0);
-      CHECK_GE(x_tensor.shape().NumAxes(), 2);
+      CHECK_GE_OR_RETURN(x_tensor.shape().NumAxes(), 2);
       FOR_RANGE(int64_t, axis, 0, x_tensor.shape().NumAxes() - 2) {
         ctx->NewBuilder()
             .Split(user_op::OpArg("x", 0), axis)
@@ -75,7 +75,7 @@ REGISTER_USER_OP("fused_tril_scale_softmax_mask_scale_grad")
       const user_op::TensorDesc& softmax_y_desc = ctx->InputTensorDesc("softmax_y", 0);
       const user_op::TensorDesc& dy_desc = ctx->InputTensorDesc("dy", 0);
       user_op::TensorDesc* dx_desc = ctx->OutputTensorDesc("dx", 0);
-      CHECK(dy_desc.shape() == softmax_y_desc.shape());
+      CHECK_OR_RETURN(dy_desc.shape() == softmax_y_desc.shape());
       *dx_desc->mut_shape() = dy_desc.shape();
       *dx_desc->mut_is_dynamic() = dy_desc.is_dynamic();
       return Maybe<void>::Ok();
@@ -84,13 +84,13 @@ REGISTER_USER_OP("fused_tril_scale_softmax_mask_scale_grad")
       const user_op::TensorDesc& softmax_y_desc = ctx->InputTensorDesc("softmax_y", 0);
       const user_op::TensorDesc& dy_desc = ctx->InputTensorDesc("dy", 0);
       user_op::TensorDesc* dx_desc = ctx->OutputTensorDesc("dx", 0);
-      CHECK(dy_desc.data_type() == softmax_y_desc.data_type());
+      CHECK_OR_RETURN(dy_desc.data_type() == softmax_y_desc.data_type());
       *dx_desc->mut_data_type() = dy_desc.data_type();
       return Maybe<void>::Ok();
     })
     .SetGetSbpFn([](user_op::SbpContext* ctx) -> Maybe<void> {
       const user_op::TensorDesc& dy_tensor = ctx->LogicalTensorDesc4InputArgNameAndIndex("dy", 0);
-      CHECK_GE(dy_tensor.shape().NumAxes(), 2);
+      CHECK_GE_OR_RETURN(dy_tensor.shape().NumAxes(), 2);
       FOR_RANGE(int64_t, axis, 0, dy_tensor.shape().NumAxes() - 2) {
         ctx->NewBuilder()
             .Split(user_op::OpArg("softmax_y", 0), axis)
diff --git a/oneflow/user/ops/gelu_op.cpp b/oneflow/user/ops/gelu_op.cpp
index 902fadbded41e9e128043d7962b721574b2831f8..d2f374052b55219a12838303b7ddc3f505a968f8 100644
--- a/oneflow/user/ops/gelu_op.cpp
+++ b/oneflow/user/ops/gelu_op.cpp
@@ -49,7 +49,7 @@ REGISTER_USER_OP("gelu_grad")
       const Shape& x_shape = ctx->InputShape("x", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == x_shape);
+      CHECK_OR_RETURN(dy_shape == x_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/hardsigmoid_op.cpp b/oneflow/user/ops/hardsigmoid_op.cpp
index 1ec34740e6841e5d3f63ade9f882f6f54f4a36bb..cdf43671ab22f92f0d60c84d89c632ebc52c2fe3 100644
--- a/oneflow/user/ops/hardsigmoid_op.cpp
+++ b/oneflow/user/ops/hardsigmoid_op.cpp
@@ -51,7 +51,7 @@ REGISTER_USER_OP("hardsigmoid_grad")
       const Shape& x_shape = ctx->InputShape("x", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == x_shape);
+      CHECK_OR_RETURN(dy_shape == x_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/hardswish_op.cpp b/oneflow/user/ops/hardswish_op.cpp
index ffef66123b65066e042ab4e5f821b3febe6e92ee..45d0ebe230c5940fc9e4bfa730846a03193f0bb2 100644
--- a/oneflow/user/ops/hardswish_op.cpp
+++ b/oneflow/user/ops/hardswish_op.cpp
@@ -49,7 +49,7 @@ REGISTER_USER_OP("hardswish_grad")
       const Shape& x_shape = ctx->InputShape("x", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == x_shape);
+      CHECK_OR_RETURN(dy_shape == x_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/hardtanh_op.cpp b/oneflow/user/ops/hardtanh_op.cpp
index 32e0a69fb9abb4a28f2bf2357cc1a400ae4993c7..2962c49e99eb1ddd8f1f61ce703d7d47e23a75a7 100644
--- a/oneflow/user/ops/hardtanh_op.cpp
+++ b/oneflow/user/ops/hardtanh_op.cpp
@@ -30,7 +30,7 @@ REGISTER_USER_OP("hardtanh")
       *out_shape = in_shape;
       double min_val = ctx->Attr<double>("min_val");
       double max_val = ctx->Attr<double>("max_val");
-      CHECK_LE(min_val, max_val);
+      CHECK_LE_OR_RETURN(min_val, max_val);
       return Maybe<void>::Ok();
     })
     .SetGetSbpFn([](user_op::SbpContext* ctx) -> Maybe<void> {
@@ -58,11 +58,11 @@ REGISTER_USER_OP("hardtanh_grad")
       const Shape& y_shape = ctx->InputShape("y", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == y_shape);
+      CHECK_OR_RETURN(dy_shape == y_shape);
       *dx_shape = dy_shape;
       double min_val = ctx->Attr<double>("min_val");
       double max_val = ctx->Attr<double>("max_val");
-      CHECK_LE(min_val, max_val);
+      CHECK_LE_OR_RETURN(min_val, max_val);
       return Maybe<void>::Ok();
     })
     .SetGetSbpFn([](user_op::SbpContext* ctx) -> Maybe<void> {
diff --git a/oneflow/user/ops/layer_norm_op.cpp b/oneflow/user/ops/layer_norm_op.cpp
index f797d4eea8a7b2466eb2577977b699cd3f21e2bd..ded63f98ade969a5b2154e1d79f2fbeb5b051fb7 100644
--- a/oneflow/user/ops/layer_norm_op.cpp
+++ b/oneflow/user/ops/layer_norm_op.cpp
@@ -140,7 +140,7 @@ REGISTER_USER_OP("layer_norm_grad")
       user_op::TensorDesc* dx = ctx->OutputTensorDesc("dx", 0);
       CHECK_EQ_OR_RETURN(dy.shape(), x.shape());
       const int64_t begin_norm_axis = ctx->Attr<int64_t>("begin_norm_axis");
-      CHECK_GT(begin_norm_axis, 0);
+      CHECK_GT_OR_RETURN(begin_norm_axis, 0);
       const Shape& bn_param_shape = InferBnParamShape(x.shape(), begin_norm_axis);
       CHECK_EQ_OR_RETURN(mean.shape(), bn_param_shape);
       CHECK_EQ_OR_RETURN(inv_variance.shape(), bn_param_shape);
diff --git a/oneflow/user/ops/leaky_relu_op.cpp b/oneflow/user/ops/leaky_relu_op.cpp
index a0d65badd91f4ba89da1dafda45f3197dac3d40f..f48b34aadd5681fe0ee39932439f3c0e1ddd1c29 100644
--- a/oneflow/user/ops/leaky_relu_op.cpp
+++ b/oneflow/user/ops/leaky_relu_op.cpp
@@ -48,7 +48,7 @@ REGISTER_USER_OP("leaky_relu_grad")
       const Shape& x_shape = ctx->InputShape("x", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == x_shape);
+      CHECK_OR_RETURN(dy_shape == x_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/nn_util.cpp b/oneflow/user/ops/nn_util.cpp
index 52d031d6c33fc1083106fd84c8ab5127bd62b1e0..a9c2123462605484af9c6aac75a1884df9c6c160 100644
--- a/oneflow/user/ops/nn_util.cpp
+++ b/oneflow/user/ops/nn_util.cpp
@@ -17,11 +17,11 @@ limitations under the License.
 
 namespace oneflow {
 
-void CalcOutAndPadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
-                       int32_t stride, const std::string& padding_type, int64_t* output_size,
-                       int32_t* padding_before, int32_t* padding_after) {
-  CHECK_GT(stride, 0);
-  CHECK_GE(dilation_rate, 1);
+Maybe<void> CalcOutAndPadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
+                              int32_t stride, const std::string& padding_type, int64_t* output_size,
+                              int32_t* padding_before, int32_t* padding_after) {
+  CHECK_GT_OR_RETURN(stride, 0);
+  CHECK_GE_OR_RETURN(dilation_rate, 1);
 
   int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
   if (padding_type == "valid") {
@@ -41,13 +41,14 @@ void CalcOutAndPadding(int64_t input_size, int32_t filter_size, int32_t dilation
   } else {
     UNIMPLEMENTED();
   }
-  if (output_size) { CHECK_GE((*output_size), 0); }
+  if (output_size) { CHECK_GE_OR_RETURN((*output_size), 0); }
+  return Maybe<void>::Ok();
 }
 
-void CalcSamePadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate, int32_t stride,
-                     int32_t* padding_small, int32_t* padding_large) {
-  CHECK_GT(stride, 0);
-  CHECK_GE(dilation_rate, 1);
+Maybe<void> CalcSamePadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
+                            int32_t stride, int32_t* padding_small, int32_t* padding_large) {
+  CHECK_GT_OR_RETURN(stride, 0);
+  CHECK_GE_OR_RETURN(dilation_rate, 1);
 
   int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
   int64_t tmp_output_size = (input_size + stride - 1) / stride;
@@ -55,18 +56,20 @@ void CalcSamePadding(int64_t input_size, int32_t filter_size, int32_t dilation_r
       0, static_cast<int32_t>((tmp_output_size - 1) * stride + effective_filter_size - input_size));
   if (padding_small) { *padding_small = padding_needed / 2; }
   if (padding_large) { *padding_large = padding_needed - padding_needed / 2; }
+  return Maybe<void>::Ok();
 }
 
-void CalcConvOut(int64_t input_size, int32_t filter_size, int32_t dilation_rate, int32_t stride,
-                 int32_t padding_before, int64_t* output_size) {
-  CHECK_GT(stride, 0);
-  CHECK_GE(dilation_rate, 1);
+Maybe<void> CalcConvOut(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
+                        int32_t stride, int32_t padding_before, int64_t* output_size) {
+  CHECK_GT_OR_RETURN(stride, 0);
+  CHECK_GE_OR_RETURN(dilation_rate, 1);
 
   int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
   if (output_size) {
     *output_size = (input_size + 2 * padding_before - effective_filter_size + stride) / stride;
-    CHECK_GE((*output_size), 0);
+    CHECK_GE_OR_RETURN((*output_size), 0);
   }
+  return Maybe<void>::Ok();
 }
 
 const size_t IdxOffset(const std::string& data_format) {
diff --git a/oneflow/user/ops/nn_util.h b/oneflow/user/ops/nn_util.h
index 002a829385588db4196c02cd5bb8a2fa0f2036ad..c6bd15c317d9dbb20a646dd4eef3cf41d858a482 100644
--- a/oneflow/user/ops/nn_util.h
+++ b/oneflow/user/ops/nn_util.h
@@ -20,15 +20,15 @@ limitations under the License.
 
 namespace oneflow {
 
-void CalcOutAndPadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
-                       int32_t stride, const std::string& padding_type, int64_t* output_size,
-                       int32_t* padding_before, int32_t* padding_after);
+Maybe<void> CalcOutAndPadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
+                              int32_t stride, const std::string& padding_type, int64_t* output_size,
+                              int32_t* padding_before, int32_t* padding_after);
 
-void CalcSamePadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate, int32_t stride,
-                     int32_t* padding_small, int32_t* padding_large);
+Maybe<void> CalcSamePadding(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
+                            int32_t stride, int32_t* padding_small, int32_t* padding_large);
 
-void CalcConvOut(int64_t input_size, int32_t filter_size, int32_t dilation_rate, int32_t stride,
-                 int32_t padding_before, int64_t* output_size);
+Maybe<void> CalcConvOut(int64_t input_size, int32_t filter_size, int32_t dilation_rate,
+                        int32_t stride, int32_t padding_before, int64_t* output_size);
 
 const size_t IdxOffset(const std::string& data_format);
 const int32_t ChannelIdx(const std::string& data_format, int32_t num_axes);
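Flipping these helpers from `void` to `Maybe<void>` is a breaking signature change: every caller now has to consume the result, which this patch does by hand with `JUST` in `Maybe<void>` functions and `CHECK_JUST` in kernels. One complementary guard worth noting (a sketch only, not something this patch adds) is a `[[nodiscard]]` result type, so the compiler flags any call site the migration missed:

```cpp
#include <optional>
#include <string>

// Hypothetical guard, not part of this patch: marking the result type
// [[nodiscard]] makes the compiler warn when a caller drops an error.
struct [[nodiscard]] MaybeVoidSketch {
  std::optional<std::string> error;  // empty == Ok
};

MaybeVoidSketch CalcSamePaddingSketch(int stride) {
  if (stride <= 0) return {std::string("stride must be > 0")};
  return {};
}

int main() {
  CalcSamePaddingSketch(2);             // compiler warning: result discarded
  auto res = CalcSamePaddingSketch(0);  // fine: result consumed
  return res.error ? 1 : 0;
}
```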
diff --git a/oneflow/user/ops/normalization_op.cpp b/oneflow/user/ops/normalization_op.cpp
index e31d8fce0ed06bb8d03ec656e743af6bbdfcf760..e61e65e87dc254ed8234e9944333732e5c30f32f 100644
--- a/oneflow/user/ops/normalization_op.cpp
+++ b/oneflow/user/ops/normalization_op.cpp
@@ -146,7 +146,7 @@ user_op::TensorDescInferFn MakeFwTensorDescInferFn(
     JUST(SetParamTensorDesc("mean"));
     JUST(SetParamTensorDesc("inv_variance"));
     if (ctx->has_output("reserve_space", 0)) {
-      CHECK(reserve_space_infer_fn);
+      CHECK_OR_RETURN(reserve_space_infer_fn);
       reserve_space_infer_fn(ctx, &x, ctx->OutputTensorDesc("reserve_space", 0));
     }
     return Maybe<void>::Ok();
@@ -178,7 +178,7 @@ user_op::DataTypeInferFn MakeFwDataTypeInferFn(
     JUST(SetParamDataType("mean"));
     JUST(SetParamDataType("inv_variance"));
     if (ctx->has_output("reserve_space", 0)) {
-      CHECK(reserve_space_infer_fn);
+      CHECK_OR_RETURN(reserve_space_infer_fn);
       reserve_space_infer_fn(ctx, &x, ctx->OutputTensorDesc("reserve_space", 0));
     }
     return Maybe<void>::Ok();
diff --git a/oneflow/user/ops/pack_op.cpp b/oneflow/user/ops/pack_op.cpp
index 62f8d04b2c93337c2a910f9bef3210f87ef4a7c8..97920bf529d8e71e5db6b2be02aeac15c2778190 100644
--- a/oneflow/user/ops/pack_op.cpp
+++ b/oneflow/user/ops/pack_op.cpp
@@ -26,7 +26,7 @@ REGISTER_USER_OP("pack")
     .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
       const user_op::TensorDesc& in_desc = ctx->InputTensorDesc("in", 0);
       const Shape& in_shape = in_desc.shape();
-      CHECK_GT(in_shape.NumAxes(), 0);
+      CHECK_GT_OR_RETURN(in_shape.NumAxes(), 0);
       user_op::TensorDesc* out_desc = ctx->OutputTensorDesc("out", 0);
       *out_desc->mut_is_dynamic() = in_desc.is_dynamic();
       *out_desc->mut_shape() = in_desc.shape();
diff --git a/oneflow/user/ops/pad_op.cpp b/oneflow/user/ops/pad_op.cpp
index 27604575e09059005df78ec8c23ebce151b8ec66..a446b29ce90caee99b42b21be1208ae9d1f5151f 100644
--- a/oneflow/user/ops/pad_op.cpp
+++ b/oneflow/user/ops/pad_op.cpp
@@ -29,8 +29,8 @@ REGISTER_USER_OP("pad")
       const Shape& x_shape = ctx->InputShape("x", 0);
       const auto& padding_before = ctx->Attr<std::vector<int64_t>>("padding_before");
       const auto& padding_after = ctx->Attr<std::vector<int64_t>>("padding_after");
-      CHECK_EQ(padding_before.size(), x_shape.NumAxes());
-      CHECK_EQ(padding_after.size(), x_shape.NumAxes());
+      CHECK_EQ_OR_RETURN(padding_before.size(), x_shape.NumAxes());
+      CHECK_EQ_OR_RETURN(padding_after.size(), x_shape.NumAxes());
       DimVector y_dim_vec(x_shape.NumAxes());
       FOR_RANGE(int64_t, i, 0, x_shape.NumAxes()) {
         y_dim_vec[i] = x_shape.At(i) + padding_before[i] + padding_after[i];
@@ -68,8 +68,8 @@ REGISTER_USER_OP("pad_grad")
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       const auto& padding_before = ctx->Attr<std::vector<int64_t>>("padding_before");
       const auto& padding_after = ctx->Attr<std::vector<int64_t>>("padding_after");
-      CHECK_EQ(padding_before.size(), dy_shape.NumAxes());
-      CHECK_EQ(padding_after.size(), dy_shape.NumAxes());
+      CHECK_EQ_OR_RETURN(padding_before.size(), dy_shape.NumAxes());
+      CHECK_EQ_OR_RETURN(padding_after.size(), dy_shape.NumAxes());
       DimVector dx_dim_vec(dy_shape.NumAxes());
       FOR_RANGE(int64_t, i, 0, dy_shape.NumAxes()) {
         dx_dim_vec[i] = dy_shape.At(i) - padding_before[i] - padding_after[i];
diff --git a/oneflow/user/ops/parallel_cast_op.cpp b/oneflow/user/ops/parallel_cast_op.cpp
index 70fcc6a11d88e9df97d3ffe5d2e80477731b1013..b31cf919f4b36c38d529d88ea44b30705cc58a88 100644
--- a/oneflow/user/ops/parallel_cast_op.cpp
+++ b/oneflow/user/ops/parallel_cast_op.cpp
@@ -67,7 +67,7 @@ REGISTER_USER_OP_GRAD("parallel_cast")
           ctx->FwOp().BindGradTensorWithOpInput(ctx->FwOp().GetGradTensorWithOpOutput("out", 0),
                                                 "in", 0);
         } else {
-          CHECK(IsValidSbpParallelString(grad_sbp_parallel_str));
+          CHECK_OR_RETURN(IsValidSbpParallelString(grad_sbp_parallel_str));
           const std::string grad_op_name = "System-AutoGrad-" + ctx->FwOp().op_name();
           ctx->DefineOp(grad_op_name, [&](user_op::BackwardOpBuilder& builder) {
             return builder.OpTypeName("parallel_cast")
diff --git a/oneflow/user/ops/relu_op.cpp b/oneflow/user/ops/relu_op.cpp
index f9c4d75824b50e7f335f0cad45570658f8e22b16..d2ae5b6bf23328eec4a3a664d9414066683f1186 100644
--- a/oneflow/user/ops/relu_op.cpp
+++ b/oneflow/user/ops/relu_op.cpp
@@ -51,7 +51,7 @@ REGISTER_USER_OP("relu_grad")
       const Shape& y_shape = ctx->InputShape("y", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == y_shape);
+      CHECK_OR_RETURN(dy_shape == y_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/reshape_user_op_util.cpp b/oneflow/user/ops/reshape_user_op_util.cpp
index f570b5d88a24394210da6a49eb46714a08622c72..5ac9a1ddd656e197c626d2a47ba3f2f97745e83a 100644
--- a/oneflow/user/ops/reshape_user_op_util.cpp
+++ b/oneflow/user/ops/reshape_user_op_util.cpp
@@ -70,7 +70,7 @@ Maybe<void> ReshapeUserOpUtil::GetGroupStartInAxis2OutAxis(
     HashMap<int, int>* group_start_in_axis2out_axis) {
   CHECK_NE_OR_RETURN(in_shape.NumAxes(), 0);
   CHECK_NE_OR_RETURN(out_shape.NumAxes(), 0);
-  CHECK_EQ(in_shape.elem_cnt(), out_shape.elem_cnt());
+  CHECK_EQ_OR_RETURN(in_shape.elem_cnt(), out_shape.elem_cnt());
   int in_axis = in_shape.NumAxes() - 1;
   int out_axis = out_shape.NumAxes() - 1;
   while (in_axis >= 0 && out_axis >= 0) {
diff --git a/oneflow/user/ops/same_padding_op.cpp b/oneflow/user/ops/same_padding_op.cpp
index ab516f0bb2a36368f952f8147888678f98e4693a..b54d705df654a6e6e7d90933131db0859915686e 100644
--- a/oneflow/user/ops/same_padding_op.cpp
+++ b/oneflow/user/ops/same_padding_op.cpp
@@ -20,6 +20,34 @@ limitations under the License.
 namespace oneflow {
 namespace user_op {
 
+namespace {
+Maybe<void> SamePaddingTensorDescInferFn(user_op::InferContext* ctx) {
+  const TensorDesc& x_desc = ctx->InputTensorDesc("x", 0);
+  TensorDesc* y_desc = ctx->OutputTensorDesc("y", 0);
+  *y_desc->mut_shape() = x_desc.shape();
+  *y_desc->mut_is_dynamic() = x_desc.is_dynamic();
+  const std::string& data_format = ctx->Attr<std::string>("data_format");
+  const auto& kernel_size = ctx->Attr<std::vector<int32_t>>("kernel_size");
+  const auto& strides = ctx->Attr<std::vector<int32_t>>("strides");
+  const auto& dilation_rate = ctx->Attr<std::vector<int32_t>>("dilation_rate");
+  const size_t idx_offset = IdxOffset(data_format);
+  const int32_t num_spatial_dims = x_desc.shape().NumAxes() - 2;
+  CHECK_EQ_OR_RETURN(num_spatial_dims, kernel_size.size());
+  CHECK_EQ_OR_RETURN(num_spatial_dims, strides.size());
+  CHECK_EQ_OR_RETURN(num_spatial_dims, dilation_rate.size());
+  DimVector y_dim_vec(x_desc.shape().dim_vec());
+  for (int32_t i = 0; i < num_spatial_dims; ++i) {
+    int32_t padding_small = 0;
+    int32_t padding_large = 0;
+    JUST(CalcSamePadding(x_desc.shape().At(idx_offset + i), kernel_size.at(i), dilation_rate.at(i),
+                         strides.at(i), &padding_small, &padding_large));
+    y_dim_vec[idx_offset + i] = x_desc.shape().At(idx_offset + i) + padding_small + padding_large;
+  }
+  *y_desc->mut_shape() = Shape(y_dim_vec);
+  return Maybe<void>::Ok();
+}
+}  // namespace
+
 REGISTER_USER_OP("same_padding")
     .Input("x")
     .Output("y")
@@ -28,32 +56,7 @@ REGISTER_USER_OP("same_padding")
     .Attr<std::vector<int32_t>>("kernel_size")
     .Attr<std::vector<int32_t>>("strides")
     .Attr<std::vector<int32_t>>("dilation_rate")
-    .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
-      const TensorDesc& x_desc = ctx->InputTensorDesc("x", 0);
-      TensorDesc* y_desc = ctx->OutputTensorDesc("y", 0);
-      *y_desc->mut_shape() = x_desc.shape();
-      *y_desc->mut_is_dynamic() = x_desc.is_dynamic();
-      const std::string& data_format = ctx->Attr<std::string>("data_format");
-      const auto& kernel_size = ctx->Attr<std::vector<int32_t>>("kernel_size");
-      const auto& strides = ctx->Attr<std::vector<int32_t>>("strides");
-      const auto& dilation_rate = ctx->Attr<std::vector<int32_t>>("dilation_rate");
-      const size_t idx_offset = IdxOffset(data_format);
-      const int32_t num_spatial_dims = x_desc.shape().NumAxes() - 2;
-      CHECK_EQ_OR_RETURN(num_spatial_dims, kernel_size.size());
-      CHECK_EQ_OR_RETURN(num_spatial_dims, strides.size());
-      CHECK_EQ_OR_RETURN(num_spatial_dims, dilation_rate.size());
-      DimVector y_dim_vec(x_desc.shape().dim_vec());
-      for (int32_t i = 0; i < num_spatial_dims; ++i) {
-        int32_t padding_small = 0;
-        int32_t padding_large = 0;
-        CalcSamePadding(x_desc.shape().At(idx_offset + i), kernel_size.at(i), dilation_rate.at(i),
-                        strides.at(i), &padding_small, &padding_large);
-        y_dim_vec[idx_offset + i] =
-            x_desc.shape().At(idx_offset + i) + padding_small + padding_large;
-      }
-      *y_desc->mut_shape() = Shape(y_dim_vec);
-      return Maybe<void>::Ok();
-    })
+    .SetTensorDescInferFn(SamePaddingTensorDescInferFn)
     .SetGetSbpFn([](user_op::SbpContext* ctx) -> Maybe<void> {
       const int32_t num_axes =
           ctx->LogicalTensorDesc4InputArgNameAndIndex("x_like", 0).shape().NumAxes();
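Beyond the `JUST` conversion, this file also hoists the tensor-desc lambda into `SamePaddingTensorDescInferFn` in an anonymous namespace, which keeps the registration chain short and gives the inference logic a name that can be tested or reused on its own. A toy sketch of why a named function drops in where the lambda was (all types here, `Ctx`/`InferFn`/`OpRegistry`, are hypothetical, not OneFlow's API):

```cpp
#include <functional>
#include <iostream>
#include <utility>

struct Ctx { int num_axes = 4; };
using InferFn = std::function<bool(Ctx*)>;  // toy convention: true == Ok

namespace {
// Named infer function: same signature a lambda would have had.
bool SamePaddingInferSketch(Ctx* ctx) { return ctx->num_axes > 2; }
}  // namespace

struct OpRegistry {
  InferFn infer_fn;
  OpRegistry& SetTensorDescInferFn(InferFn fn) {
    infer_fn = std::move(fn);
    return *this;  // chainable, like the REGISTER_USER_OP(...) builder above
  }
};

int main() {
  OpRegistry reg;
  // A function name slots in wherever the inline lambda used to sit.
  reg.SetTensorDescInferFn(SamePaddingInferSketch);
  Ctx ctx;
  std::cout << (reg.infer_fn(&ctx) ? "ok" : "error") << "\n";
}
```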
diff --git a/oneflow/user/ops/sigmoid_op.cpp b/oneflow/user/ops/sigmoid_op.cpp
index 6d0e393c7ad23a03c9d6ad2c9fa93f1922a358ac..3af60af6440cf2c824a80ac973e9930a32ea94f9 100644
--- a/oneflow/user/ops/sigmoid_op.cpp
+++ b/oneflow/user/ops/sigmoid_op.cpp
@@ -51,7 +51,7 @@ REGISTER_USER_OP("sigmoid_grad")
       const Shape& y_shape = ctx->InputShape("y", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == y_shape);
+      CHECK_OR_RETURN(dy_shape == y_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/softmax_op.cpp b/oneflow/user/ops/softmax_op.cpp
index 67491a6cc3da5c8f6d61b53f39fc4cebc071c181..e4c8ad8b730ce5a2bcab833e8d436182583da2a3 100644
--- a/oneflow/user/ops/softmax_op.cpp
+++ b/oneflow/user/ops/softmax_op.cpp
@@ -49,7 +49,7 @@ REGISTER_USER_OP("softmax_grad")
       const Shape& y_shape = ctx->InputShape("y", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == y_shape);
+      CHECK_OR_RETURN(dy_shape == y_shape);
       *dx_shape = dy_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/squeeze_op.cpp b/oneflow/user/ops/squeeze_op.cpp
index 9cc78594c1e100253100d8e45bd0dfd3b63ba10c..33691aab8a435dfc8fa9c156f44f8092f6b9c738 100644
--- a/oneflow/user/ops/squeeze_op.cpp
+++ b/oneflow/user/ops/squeeze_op.cpp
@@ -23,8 +23,8 @@ Maybe<void> TransformNegativeAxesToPositive(const std::vector<int32_t>& axes_vec
                                             const int32_t num_axes, AxisVector* fixed_axes_vec) {
   fixed_axes_vec->resize(axes_vec.size());
   FOR_RANGE(size_t, i, 0, fixed_axes_vec->size()) {
-    CHECK_GE(axes_vec[i], -num_axes);
-    CHECK_LT(axes_vec[i], num_axes);
+    CHECK_GE_OR_RETURN(axes_vec[i], -num_axes);
+    CHECK_LT_OR_RETURN(axes_vec[i], num_axes);
     fixed_axes_vec->at(i) = axes_vec[i] >= 0 ? axes_vec[i] : axes_vec[i] + num_axes;
   }
   return Maybe<void>::Ok();
diff --git a/oneflow/user/ops/test_ops.cpp b/oneflow/user/ops/test_ops.cpp
index d44cdc73d282673ea643e0b141e0c75922b43350..efbff608f899de7721f1bc22970c0937c5fc3a8b 100644
--- a/oneflow/user/ops/test_ops.cpp
+++ b/oneflow/user/ops/test_ops.cpp
@@ -44,7 +44,7 @@ REGISTER_USER_OP("ccrelu_grad")
       const Shape& y_shape = ctx->InputShape("y", 0);
       const Shape& dy_shape = ctx->InputShape("dy", 0);
       Shape* dx_shape = ctx->OutputShape("dx", 0);
-      CHECK(dy_shape == y_shape);
+      CHECK_OR_RETURN(dy_shape == y_shape);
       *dx_shape = y_shape;
       return Maybe<void>::Ok();
     })
@@ -85,7 +85,7 @@ REGISTER_USER_OP("TestReshape")
       const Shape& in_shape = ctx->InputShape("in", 0);
       Shape* out_shape = ctx->OutputShape("out", 0);
       const Shape& conf_shape = ctx->Attr<Shape>("shape");
-      CHECK_EQ(in_shape.NumAxes(), conf_shape.NumAxes());
+      CHECK_EQ_OR_RETURN(in_shape.NumAxes(), conf_shape.NumAxes());
       *out_shape = conf_shape;
       return Maybe<void>::Ok();
     })
@@ -152,7 +152,7 @@ REGISTER_USER_OP("TestSourceMultiGpuFixedOutNum")
       *out_shape = Shape({bs.At(parallel_ctx.parallel_id()).size()});
 
       const cfg::SbpParallel& out_sbp = ctx->SbpParallel4ArgNameAndIndex("out", 0);
-      CHECK(out_sbp.has_split_parallel() && out_sbp.split_parallel().axis() == 0);
+      CHECK_OR_RETURN(out_sbp.has_split_parallel() && out_sbp.split_parallel().axis() == 0);
       return Maybe<void>::Ok();
     })
     .SetDataTypeInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
@@ -176,7 +176,7 @@ REGISTER_USER_OP("TestMultiInput")
       const Shape& x1_shape = ctx->InputShape("x1", 0);
       const Shape& x2_shape = ctx->InputShape("x2", 0);
       Shape* y_shape = ctx->OutputShape("y", 0);
-      CHECK(x1_shape == x2_shape);
+      CHECK_OR_RETURN(x1_shape == x2_shape);
       *y_shape = x1_shape;
       return Maybe<void>::Ok();
     })
diff --git a/oneflow/user/ops/transpose_ops.cpp b/oneflow/user/ops/transpose_ops.cpp
index 05ed4bc1abfb7326064b82ea9485b1962a6c346b..8a3b849ef7ff3b3a387a42d4ff9638454a2ae761 100644
--- a/oneflow/user/ops/transpose_ops.cpp
+++ b/oneflow/user/ops/transpose_ops.cpp
@@ -55,12 +55,12 @@ REGISTER_USER_OP("transpose")
       const user_op::TensorDesc& input_tensor =
           ctx->LogicalTensorDesc4InputArgNameAndIndex("input", 0);
       const auto& perm = ctx->Attr<std::vector<int32_t>>("perm");
-      CHECK_EQ(perm.size(), input_tensor.shape().NumAxes());
+      CHECK_EQ_OR_RETURN(perm.size(), input_tensor.shape().NumAxes());
       FOR_RANGE(int32_t, i, 0, perm.size()) {
         int32_t axis = perm.at(i);
         if (axis < 0) { axis += perm.size(); }
-        CHECK_GE(axis, 0);
-        CHECK_LT(axis, perm.size());
+        CHECK_GE_OR_RETURN(axis, 0);
+        CHECK_LT_OR_RETURN(axis, perm.size());
         ctx->NewBuilder().Split(ctx->inputs(), axis).Split(ctx->outputs(), i).Build();
       }
       ctx->NewBuilder().PartialSum(ctx->inputs()).PartialSum(ctx->outputs()).Build();
diff --git a/oneflow/user/ops/unpack_op.cpp b/oneflow/user/ops/unpack_op.cpp
index e8a34bf6fa5502c6077391cf7b050a2358a83254..7c5dba7ea829f310101e3be2b6ef1ceed1ba80cd 100644
--- a/oneflow/user/ops/unpack_op.cpp
+++ b/oneflow/user/ops/unpack_op.cpp
@@ -26,9 +26,9 @@ REGISTER_USER_OP("unpack")
     .SetTensorDescInferFn([](user_op::InferContext* ctx) -> Maybe<void> {
       const user_op::TensorDesc& in_desc = ctx->InputTensorDesc("in", 0);
       const Shape& in_shape = in_desc.shape();
-      CHECK_GT(in_shape.NumAxes(), 0);
+      CHECK_GT_OR_RETURN(in_shape.NumAxes(), 0);
       const auto unpack_num = ctx->Attr<int32_t>("unpack_num");
-      CHECK_EQ(in_shape.At(0) % unpack_num, 0);
+      CHECK_EQ_OR_RETURN(in_shape.At(0) % unpack_num, 0);
       user_op::TensorDesc* out_desc = ctx->OutputTensorDesc("out", 0);
       *out_desc->mut_shape() = in_desc.shape();
       out_desc->mut_shape()->Set(0, in_shape.At(0) / unpack_num);