From de29655fdc4c29651023ea192bd28fff47d492db Mon Sep 17 00:00:00 2001
From: liufengwei0103 <2472937968@qq.com>
Date: Sat, 17 Jul 2021 20:53:39 +0800
Subject: [PATCH] gen_bw_fn return maybe (#5454)

* modified SetInputArgModifyFn

* Delete the CHECK changes in the assign_op.cpp file

* Format

* Modified the OutputArgModifyFn interface

* add return

* maybe error stack from CheckAndConstructOp to OutputArgModifier callback function

* maybe error stack from CheckAndConstructOp to OutputArgModifier callback function

* OutputArgModifier return maybe part_1

* maybe error stack from CheckAndConstructOp to OutputArgModifier callback function

* input_arg_modifier return maybe

* gen_bw_fn return maybe

* add MakeGenBackwardOpConf because a statement-expression is not allowed outside a function, which made JUST fail inside a lambda

* add maybe after merge master

* fix bug: JUST in lambda

Co-authored-by: aishangjj <702572275@qq.com>
Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
---
 .../core/framework/user_op_grad_registry.h    |  2 +-
 oneflow/core/job_rewriter/user_grad.cpp       |  6 +--
 oneflow/user/kernels/pool_gpu_kernel.cpp      |  1 -
 oneflow/user/ops/add_n_op.cpp                 |  3 +-
 oneflow/user/ops/amp_white_identity_op.cpp    |  4 +-
 oneflow/user/ops/batch_gather_op.cpp          |  4 +-
 oneflow/user/ops/bias_add_op.cpp              |  4 +-
 oneflow/user/ops/broadcast_ops_grad.cpp       | 16 ++++--
 oneflow/user/ops/cast_op.cpp                  |  3 +-
 oneflow/user/ops/cast_to_static_shape_op.cpp  |  4 +-
 oneflow/user/ops/clip_by_value_op.cpp         | 12 +++--
 oneflow/user/ops/combined_margin_loss_op.cpp  |  4 +-
 oneflow/user/ops/concat_op.cpp                |  3 +-
 oneflow/user/ops/conv_op.cpp                  |  7 +--
 oneflow/user/ops/deconv_op.cpp                |  8 +--
 oneflow/user/ops/dropout_op.cpp               |  3 +-
 oneflow/user/ops/expand_dims_op.cpp           |  4 +-
 oneflow/user/ops/expand_op.cpp                |  3 +-
 oneflow/user/ops/fake_quantization_op.cpp     |  4 +-
 oneflow/user/ops/flatten_op.cpp               |  3 +-
 oneflow/user/ops/fused_bias_add_op.cpp        |  8 ++-
 ...fused_scale_tril_softmax_mask_scale_op.cpp |  4 +-
 oneflow/user/ops/gather_op.cpp                |  3 +-
 oneflow/user/ops/gelu_op.cpp                  |  3 +-
 oneflow/user/ops/identity_op.cpp              |  4 +-
 oneflow/user/ops/l2_normalize_op.cpp          |  4 +-
 oneflow/user/ops/layer_norm_op.cpp            |  4 +-
 oneflow/user/ops/leaky_relu_op.cpp            |  4 +-
 .../user/ops/math_binary_elementwise_ops.cpp  | 54 ++++++++++---------
 .../user/ops/math_unary_elementwise_op.cpp    | 28 +++++-----
 oneflow/user/ops/matmul_op.cpp                | 11 ++--
 oneflow/user/ops/multiply_op.cpp              |  4 +-
 oneflow/user/ops/nd_index_slice_ops.cpp       | 16 ++++--
 oneflow/user/ops/nvtx_range_op.cpp            |  8 ++-
 oneflow/user/ops/pad2d_ops.cpp                | 12 +++--
 oneflow/user/ops/pad_op.cpp                   |  7 ++-
 oneflow/user/ops/pool_op.cpp                  |  5 +-
 oneflow/user/ops/pooling_op.cpp               |  5 +-
 oneflow/user/ops/prelu_op.cpp                 |  3 +-
 oneflow/user/ops/reduce_ops.cpp               |  8 ++-
 oneflow/user/ops/reshape_like_op.cpp          |  4 +-
 oneflow/user/ops/reshape_op.cpp               |  3 +-
 oneflow/user/ops/same_padding_op.cpp          |  4 +-
 oneflow/user/ops/scalar_add_op.cpp            |  4 +-
 oneflow/user/ops/scalar_by_tensor_op.cpp      | 16 ++++--
 oneflow/user/ops/scalar_mul_op.cpp            |  4 +-
 oneflow/user/ops/sigmoid_cross_entropy_op.cpp |  4 +-
 oneflow/user/ops/sigmoid_op.cpp               |  3 +-
 oneflow/user/ops/slice_op.cpp                 |  3 +-
 oneflow/user/ops/smooth_l1_loss_op.cpp        |  4 +-
 oneflow/user/ops/softmax_cross_entropy_op.cpp |  4 +-
 oneflow/user/ops/softmax_op.cpp               |  3 +-
 oneflow/user/ops/sparse_cross_entropy_op.cpp  | 13 +++--
 .../ops/sparse_softmax_cross_entropy_op.cpp   | 13 +++--
 oneflow/user/ops/split_like_op.cpp            |  3 +-
 oneflow/user/ops/squeeze_op.cpp               |  3 +-
 oneflow/user/ops/test_ops.cpp                 |  7 ++-
 oneflow/user/ops/transpose_ops.cpp            |  4 +-
 oneflow/user/ops/tril_op.cpp                  |  7 ++-
 oneflow/user/ops/tuple_identity_op.cpp        |  4 +-
 oneflow/user/ops/two_stage_reduce_ops.cpp     | 34 +++++++-----
 .../ops/unsorted_batch_segment_sum_op.cpp     |  4 +-
 oneflow/user/ops/unsorted_segment_sum_op.cpp  |  4 +-
 oneflow/user/ops/upsample_op.cpp              | 32 ++++++++---
 64 files changed, 318 insertions(+), 159 deletions(-)

diff --git a/oneflow/core/framework/user_op_grad_registry.h b/oneflow/core/framework/user_op_grad_registry.h
index a0190e84c..cd0134d72 100644
--- a/oneflow/core/framework/user_op_grad_registry.h
+++ b/oneflow/core/framework/user_op_grad_registry.h
@@ -24,7 +24,7 @@ namespace oneflow {
 namespace user_op {
 
 using AddOpFn = std::function<void(const UserOpConfWrapper&)>;
-using GenBackwardOpConfFn = std::function<void(const UserOpWrapper&, AddOpFn)>;
+using GenBackwardOpConfFn = std::function<Maybe<void>(const UserOpWrapper&, AddOpFn)>;
 using BackwardOpConfGenFn = std::function<void(BackwardOpConfContext*)>;
 
 struct OpGradRegistryResult {
diff --git a/oneflow/core/job_rewriter/user_grad.cpp b/oneflow/core/job_rewriter/user_grad.cpp
index ee6770ba6..9f9ca3668 100644
--- a/oneflow/core/job_rewriter/user_grad.cpp
+++ b/oneflow/core/job_rewriter/user_grad.cpp
@@ -25,7 +25,7 @@ Maybe<void> GenerateBackwardOpConf(
     const Operator& fw_op, std::vector<OperatorConf>* bw_op_confs,
     const std::function<LogicalBlobId*(const std::string&)>& DiffLbi4BnInOp,
     const std::function<const BlobDesc&(const std::string&)>& LogicalBlobDesc4BnInOp) {
-  CHECK(fw_op.op_conf().has_user_conf());
+  CHECK_OR_RETURN(fw_op.op_conf().has_user_conf());
   const UserOpConf& user_conf = fw_op.op_conf().user_conf();
   const user_op::OpGradRegistryResult* val =
       user_op::UserOpRegistryMgr::Get().GetOpGradRegistryResult(user_conf.op_type_name());
@@ -43,13 +43,13 @@ Maybe<void> GenerateBackwardOpConf(
     auto AddOp = [&](const user_op::UserOpConfWrapper& wrapper) {
       bw_op_confs->push_back(wrapper.op_conf());
     };
-    val->gen_bw_fn(fw_user_op, AddOp);
+    JUST(val->gen_bw_fn(fw_user_op, AddOp));
   }
 
   for (const std::string& ibn : fw_op.input_bns()) {
     LogicalBlobId* lbi = DiffLbi4BnInOp(ibn);
     if (lbi != nullptr) {
-      CHECK(lbi->has_op_name() && lbi->has_blob_name())
+      CHECK_OR_RETURN(lbi->has_op_name() && lbi->has_blob_name())
           << " user_op: " << fw_op.op_name() << " op_type_name: " << user_conf.op_type_name()
           << " 's input blob " << ibn << " has not generate input diff blob !";
     }
diff --git a/oneflow/user/kernels/pool_gpu_kernel.cpp b/oneflow/user/kernels/pool_gpu_kernel.cpp
index 53206e544..14e8d935f 100644
--- a/oneflow/user/kernels/pool_gpu_kernel.cpp
+++ b/oneflow/user/kernels/pool_gpu_kernel.cpp
@@ -81,7 +81,6 @@ class GPUPoolOpKernelState final : public user_op::OpKernelState {
   static std::shared_ptr<GPUPoolOpKernelState> FromKernelComputeContext(
       const int32_t& dim, const std::string& pooling_type, user_op::KernelComputeContext* ctx) {
     if (pooling_type != "MAX" && pooling_type != "AVG") { UNIMPLEMENTED(); }
-    const user_op::TensorDesc* x_desc = ctx->TensorDesc4ArgNameAndIndex("x", 0);
     const ShapeView& x_shape = ctx->Tensor4ArgNameAndIndex("x", 0)->shape();
     const std::string& data_format = ctx->Attr<std::string>("data_format");
     const std::string& padding = ctx->Attr<std::string>("padding");
diff --git a/oneflow/user/ops/add_n_op.cpp b/oneflow/user/ops/add_n_op.cpp
index de0b8b508..c835e63fb 100644
--- a/oneflow/user/ops/add_n_op.cpp
+++ b/oneflow/user/ops/add_n_op.cpp
@@ -53,13 +53,14 @@ REGISTER_USER_OP("add_n")
     });
 
 REGISTER_USER_OP_GRAD("add_n").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                         user_op::AddOpFn AddOp) {
+                                                         user_op::AddOpFn AddOp) -> Maybe<void> {
   int32_t in_size = op.input_size("in");
   for (int i = 0; i < in_size; ++i) {
     if (op.NeedGenGradTensor4OpInput("in", i)) {
       op.BindGradTensorWithOpInput(op.GetGradTensorWithOpOutput("out", 0), "in", i);
     }
   }
+  return Maybe<void>::Ok();
 });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/amp_white_identity_op.cpp b/oneflow/user/ops/amp_white_identity_op.cpp
index ca8380554..269b08c3e 100644
--- a/oneflow/user/ops/amp_white_identity_op.cpp
+++ b/oneflow/user/ops/amp_white_identity_op.cpp
@@ -45,7 +45,8 @@ REGISTER_USER_OP("amp_white_identity")
     });
 
 REGISTER_USER_OP_GRAD("amp_white_identity")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("in", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -56,6 +57,7 @@ REGISTER_USER_OP_GRAD("amp_white_identity")
         op.BindGradTensorWithOpInput(grad_op.output("out", 0), "in", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace
diff --git a/oneflow/user/ops/batch_gather_op.cpp b/oneflow/user/ops/batch_gather_op.cpp
index 0c9ae8bec..3ac699e48 100644
--- a/oneflow/user/ops/batch_gather_op.cpp
+++ b/oneflow/user/ops/batch_gather_op.cpp
@@ -85,7 +85,8 @@ REGISTER_USER_OP("batch_gather")
     });
 
 REGISTER_USER_OP_GRAD("batch_gather")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       bool need_grad_in = op.NeedGenGradTensor4OpInput("in", 0);
       if (need_grad_in) {
         const Shape in_shape = op.TensorDesc4ArgNameAndIndex("in", 0).shape();
@@ -102,6 +103,7 @@ REGISTER_USER_OP_GRAD("batch_gather")
         op.BindGradTensorWithOpInput(in_grad_op.output("out", 0), "in", 0);
         AddOp(in_grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/bias_add_op.cpp b/oneflow/user/ops/bias_add_op.cpp
index bfbe323bc..da5d7097d 100644
--- a/oneflow/user/ops/bias_add_op.cpp
+++ b/oneflow/user/ops/bias_add_op.cpp
@@ -58,7 +58,8 @@ REGISTER_USER_OP("bias_add")
     });
 
 REGISTER_USER_OP_GRAD("bias_add")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("a", 0)) {
         op.BindGradTensorWithOpInput(op.GetGradTensorWithOpOutput("out", 0), "a", 0);
       }
@@ -79,6 +80,7 @@ REGISTER_USER_OP_GRAD("bias_add")
         AddOp(grad_op);
         op.BindGradTensorWithOpInput(grad_op.output("output_tensor", 0), "b", 0);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/broadcast_ops_grad.cpp b/oneflow/user/ops/broadcast_ops_grad.cpp
index 67f481da6..7b2bcf03d 100644
--- a/oneflow/user/ops/broadcast_ops_grad.cpp
+++ b/oneflow/user/ops/broadcast_ops_grad.cpp
@@ -54,7 +54,8 @@ std::string CreateReduceSumLikeBlob(const std::string& in_lbn, const Shape& in_s
 }  // namespace
 
 REGISTER_USER_OP_GRAD("broadcast_add")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       const Shape& z_shape = op.TensorDesc4ArgNameAndIndex("z", 0).shape();
       const std::string& dz_lbn = op.GetGradTensorWithOpOutput("z", 0);
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
@@ -71,10 +72,12 @@ REGISTER_USER_OP_GRAD("broadcast_add")
             CreateReduceSumLikeBlob(dz_lbn, z_shape, y_lbn, y_shape, op.op_name() + "_y", AddOp);
         op.BindGradTensorWithOpInput(out_lbn, "y", 0);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("broadcast_sub")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       const Shape& z_shape = op.TensorDesc4ArgNameAndIndex("z", 0).shape();
       const std::string& dz_lbn = op.GetGradTensorWithOpOutput("z", 0);
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
@@ -102,10 +105,12 @@ REGISTER_USER_OP_GRAD("broadcast_sub")
             scalar_mul_op.output("out", 0), z_shape, y_lbn, y_shape, op.op_name() + "_y", AddOp);
         op.BindGradTensorWithOpInput(out_lbn, "y", 0);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("broadcast_mul")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       const Shape& z_shape = op.TensorDesc4ArgNameAndIndex("z", 0).shape();
       const std::string& dz_lbn = op.GetGradTensorWithOpOutput("z", 0);
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
@@ -136,10 +141,12 @@ REGISTER_USER_OP_GRAD("broadcast_mul")
             broadcast_mul_op.output("z", 0), z_shape, y_lbn, y_shape, op.op_name() + "_y", AddOp);
         op.BindGradTensorWithOpInput(out_lbn, "y", 0);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("broadcast_div")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       const std::string& dz_lbn = op.GetGradTensorWithOpOutput("z", 0);
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         const Shape& z_shape = op.TensorDesc4ArgNameAndIndex("z", 0).shape();
@@ -167,6 +174,7 @@ REGISTER_USER_OP_GRAD("broadcast_div")
         op.BindGradTensorWithOpInput(grad_op.output("dy", 0), "y", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/cast_op.cpp b/oneflow/user/ops/cast_op.cpp
index 7ff3c0997..2ae1c246b 100644
--- a/oneflow/user/ops/cast_op.cpp
+++ b/oneflow/user/ops/cast_op.cpp
@@ -51,7 +51,7 @@ REGISTER_USER_OP("cast")
     .SetDataTypeInferFn(InferDataType);
 
 REGISTER_USER_OP_GRAD("cast").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                        user_op::AddOpFn AddOp) {
+                                                        user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     const DataType& dtype = op.TensorDesc4ArgNameAndIndex("in", 0).data_type();
@@ -64,6 +64,7 @@ REGISTER_USER_OP_GRAD("cast").SetGenBackwardOpConfFn([](const user_op::UserOpWra
     op.BindGradTensorWithOpInput(cast_grad_op.output("out", 0), "in", 0);
     AddOp(cast_grad_op);
   }
+  return Maybe<void>::Ok();
 });
 
 }  // namespace
diff --git a/oneflow/user/ops/cast_to_static_shape_op.cpp b/oneflow/user/ops/cast_to_static_shape_op.cpp
index 0ce4a9667..749f4940b 100644
--- a/oneflow/user/ops/cast_to_static_shape_op.cpp
+++ b/oneflow/user/ops/cast_to_static_shape_op.cpp
@@ -48,7 +48,8 @@ REGISTER_USER_OP("cast_to_static_shape")
     });
 
 REGISTER_USER_OP_GRAD("cast_to_static_shape")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("input", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper identity_op =
@@ -59,6 +60,7 @@ REGISTER_USER_OP_GRAD("cast_to_static_shape")
         op.BindGradTensorWithOpInput(identity_op.output("out", 0), "input", 0);
         AddOp(identity_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/clip_by_value_op.cpp b/oneflow/user/ops/clip_by_value_op.cpp
index 65bbdb26e..acadfc6ca 100644
--- a/oneflow/user/ops/clip_by_value_op.cpp
+++ b/oneflow/user/ops/clip_by_value_op.cpp
@@ -128,7 +128,8 @@ REGISTER_USER_OP("clip_by_scalar_max_grad")
     .SetDataTypeInferFn(InferClipGradDataType);
 
 REGISTER_USER_OP_GRAD("clip_by_scalar")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -144,10 +145,12 @@ REGISTER_USER_OP_GRAD("clip_by_scalar")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("clip_by_scalar_min")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -161,10 +164,12 @@ REGISTER_USER_OP_GRAD("clip_by_scalar_min")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("clip_by_scalar_max")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -178,6 +183,7 @@ REGISTER_USER_OP_GRAD("clip_by_scalar_max")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/combined_margin_loss_op.cpp b/oneflow/user/ops/combined_margin_loss_op.cpp
index 825b59901..d420cb352 100644
--- a/oneflow/user/ops/combined_margin_loss_op.cpp
+++ b/oneflow/user/ops/combined_margin_loss_op.cpp
@@ -106,7 +106,8 @@ REGISTER_USER_OP("combined_margin_loss_grad")
     });
 
 REGISTER_USER_OP_GRAD("combined_margin_loss")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op = builder.Op("combined_margin_loss_grad")
@@ -122,6 +123,7 @@ REGISTER_USER_OP_GRAD("combined_margin_loss")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/concat_op.cpp b/oneflow/user/ops/concat_op.cpp
index 9a7c3350e..253bb465e 100644
--- a/oneflow/user/ops/concat_op.cpp
+++ b/oneflow/user/ops/concat_op.cpp
@@ -68,7 +68,7 @@ Maybe<void> GetSbpSignature(user_op::SbpContext* ctx) {
   return Maybe<void>::Ok();
 }
 
-void GenGrapOp(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+Maybe<void> GenGrapOp(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
   bool need_grad = false;
   const int32_t in_size = op.input_size("in");
   FOR_RANGE(int32_t, i, 0, in_size) {
@@ -90,6 +90,7 @@ void GenGrapOp(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
     }
     AddOp(grad_op);
   }
+  return Maybe<void>::Ok();
 }
 
 Maybe<void> InferDataType(user_op::InferContext* ctx) {
diff --git a/oneflow/user/ops/conv_op.cpp b/oneflow/user/ops/conv_op.cpp
index 811d58d7d..e29c110ee 100644
--- a/oneflow/user/ops/conv_op.cpp
+++ b/oneflow/user/ops/conv_op.cpp
@@ -159,7 +159,7 @@ Maybe<void> CheckAttr(const user_op::UserOpDefWrapper& def,
   }
 }
 
-void GenerateBackwardOpConf4Conv(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+Maybe<void> GenerateBackwardOpConf4Conv(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
   const auto& padding_before = op.attr<std::vector<int32_t>>("padding_before");
   std::string data_format = op.attr<std::string>("data_format");
   std::vector<int32_t> kernel_size = op.attr<std::vector<int32_t>>("kernel_size");
@@ -168,8 +168,8 @@ void GenerateBackwardOpConf4Conv(const user_op::UserOpWrapper& op, user_op::AddO
   int32_t groups = op.attr<int32_t>("groups");
 
   int32_t ndims = kernel_size.size();
-  CHECK_EQ(ndims, strides.size());
-  CHECK_EQ(ndims, dilation_rate.size());
+  CHECK_EQ_OR_RETURN(ndims, strides.size());
+  CHECK_EQ_OR_RETURN(ndims, dilation_rate.size());
 
   if (op.user_op_conf().has_input("bias", 0)) {
     if (op.NeedGenGradTensor4OpInput("bias", 0)) {
@@ -224,6 +224,7 @@ void GenerateBackwardOpConf4Conv(const user_op::UserOpWrapper& op, user_op::AddO
     op.BindGradTensorWithOpInput(data_grad_op.output("dx", 0), "in", 0);
     AddOp(data_grad_op);
   }
+  return Maybe<void>::Ok();
 }
 
 }  // namespace
diff --git a/oneflow/user/ops/deconv_op.cpp b/oneflow/user/ops/deconv_op.cpp
index 0ba8355ca..bf0532b6d 100644
--- a/oneflow/user/ops/deconv_op.cpp
+++ b/oneflow/user/ops/deconv_op.cpp
@@ -136,7 +136,8 @@ Maybe<void> CheckAttr(const user_op::UserOpDefWrapper& def,
   }
 }
 
-void GenerateBackwardOpConf4DeConv(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+Maybe<void> GenerateBackwardOpConf4DeConv(const user_op::UserOpWrapper& op,
+                                          user_op::AddOpFn AddOp) {
   const std::string& data_format = op.attr<std::string>("data_format");
   const auto& padding_before = op.attr<std::vector<int32_t>>("padding_before");
   const auto& kernel_size = op.attr<std::vector<int32_t>>("kernel_size");
@@ -145,8 +146,8 @@ void GenerateBackwardOpConf4DeConv(const user_op::UserOpWrapper& op, user_op::Ad
   const Shape& weight_shape = op.TensorDesc4ArgNameAndIndex("weight", 0).shape();
 
   const int32_t ndims = kernel_size.size();
-  CHECK_EQ(ndims, strides.size());
-  CHECK_EQ(ndims, dilation_rate.size());
+  CHECK_EQ_OR_RETURN(ndims, strides.size());
+  CHECK_EQ_OR_RETURN(ndims, dilation_rate.size());
 
   if (op.NeedGenGradTensor4OpInput("weight", 0)) {
     auto filter_grad_op =
@@ -186,6 +187,7 @@ void GenerateBackwardOpConf4DeConv(const user_op::UserOpWrapper& op, user_op::Ad
     op.BindGradTensorWithOpInput(data_grad_op.output("out", 0), "in", 0);
     AddOp(data_grad_op);
   }
+  return Maybe<void>::Ok();
 }
 
 }  // namespace
diff --git a/oneflow/user/ops/dropout_op.cpp b/oneflow/user/ops/dropout_op.cpp
index eb865cd9f..027bd9928 100644
--- a/oneflow/user/ops/dropout_op.cpp
+++ b/oneflow/user/ops/dropout_op.cpp
@@ -93,7 +93,7 @@ REGISTER_USER_OP("dropout_grad")
     });
 
 REGISTER_USER_OP_GRAD("dropout").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                           user_op::AddOpFn AddOp) {
+                                                           user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper dropout_grad_op =
@@ -106,6 +106,7 @@ REGISTER_USER_OP_GRAD("dropout").SetGenBackwardOpConfFn([](const user_op::UserOp
     op.BindGradTensorWithOpInput(dropout_grad_op.output("dx", 0), "in", 0);
     AddOp(dropout_grad_op);
   }
+  return Maybe<void>::Ok();
 });
 
 REGISTER_NO_GRAD_USER_OP("random_mask_like")
diff --git a/oneflow/user/ops/expand_dims_op.cpp b/oneflow/user/ops/expand_dims_op.cpp
index 52aae4094..99c1be0f7 100644
--- a/oneflow/user/ops/expand_dims_op.cpp
+++ b/oneflow/user/ops/expand_dims_op.cpp
@@ -67,7 +67,8 @@ REGISTER_USER_OP("expand_dims")
     });
 
 REGISTER_USER_OP_GRAD("expand_dims")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("in", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -79,6 +80,7 @@ REGISTER_USER_OP_GRAD("expand_dims")
         op.BindGradTensorWithOpInput(grad_op.output("out", 0), "in", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/expand_op.cpp b/oneflow/user/ops/expand_op.cpp
index 6aa86d393..530f7cad1 100644
--- a/oneflow/user/ops/expand_op.cpp
+++ b/oneflow/user/ops/expand_op.cpp
@@ -67,7 +67,7 @@ REGISTER_USER_OP("expand_grad")
     });
 
 REGISTER_USER_OP_GRAD("expand").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                          user_op::AddOpFn AddOp) {
+                                                          user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper grad_op =
@@ -80,6 +80,7 @@ REGISTER_USER_OP_GRAD("expand").SetGenBackwardOpConfFn([](const user_op::UserOpW
     op.BindGradTensorWithOpInput(grad_op.output("out", 0), "in", 0);
     AddOp(grad_op);
   }
+  return Maybe<void>::Ok();
 });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/fake_quantization_op.cpp b/oneflow/user/ops/fake_quantization_op.cpp
index 512823f8b..d8fa0242f 100644
--- a/oneflow/user/ops/fake_quantization_op.cpp
+++ b/oneflow/user/ops/fake_quantization_op.cpp
@@ -114,7 +114,8 @@ REGISTER_USER_OP("fake_quantization")
     });
 
 REGISTER_USER_OP_GRAD("fake_quantization")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("in", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper identity_op =
@@ -125,6 +126,7 @@ REGISTER_USER_OP_GRAD("fake_quantization")
         op.BindGradTensorWithOpInput(identity_op.output("out", 0), "in", 0);
         AddOp(identity_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace
diff --git a/oneflow/user/ops/flatten_op.cpp b/oneflow/user/ops/flatten_op.cpp
index 869b51cd4..6d9450fbf 100644
--- a/oneflow/user/ops/flatten_op.cpp
+++ b/oneflow/user/ops/flatten_op.cpp
@@ -92,7 +92,7 @@ REGISTER_USER_OP("flatten")
     .SetDataTypeInferFn(DataTypeInferFn);
 
 REGISTER_USER_OP_GRAD("flatten").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                           user_op::AddOpFn AddOp) {
+                                                           user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper reshape_grad_op =
@@ -104,6 +104,7 @@ REGISTER_USER_OP_GRAD("flatten").SetGenBackwardOpConfFn([](const user_op::UserOp
     op.BindGradTensorWithOpInput(reshape_grad_op.output("out", 0), "in", 0);
     AddOp(reshape_grad_op);
   }
+  return Maybe<void>::Ok();
 });
 
 }  // namespace
diff --git a/oneflow/user/ops/fused_bias_add_op.cpp b/oneflow/user/ops/fused_bias_add_op.cpp
index b61d473e4..e004226d6 100644
--- a/oneflow/user/ops/fused_bias_add_op.cpp
+++ b/oneflow/user/ops/fused_bias_add_op.cpp
@@ -103,7 +103,8 @@ REGISTER_USER_OP("fused_bias_add_gelu_grad")
     });
 
 REGISTER_USER_OP_GRAD("fused_bias_add_gelu")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("a", 0) || op.NeedGenGradTensor4OpInput("b", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_gelu_grad");
         user_op::UserOpConfWrapper bias_add_gelu_grad_op =
@@ -137,6 +138,7 @@ REGISTER_USER_OP_GRAD("fused_bias_add_gelu")
           op.BindGradTensorWithOpInput(grad_op.output("output_tensor", 0), "b", 0);
         }
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP("fused_bias_add_mask_scale")
@@ -192,7 +194,8 @@ REGISTER_USER_OP("fused_bias_add_mask_scale")
     });
 
 REGISTER_USER_OP_GRAD("fused_bias_add_mask_scale")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("a", 0) || op.NeedGenGradTensor4OpInput("b", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_gelu_grad");
         user_op::UserOpConfWrapper dropout_grad_op =
@@ -225,6 +228,7 @@ REGISTER_USER_OP_GRAD("fused_bias_add_mask_scale")
           op.BindGradTensorWithOpInput(grad_op.output("output_tensor", 0), "b", 0);
         }
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/fused_scale_tril_softmax_mask_scale_op.cpp b/oneflow/user/ops/fused_scale_tril_softmax_mask_scale_op.cpp
index 12fbe80dc..88e3fc591 100644
--- a/oneflow/user/ops/fused_scale_tril_softmax_mask_scale_op.cpp
+++ b/oneflow/user/ops/fused_scale_tril_softmax_mask_scale_op.cpp
@@ -103,7 +103,8 @@ REGISTER_USER_OP("fused_tril_scale_softmax_mask_scale_grad")
     });
 
 REGISTER_USER_OP_GRAD("fused_tril_scale_softmax_mask_scale")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -119,6 +120,7 @@ REGISTER_USER_OP_GRAD("fused_tril_scale_softmax_mask_scale")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace
diff --git a/oneflow/user/ops/gather_op.cpp b/oneflow/user/ops/gather_op.cpp
index 5f85af9ee..47045ef4c 100644
--- a/oneflow/user/ops/gather_op.cpp
+++ b/oneflow/user/ops/gather_op.cpp
@@ -90,7 +90,7 @@ REGISTER_USER_OP("gather")
     });
 
 REGISTER_USER_OP_GRAD("gather").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                          user_op::AddOpFn AddOp) {
+                                                          user_op::AddOpFn AddOp) -> Maybe<void> {
   bool need_grad_in = op.NeedGenGradTensor4OpInput("in", 0);
   if (need_grad_in) {
     user_op::UserOpConfWrapperBuilder in_grad_builder(op.op_name() + "_grad");
@@ -105,6 +105,7 @@ REGISTER_USER_OP_GRAD("gather").SetGenBackwardOpConfFn([](const user_op::UserOpW
     op.BindGradTensorWithOpInput(in_grad_op.output("out", 0), "in", 0);
     AddOp(in_grad_op);
   }
+  return Maybe<void>::Ok();
 });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/gelu_op.cpp b/oneflow/user/ops/gelu_op.cpp
index 0056d4f5f..902fadbde 100644
--- a/oneflow/user/ops/gelu_op.cpp
+++ b/oneflow/user/ops/gelu_op.cpp
@@ -76,7 +76,7 @@ REGISTER_USER_OP("gelu_grad")
     });
 
 REGISTER_USER_OP_GRAD("gelu").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                        user_op::AddOpFn AddOp) {
+                                                        user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper grad_op = builder.Op("gelu_grad")
@@ -87,6 +87,7 @@ REGISTER_USER_OP_GRAD("gelu").SetGenBackwardOpConfFn([](const user_op::UserOpWra
     op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "in", 0);
     AddOp(grad_op);
   }
+  return Maybe<void>::Ok();
 });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/identity_op.cpp b/oneflow/user/ops/identity_op.cpp
index 76040a8b8..2e67cefc4 100644
--- a/oneflow/user/ops/identity_op.cpp
+++ b/oneflow/user/ops/identity_op.cpp
@@ -47,7 +47,8 @@ REGISTER_USER_OP("identity")
     });
 
 REGISTER_USER_OP_GRAD("identity")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("in", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper identity_op =
@@ -58,6 +59,7 @@ REGISTER_USER_OP_GRAD("identity")
         op.BindGradTensorWithOpInput(identity_op.output("out", 0), "in", 0);
         AddOp(identity_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace
diff --git a/oneflow/user/ops/l2_normalize_op.cpp b/oneflow/user/ops/l2_normalize_op.cpp
index c296b09f9..1e8abab46 100644
--- a/oneflow/user/ops/l2_normalize_op.cpp
+++ b/oneflow/user/ops/l2_normalize_op.cpp
@@ -108,7 +108,8 @@ REGISTER_USER_OP("l2_normalize_grad")
     });
 
 REGISTER_USER_OP_GRAD("l2_normalize")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -123,6 +124,7 @@ REGISTER_USER_OP_GRAD("l2_normalize")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/layer_norm_op.cpp b/oneflow/user/ops/layer_norm_op.cpp
index f560c933c..f797d4eea 100644
--- a/oneflow/user/ops/layer_norm_op.cpp
+++ b/oneflow/user/ops/layer_norm_op.cpp
@@ -283,7 +283,8 @@ REGISTER_USER_OP("layer_norm_param_grad")
     });
 
 REGISTER_USER_OP_GRAD("layer_norm")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       const bool center = op.attr<bool>("center");
       const bool scale = op.attr<bool>("scale");
       const bool has_beta = center;
@@ -337,6 +338,7 @@ REGISTER_USER_OP_GRAD("layer_norm")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/leaky_relu_op.cpp b/oneflow/user/ops/leaky_relu_op.cpp
index bd08130a2..a0d65badd 100644
--- a/oneflow/user/ops/leaky_relu_op.cpp
+++ b/oneflow/user/ops/leaky_relu_op.cpp
@@ -75,7 +75,8 @@ REGISTER_USER_OP("leaky_relu_grad")
     });
 
 REGISTER_USER_OP_GRAD("leaky_relu")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op = builder.Op("leaky_relu_grad")
@@ -87,6 +88,7 @@ REGISTER_USER_OP_GRAD("leaky_relu")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/math_binary_elementwise_ops.cpp b/oneflow/user/ops/math_binary_elementwise_ops.cpp
index 0f57170fa..f528a7696 100644
--- a/oneflow/user/ops/math_binary_elementwise_ops.cpp
+++ b/oneflow/user/ops/math_binary_elementwise_ops.cpp
@@ -45,32 +45,34 @@ namespace oneflow {
       .MATH_ELEMENTWISE_DEFAULT_SET_FUNC();                                                     \
                                                                                                 \
   REGISTER_USER_OP_GRAD(math_binary_elementwise_type)                                           \
-      .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {    \
-        if (op.NeedGenGradTensor4OpInput("x", 0)) {                                             \
-          user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_x_grad");                  \
-          user_op::UserOpConfWrapper binary_grad_op =                                           \
-              builder.Op(std::string("") + math_binary_elementwise_type + "_x_grad")            \
-                  .Input("x", op.input("x", 0))                                                 \
-                  .Input("y", op.input("y", 0))                                                 \
-                  .Input("dz", op.GetGradTensorWithOpOutput("z", 0))                            \
-                  .Output("dx")                                                                 \
-                  .Build();                                                                     \
-          op.BindGradTensorWithOpInput(binary_grad_op.output("dx", 0), "x", 0);                 \
-          AddOp(binary_grad_op);                                                                \
-        }                                                                                       \
-        if (op.NeedGenGradTensor4OpInput("y", 0)) {                                             \
-          user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_y_grad");                  \
-          user_op::UserOpConfWrapper binary_grad_op =                                           \
-              builder.Op(std::string("") + math_binary_elementwise_type + "_y_grad")            \
-                  .Input("x", op.input("x", 0))                                                 \
-                  .Input("y", op.input("y", 0))                                                 \
-                  .Input("dz", op.GetGradTensorWithOpOutput("z", 0))                            \
-                  .Output("dy")                                                                 \
-                  .Build();                                                                     \
-          op.BindGradTensorWithOpInput(binary_grad_op.output("dy", 0), "y", 0);                 \
-          AddOp(binary_grad_op);                                                                \
-        }                                                                                       \
-      });
+      .SetGenBackwardOpConfFn(                                                                  \
+          [](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) -> Maybe<void> {         \
+            if (op.NeedGenGradTensor4OpInput("x", 0)) {                                         \
+              user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_x_grad");              \
+              user_op::UserOpConfWrapper binary_grad_op =                                       \
+                  builder.Op(std::string("") + math_binary_elementwise_type + "_x_grad")        \
+                      .Input("x", op.input("x", 0))                                             \
+                      .Input("y", op.input("y", 0))                                             \
+                      .Input("dz", op.GetGradTensorWithOpOutput("z", 0))                        \
+                      .Output("dx")                                                             \
+                      .Build();                                                                 \
+              op.BindGradTensorWithOpInput(binary_grad_op.output("dx", 0), "x", 0);             \
+              AddOp(binary_grad_op);                                                            \
+            }                                                                                   \
+            if (op.NeedGenGradTensor4OpInput("y", 0)) {                                         \
+              user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_y_grad");              \
+              user_op::UserOpConfWrapper binary_grad_op =                                       \
+                  builder.Op(std::string("") + math_binary_elementwise_type + "_y_grad")        \
+                      .Input("x", op.input("x", 0))                                             \
+                      .Input("y", op.input("y", 0))                                             \
+                      .Input("dz", op.GetGradTensorWithOpOutput("z", 0))                        \
+                      .Output("dy")                                                             \
+                      .Build();                                                                 \
+              op.BindGradTensorWithOpInput(binary_grad_op.output("dy", 0), "y", 0);             \
+              AddOp(binary_grad_op);                                                            \
+            }                                                                                   \
+            return Maybe<void>::Ok();                                                           \
+          });
 
 OF_PP_FOR_EACH_TUPLE(REGISTER_MATH_BINARY_ELEMENTWISE_OP_AND_GRAD, MATH_BINARY_ELEMENTWISE_FUNC_SEQ)
 
diff --git a/oneflow/user/ops/math_unary_elementwise_op.cpp b/oneflow/user/ops/math_unary_elementwise_op.cpp
index 1fee85fc5..69cc34fb1 100644
--- a/oneflow/user/ops/math_unary_elementwise_op.cpp
+++ b/oneflow/user/ops/math_unary_elementwise_op.cpp
@@ -33,19 +33,21 @@ namespace oneflow {
       .SetGetSbpFn(user_op::GetSbpFnUtil::SplitForEachAxis)                                   \
       .SetDataTypeInferFn(user_op::TensorDescInferFnUtil::UnchangedDataType);                 \
   REGISTER_USER_OP_GRAD(math_unary_elementwise_type)                                          \
-      .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {  \
-        if (op.NeedGenGradTensor4OpInput("x", 0)) {                                           \
-          user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");                  \
-          user_op::UserOpConfWrapper unary_grad_op =                                          \
-              builder.Op(std::string("") + math_unary_elementwise_type + "_grad")             \
-                  .Input("x", op.input("x", 0))                                               \
-                  .Input("dy", op.GetGradTensorWithOpOutput("y", 0))                          \
-                  .Output("dx")                                                               \
-                  .Build();                                                                   \
-          op.BindGradTensorWithOpInput(unary_grad_op.output("dx", 0), "x", 0);                \
-          AddOp(unary_grad_op);                                                               \
-        }                                                                                     \
-      });
+      .SetGenBackwardOpConfFn(                                                                \
+          [](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) -> Maybe<void> {       \
+            if (op.NeedGenGradTensor4OpInput("x", 0)) {                                       \
+              user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");              \
+              user_op::UserOpConfWrapper unary_grad_op =                                      \
+                  builder.Op(std::string("") + math_unary_elementwise_type + "_grad")         \
+                      .Input("x", op.input("x", 0))                                           \
+                      .Input("dy", op.GetGradTensorWithOpOutput("y", 0))                      \
+                      .Output("dx")                                                           \
+                      .Build();                                                               \
+              op.BindGradTensorWithOpInput(unary_grad_op.output("dx", 0), "x", 0);            \
+              AddOp(unary_grad_op);                                                           \
+            }                                                                                 \
+            return Maybe<void>::Ok();                                                         \
+          });
 
 OF_PP_FOR_EACH_TUPLE(REGISTER_MATH_UNARY_ELEMENTWISE_OP_AND_GRAD, MATH_UNARY_ELEMENTWISE_FUNC_SEQ)
 
diff --git a/oneflow/user/ops/matmul_op.cpp b/oneflow/user/ops/matmul_op.cpp
index be028844d..6c7cdccbd 100644
--- a/oneflow/user/ops/matmul_op.cpp
+++ b/oneflow/user/ops/matmul_op.cpp
@@ -205,8 +205,9 @@ REGISTER_USER_OP("matmul")
     .SetDataTypeInferFn(InferDataType4Matmul);
 
 REGISTER_USER_OP_GRAD("matmul").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                          user_op::AddOpFn AddOp) {
-  return GenBackwardOpConf4Matmul("matmul", op, AddOp);
+                                                          user_op::AddOpFn AddOp) -> Maybe<void> {
+  GenBackwardOpConf4Matmul("matmul", op, AddOp);
+  return Maybe<void>::Ok();
 });
 
 REGISTER_USER_OP("batch_matmul")
@@ -233,8 +234,10 @@ REGISTER_USER_OP("batch_matmul")
     .SetDataTypeInferFn(InferDataType4Matmul);
 
 REGISTER_USER_OP_GRAD("batch_matmul")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
-      return GenBackwardOpConf4Matmul("batch_matmul", op, AddOp);
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
+      GenBackwardOpConf4Matmul("batch_matmul", op, AddOp);
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP("broadcast_matmul")
diff --git a/oneflow/user/ops/multiply_op.cpp b/oneflow/user/ops/multiply_op.cpp
index 701fa8638..59e455570 100644
--- a/oneflow/user/ops/multiply_op.cpp
+++ b/oneflow/user/ops/multiply_op.cpp
@@ -58,7 +58,8 @@ REGISTER_USER_OP("multiply")
     });
 
 REGISTER_USER_OP_GRAD("multiply")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapper x_grad_op =
             user_op::UserOpConfWrapperBuilder(op.op_name() + "_x_grad")
@@ -81,6 +82,7 @@ REGISTER_USER_OP_GRAD("multiply")
         op.BindGradTensorWithOpInput(y_grad_op.output("out", 0), "y", 0);
         AddOp(y_grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/nd_index_slice_ops.cpp b/oneflow/user/ops/nd_index_slice_ops.cpp
index 761969fe0..d60f60aa6 100644
--- a/oneflow/user/ops/nd_index_slice_ops.cpp
+++ b/oneflow/user/ops/nd_index_slice_ops.cpp
@@ -283,7 +283,8 @@ REGISTER_USER_OP("tensor_scatter_nd_add")
     });
 
 REGISTER_USER_OP_GRAD("gather_nd")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("params", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -296,10 +297,12 @@ REGISTER_USER_OP_GRAD("gather_nd")
         op.BindGradTensorWithOpInput(grad_op.output("out", 0), "params", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("scatter_nd")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("updates", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -311,10 +314,12 @@ REGISTER_USER_OP_GRAD("scatter_nd")
         op.BindGradTensorWithOpInput(grad_op.output("out", 0), "updates", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("tensor_scatter_nd_update")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("updates", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_updates_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -344,10 +349,12 @@ REGISTER_USER_OP_GRAD("tensor_scatter_nd_update")
         op.BindGradTensorWithOpInput(grad_op.output("out", 0), "params", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("tensor_scatter_nd_add")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("updates", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_updates_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -362,5 +369,6 @@ REGISTER_USER_OP_GRAD("tensor_scatter_nd_add")
       if (op.NeedGenGradTensor4OpInput("params", 0)) {
         op.BindGradTensorWithOpInput(op.GetGradTensorWithOpOutput("out", 0), "params", 0);
       }
+      return Maybe<void>::Ok();
     });
 }  // namespace oneflow
diff --git a/oneflow/user/ops/nvtx_range_op.cpp b/oneflow/user/ops/nvtx_range_op.cpp
index 084f8a750..e0a222a7b 100644
--- a/oneflow/user/ops/nvtx_range_op.cpp
+++ b/oneflow/user/ops/nvtx_range_op.cpp
@@ -76,7 +76,8 @@ REGISTER_USER_OP("nvtx_end")
     });
 
 REGISTER_USER_OP_GRAD("nvtx_start")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("in", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper nvtx_end_op =
@@ -88,10 +89,12 @@ REGISTER_USER_OP_GRAD("nvtx_start")
         op.BindGradTensorWithOpInput(nvtx_end_op.output("out", 0), "in", 0);
         AddOp(nvtx_end_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("nvtx_end")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("in", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper nvtx_start_op =
@@ -103,6 +106,7 @@ REGISTER_USER_OP_GRAD("nvtx_end")
         op.BindGradTensorWithOpInput(nvtx_start_op.output("out", 0), "in", 0);
         AddOp(nvtx_start_op);
       }
+      return Maybe<void>::Ok();
     });
 }  // namespace
 
diff --git a/oneflow/user/ops/pad2d_ops.cpp b/oneflow/user/ops/pad2d_ops.cpp
index c13009985..a64e0092d 100644
--- a/oneflow/user/ops/pad2d_ops.cpp
+++ b/oneflow/user/ops/pad2d_ops.cpp
@@ -131,7 +131,8 @@ REGISTER_USER_OP("reflection_pad2d_grad")
     });
 
 REGISTER_USER_OP_GRAD("reflection_pad2d")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -143,6 +144,7 @@ REGISTER_USER_OP_GRAD("reflection_pad2d")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP("replication_pad2d")
@@ -224,7 +226,8 @@ REGISTER_USER_OP("replication_pad2d_grad")
     });
 
 REGISTER_USER_OP_GRAD("replication_pad2d")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -236,6 +239,7 @@ REGISTER_USER_OP_GRAD("replication_pad2d")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP("constant_pad2d")
@@ -321,7 +325,8 @@ REGISTER_USER_OP("constant_pad2d_grad")
     });
 
 REGISTER_USER_OP_GRAD("constant_pad2d")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -335,6 +340,7 @@ REGISTER_USER_OP_GRAD("constant_pad2d")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/pad_op.cpp b/oneflow/user/ops/pad_op.cpp
index d310cd6e7..27604575e 100644
--- a/oneflow/user/ops/pad_op.cpp
+++ b/oneflow/user/ops/pad_op.cpp
@@ -97,7 +97,7 @@ REGISTER_USER_OP("pad_grad")
     });
 
 REGISTER_USER_OP_GRAD("pad").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                       user_op::AddOpFn AddOp) {
+                                                       user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("x", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper grad_op =
@@ -112,10 +112,12 @@ REGISTER_USER_OP_GRAD("pad").SetGenBackwardOpConfFn([](const user_op::UserOpWrap
     op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
     AddOp(grad_op);
   }
+  return Maybe<void>::Ok();
 });
 
 REGISTER_USER_OP_GRAD("pad_grad")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("dy", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -130,6 +132,7 @@ REGISTER_USER_OP_GRAD("pad_grad")
         op.BindGradTensorWithOpInput(grad_op.output("y", 0), "dy", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/pool_op.cpp b/oneflow/user/ops/pool_op.cpp
index 162650826..0f60c8bf3 100644
--- a/oneflow/user/ops/pool_op.cpp
+++ b/oneflow/user/ops/pool_op.cpp
@@ -21,7 +21,7 @@ namespace oneflow {
 namespace {
 
 typedef std::function<Maybe<void>(user_op::InferContext* ctx)> TensorDescInferFn;
-typedef std::function<void(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp)>
+typedef std::function<Maybe<void>(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp)>
     GenBackwardOpConfFn;
 
 TensorDescInferFn MakeFwTensorDescInferFn(const int32_t dim) {
@@ -87,7 +87,7 @@ Maybe<void> BwGetSbpFn(user_op::SbpContext* ctx) {
 }
 
 GenBackwardOpConfFn MakeGenBackwardOpConfFn(const std::string& mode, const int32_t dim) {
-  return [mode, dim](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+  return [mode, dim](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) -> Maybe<void> {
     if (op.NeedGenGradTensor4OpInput("x", 0)) {
       user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
       user_op::UserOpConfWrapper grad_op =
@@ -107,6 +107,7 @@ GenBackwardOpConfFn MakeGenBackwardOpConfFn(const std::string& mode, const int32
       op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
       AddOp(grad_op);
     }
+    return Maybe<void>::Ok();
   };
 }
 
diff --git a/oneflow/user/ops/pooling_op.cpp b/oneflow/user/ops/pooling_op.cpp
index 09577f81e..03896b002 100644
--- a/oneflow/user/ops/pooling_op.cpp
+++ b/oneflow/user/ops/pooling_op.cpp
@@ -21,7 +21,7 @@ namespace oneflow {
 namespace {
 
 typedef std::function<Maybe<void>(user_op::InferContext* ctx)> TensorDescInferFn;
-typedef std::function<void(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp)>
+typedef std::function<Maybe<void>(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp)>
     GenBackwardOpConfFn;
 
 TensorDescInferFn MakeForwardTensorDescInferFn(const int32_t dim) {
@@ -112,7 +112,7 @@ Maybe<void> BwInferDataType(user_op::InferContext* ctx) {
 }
 
 GenBackwardOpConfFn MakeBackwardOpConfFn(const std::string& mode, const int32_t dim) {
-  return [mode, dim](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+  return [mode, dim](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) -> Maybe<void> {
     if (op.NeedGenGradTensor4OpInput("x", 0)) {
       user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
       user_op::UserOpConfWrapper grad_op =
@@ -135,6 +135,7 @@ GenBackwardOpConfFn MakeBackwardOpConfFn(const std::string& mode, const int32_t
       op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
       AddOp(grad_op);
     }
+    return Maybe<void>::Ok();
   };
 }
 
diff --git a/oneflow/user/ops/prelu_op.cpp b/oneflow/user/ops/prelu_op.cpp
index 672b52207..8a6aaab3d 100644
--- a/oneflow/user/ops/prelu_op.cpp
+++ b/oneflow/user/ops/prelu_op.cpp
@@ -121,7 +121,7 @@ REGISTER_USER_OP("prelu_grad")
     });
 
 REGISTER_USER_OP_GRAD("prelu").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                         user_op::AddOpFn AddOp) {
+                                                         user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("x", 0) || op.NeedGenGradTensor4OpInput("alpha", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper grad_op = builder.Op("prelu_grad")
@@ -146,6 +146,7 @@ REGISTER_USER_OP_GRAD("prelu").SetGenBackwardOpConfFn([](const user_op::UserOpWr
       op.BindGradTensorWithOpInput(alpha_identity_op.output("out", 0), "alpha", 0);
     }
   }
+  return Maybe<void>::Ok();
 });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/reduce_ops.cpp b/oneflow/user/ops/reduce_ops.cpp
index e6ed4c5de..5f989736e 100644
--- a/oneflow/user/ops/reduce_ops.cpp
+++ b/oneflow/user/ops/reduce_ops.cpp
@@ -93,7 +93,8 @@ REGISTER_REDUCE_USER_OP("reduce_sum", BinaryFuncSum)
 REGISTER_REDUCE_USER_OP("reduce_max", BinaryFuncMax)
 
 REGISTER_USER_OP_GRAD("reduce_sum")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("input_tensor", 0)) {
         const auto& axes = op.attr<std::vector<int32_t>>("axis");
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
@@ -107,9 +108,11 @@ REGISTER_USER_OP_GRAD("reduce_sum")
         op.BindGradTensorWithOpInput(reduce_sum_grad_op.output("y", 0), "input_tensor", 0);
         AddOp(reduce_sum_grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
-void GenerateBackwardOpConf4ReduceMaxMin(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+Maybe<void> GenerateBackwardOpConf4ReduceMaxMin(const user_op::UserOpWrapper& op,
+                                                user_op::AddOpFn AddOp) {
   if (op.NeedGenGradTensor4OpInput("input_tensor", 0)) {
     const auto& axes = op.attr<std::vector<int32_t>>("axis");
 
@@ -179,6 +182,7 @@ void GenerateBackwardOpConf4ReduceMaxMin(const user_op::UserOpWrapper& op, user_
     AddOp(multiply_mask_op);
     op.BindGradTensorWithOpInput(multiply_mask_op.output("out", 0), "input_tensor", 0);
   }
+  return Maybe<void>::Ok();
 }
 
 REGISTER_USER_OP_GRAD("reduce_max").SetGenBackwardOpConfFn(GenerateBackwardOpConf4ReduceMaxMin);
diff --git a/oneflow/user/ops/reshape_like_op.cpp b/oneflow/user/ops/reshape_like_op.cpp
index 8a3f360f9..1341e2a7f 100644
--- a/oneflow/user/ops/reshape_like_op.cpp
+++ b/oneflow/user/ops/reshape_like_op.cpp
@@ -71,7 +71,8 @@ REGISTER_USER_OP("reshape_like")
     });
 
 REGISTER_USER_OP_GRAD("reshape_like")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("in", 0)) {
         const auto& in_desc = op.TensorDesc4ArgNameAndIndex("in", 0);
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
@@ -95,6 +96,7 @@ REGISTER_USER_OP_GRAD("reshape_like")
           AddOp(reshape_grad_op);
         }
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/reshape_op.cpp b/oneflow/user/ops/reshape_op.cpp
index 29195aed0..573028fb2 100644
--- a/oneflow/user/ops/reshape_op.cpp
+++ b/oneflow/user/ops/reshape_op.cpp
@@ -88,7 +88,7 @@ REGISTER_USER_OP("reshape")
     .SetDataTypeInferFn(InferDataType);
 
 REGISTER_USER_OP_GRAD("reshape").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                           user_op::AddOpFn AddOp) {
+                                                           user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     const auto& in_desc = op.TensorDesc4ArgNameAndIndex("in", 0);
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
@@ -112,6 +112,7 @@ REGISTER_USER_OP_GRAD("reshape").SetGenBackwardOpConfFn([](const user_op::UserOp
       AddOp(reshape_grad_op);
     }
   }
+  return Maybe<void>::Ok();
 });
 
 }  // namespace
diff --git a/oneflow/user/ops/same_padding_op.cpp b/oneflow/user/ops/same_padding_op.cpp
index bcdb5cf39..ab516f0bb 100644
--- a/oneflow/user/ops/same_padding_op.cpp
+++ b/oneflow/user/ops/same_padding_op.cpp
@@ -127,7 +127,8 @@ REGISTER_USER_OP("same_padding_grad")
     });
 
 REGISTER_USER_OP_GRAD("same_padding")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         const std::string& padding = op.attr<std::string>("padding");
         const std::string& data_format = op.attr<std::string>("data_format");
@@ -149,6 +150,7 @@ REGISTER_USER_OP_GRAD("same_padding")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace user_op
diff --git a/oneflow/user/ops/scalar_add_op.cpp b/oneflow/user/ops/scalar_add_op.cpp
index 20378b378..86603f5c8 100644
--- a/oneflow/user/ops/scalar_add_op.cpp
+++ b/oneflow/user/ops/scalar_add_op.cpp
@@ -42,10 +42,12 @@ REGISTER_USER_OP("scalar_add")
     });
 
 REGISTER_USER_OP_GRAD("scalar_add")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("in", 0)) {
         op.BindGradTensorWithOpInput(op.GetGradTensorWithOpOutput("out", 0), "in", 0);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/scalar_by_tensor_op.cpp b/oneflow/user/ops/scalar_by_tensor_op.cpp
index d159e6bd5..dd330ccb1 100644
--- a/oneflow/user/ops/scalar_by_tensor_op.cpp
+++ b/oneflow/user/ops/scalar_by_tensor_op.cpp
@@ -77,7 +77,8 @@ REGISTER_USER_OP("scalar_add_by_tensor")
     }));
 
 REGISTER_USER_OP_GRAD("scalar_add_by_tensor")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         op.BindGradTensorWithOpInput(op.GetGradTensorWithOpOutput("y", 0), "x", 0);
       }
@@ -95,6 +96,7 @@ REGISTER_USER_OP_GRAD("scalar_add_by_tensor")
         op.BindGradTensorWithOpInput(grad_op.output("output_tensor", 0), "scalar", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP("scalar_sub_by_tensor")
@@ -113,7 +115,8 @@ REGISTER_USER_OP("scalar_sub_by_tensor")
     }));
 
 REGISTER_USER_OP_GRAD("scalar_sub_by_tensor")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         op.BindGradTensorWithOpInput(op.GetGradTensorWithOpOutput("y", 0), "x", 0);
       }
@@ -142,6 +145,7 @@ REGISTER_USER_OP_GRAD("scalar_sub_by_tensor")
         AddOp(scalar_grad_reduce_sum_op);
         AddOp(scalar_grad_scalar_mul_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP("scalar_mul_by_tensor")
@@ -165,7 +169,8 @@ REGISTER_USER_OP("scalar_mul_by_tensor")
     }));
 
 REGISTER_USER_OP_GRAD("scalar_mul_by_tensor")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op = builder.Op("scalar_mul_by_tensor")
@@ -200,6 +205,7 @@ REGISTER_USER_OP_GRAD("scalar_mul_by_tensor")
         AddOp(scalar_grad_multiply_op);
         AddOp(scalar_grad_reduce_sum_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP("scalar_div_by_tensor")
@@ -218,7 +224,8 @@ REGISTER_USER_OP("scalar_div_by_tensor")
     }));
 
 REGISTER_USER_OP_GRAD("scalar_div_by_tensor")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op = builder.Op("scalar_div_by_tensor")
@@ -240,6 +247,7 @@ REGISTER_USER_OP_GRAD("scalar_div_by_tensor")
         op.BindGradTensorWithOpInput(grad_op.output("dy", 0), "scalar", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/scalar_mul_op.cpp b/oneflow/user/ops/scalar_mul_op.cpp
index 08546a57d..b50c966f3 100644
--- a/oneflow/user/ops/scalar_mul_op.cpp
+++ b/oneflow/user/ops/scalar_mul_op.cpp
@@ -43,7 +43,8 @@ REGISTER_USER_OP("scalar_mul")
     });
 
 REGISTER_USER_OP_GRAD("scalar_mul")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("in", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -58,6 +59,7 @@ REGISTER_USER_OP_GRAD("scalar_mul")
         op.BindGradTensorWithOpInput(grad_op.output("out", 0), "in", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/sigmoid_cross_entropy_op.cpp b/oneflow/user/ops/sigmoid_cross_entropy_op.cpp
index 225d833ac..cdf7e7887 100644
--- a/oneflow/user/ops/sigmoid_cross_entropy_op.cpp
+++ b/oneflow/user/ops/sigmoid_cross_entropy_op.cpp
@@ -94,7 +94,8 @@ REGISTER_USER_OP("sigmoid_cross_entropy_grad")
     });
 
 REGISTER_USER_OP_GRAD("sigmoid_cross_entropy")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("prediction", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -107,5 +108,6 @@ REGISTER_USER_OP_GRAD("sigmoid_cross_entropy")
         op.BindGradTensorWithOpInput(grad_op.output("prediction_diff", 0), "prediction", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 }  // namespace oneflow
diff --git a/oneflow/user/ops/sigmoid_op.cpp b/oneflow/user/ops/sigmoid_op.cpp
index a5d04068d..6d0e393c7 100644
--- a/oneflow/user/ops/sigmoid_op.cpp
+++ b/oneflow/user/ops/sigmoid_op.cpp
@@ -73,7 +73,7 @@ REGISTER_USER_OP("sigmoid_grad")
     });
 
 REGISTER_USER_OP_GRAD("sigmoid").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                           user_op::AddOpFn AddOp) {
+                                                           user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper sigmoid_grad_op =
@@ -85,6 +85,7 @@ REGISTER_USER_OP_GRAD("sigmoid").SetGenBackwardOpConfFn([](const user_op::UserOp
     op.BindGradTensorWithOpInput(sigmoid_grad_op.output("dx", 0), "in", 0);
     AddOp(sigmoid_grad_op);
   }
+  return Maybe<void>::Ok();
 });
 
 }  // namespace
diff --git a/oneflow/user/ops/slice_op.cpp b/oneflow/user/ops/slice_op.cpp
index f8c2cf004..a41ba41f0 100644
--- a/oneflow/user/ops/slice_op.cpp
+++ b/oneflow/user/ops/slice_op.cpp
@@ -213,7 +213,7 @@ Maybe<void> GetSliceUpdateOpSbpSignature(user_op::SbpContext* ctx) {
   return Maybe<void>::Ok();
 }
 
-void GenSliceGradOp(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+Maybe<void> GenSliceGradOp(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
   if (op.NeedGenGradTensor4OpInput("x", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper grad_op = builder.Op("slice_grad")
@@ -227,6 +227,7 @@ void GenSliceGradOp(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
     op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
     AddOp(grad_op);
   }
+  return Maybe<void>::Ok();
 }
 
 Maybe<void> InferLogicalSliceAssignTensorDesc(user_op::InferContext* ctx) {
diff --git a/oneflow/user/ops/smooth_l1_loss_op.cpp b/oneflow/user/ops/smooth_l1_loss_op.cpp
index fd753fbdc..ec3c43842 100644
--- a/oneflow/user/ops/smooth_l1_loss_op.cpp
+++ b/oneflow/user/ops/smooth_l1_loss_op.cpp
@@ -83,7 +83,8 @@ REGISTER_USER_OP("smooth_l1_loss_grad")
     });
 
 REGISTER_USER_OP_GRAD("smooth_l1_loss")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("prediction", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -97,6 +98,7 @@ REGISTER_USER_OP_GRAD("smooth_l1_loss")
         op.BindGradTensorWithOpInput(grad_op.output("prediction_grad", 0), "prediction", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/softmax_cross_entropy_op.cpp b/oneflow/user/ops/softmax_cross_entropy_op.cpp
index da8189d61..de26a22bc 100644
--- a/oneflow/user/ops/softmax_cross_entropy_op.cpp
+++ b/oneflow/user/ops/softmax_cross_entropy_op.cpp
@@ -114,7 +114,8 @@ REGISTER_USER_OP("softmax_cross_entropy_grad")
     });
 
 REGISTER_USER_OP_GRAD("softmax_cross_entropy")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("prediction", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -127,6 +128,7 @@ REGISTER_USER_OP_GRAD("softmax_cross_entropy")
         op.BindGradTensorWithOpInput(grad_op.output("prediction_diff", 0), "prediction", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/softmax_op.cpp b/oneflow/user/ops/softmax_op.cpp
index af531b735..67491a6cc 100644
--- a/oneflow/user/ops/softmax_op.cpp
+++ b/oneflow/user/ops/softmax_op.cpp
@@ -71,7 +71,7 @@ REGISTER_USER_OP("softmax_grad")
     });
 
 REGISTER_USER_OP_GRAD("softmax").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                           user_op::AddOpFn AddOp) {
+                                                           user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper softmax_grad_op =
@@ -83,6 +83,7 @@ REGISTER_USER_OP_GRAD("softmax").SetGenBackwardOpConfFn([](const user_op::UserOp
     op.BindGradTensorWithOpInput(softmax_grad_op.output("dx", 0), "in", 0);
     AddOp(softmax_grad_op);
   }
+  return Maybe<void>::Ok();
 });
 
 }  // namespace
diff --git a/oneflow/user/ops/sparse_cross_entropy_op.cpp b/oneflow/user/ops/sparse_cross_entropy_op.cpp
index 8972b7d14..ff4ea687b 100644
--- a/oneflow/user/ops/sparse_cross_entropy_op.cpp
+++ b/oneflow/user/ops/sparse_cross_entropy_op.cpp
@@ -130,9 +130,9 @@ Maybe<void> GetSbpFn(user_op::SbpContext* ctx) {
   return Maybe<void>::Ok();
 }
 
-void GenBackwardOpConf4SparseCrossEntropy(const std::string& op_type_name,
-                                          const user_op::UserOpWrapper& op,
-                                          user_op::AddOpFn AddOp) {
+Maybe<void> GenBackwardOpConf4SparseCrossEntropy(const std::string& op_type_name,
+                                                 const user_op::UserOpWrapper& op,
+                                                 user_op::AddOpFn AddOp) {
   if (op.NeedGenGradTensor4OpInput("prediction", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper grad_op = builder.Op(op_type_name)
@@ -145,6 +145,7 @@ void GenBackwardOpConf4SparseCrossEntropy(const std::string& op_type_name,
     op.BindGradTensorWithOpInput(grad_op.output("prediction_diff", 0), "prediction", 0);
     AddOp(grad_op);
   }
+  return Maybe<void>::Ok();
 }
 
 }  // namespace
@@ -183,12 +184,14 @@ REGISTER_SPAESE_CROSS_ENTROPY_GRAD_USER_OP("sparse_cross_entropy_grad", AddGradS
 REGISTER_SPAESE_CROSS_ENTROPY_GRAD_USER_OP("sparse_cross_entropy_ms_grad", AddGradMsSignature);
 
 REGISTER_USER_OP_GRAD("sparse_cross_entropy")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       return GenBackwardOpConf4SparseCrossEntropy("sparse_cross_entropy_grad", op, AddOp);
     });
 
 REGISTER_USER_OP_GRAD("sparse_cross_entropy_ms")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       return GenBackwardOpConf4SparseCrossEntropy("sparse_cross_entropy_ms_grad", op, AddOp);
     });
 
diff --git a/oneflow/user/ops/sparse_softmax_cross_entropy_op.cpp b/oneflow/user/ops/sparse_softmax_cross_entropy_op.cpp
index e8b8e2df8..9310adbf7 100644
--- a/oneflow/user/ops/sparse_softmax_cross_entropy_op.cpp
+++ b/oneflow/user/ops/sparse_softmax_cross_entropy_op.cpp
@@ -134,9 +134,9 @@ Maybe<void> GetSbpFn(user_op::SbpContext* ctx) {
   return Maybe<void>::Ok();
 }
 
-void GenBackwardOpConf4SparseSoftmaxCrossEntropy(const std::string& op_type_name,
-                                                 const user_op::UserOpWrapper& op,
-                                                 user_op::AddOpFn AddOp) {
+Maybe<void> GenBackwardOpConf4SparseSoftmaxCrossEntropy(const std::string& op_type_name,
+                                                        const user_op::UserOpWrapper& op,
+                                                        user_op::AddOpFn AddOp) {
   if (op.NeedGenGradTensor4OpInput("prediction", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper grad_op = builder.Op(op_type_name)
@@ -149,6 +149,7 @@ void GenBackwardOpConf4SparseSoftmaxCrossEntropy(const std::string& op_type_name
     op.BindGradTensorWithOpInput(grad_op.output("prediction_diff", 0), "prediction", 0);
     AddOp(grad_op);
   }
+  return Maybe<void>::Ok();
 }
 
 }  // namespace
@@ -190,13 +191,15 @@ REGISTER_SPAESE_SOFTMAX_CROSS_ENTROPY_GRAD_USER_OP("sparse_softmax_cross_entropy
                                                    AddGradMsSignature);
 
 REGISTER_USER_OP_GRAD("sparse_softmax_cross_entropy")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       return GenBackwardOpConf4SparseSoftmaxCrossEntropy("sparse_softmax_cross_entropy_grad", op,
                                                          AddOp);
     });
 
 REGISTER_USER_OP_GRAD("sparse_softmax_cross_entropy_ms")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       return GenBackwardOpConf4SparseSoftmaxCrossEntropy("sparse_softmax_cross_entropy_ms_grad", op,
                                                          AddOp);
     });
diff --git a/oneflow/user/ops/split_like_op.cpp b/oneflow/user/ops/split_like_op.cpp
index 06249e0e3..cdaaa680c 100644
--- a/oneflow/user/ops/split_like_op.cpp
+++ b/oneflow/user/ops/split_like_op.cpp
@@ -121,7 +121,7 @@ Maybe<void> GetSbpSignature(user_op::SbpContext* ctx) {
   return Maybe<void>::Ok();
 }
 
-void GenGradOp(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+Maybe<void> GenGradOp(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
   const int64_t axis = op.attr<int64_t>("axis");
   const int32_t out_size = op.output_size("out");
   int64_t max_dim_size = 0;
@@ -153,6 +153,7 @@ void GenGradOp(const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
     op.BindGradTensorWithOpInput(grad_op.output("out", 0), "in", 0);
     AddOp(grad_op);
   }
+  return Maybe<void>::Ok();
 }
 
 }  // namespace
diff --git a/oneflow/user/ops/squeeze_op.cpp b/oneflow/user/ops/squeeze_op.cpp
index b8efb2b52..9cc78594c 100644
--- a/oneflow/user/ops/squeeze_op.cpp
+++ b/oneflow/user/ops/squeeze_op.cpp
@@ -91,7 +91,7 @@ REGISTER_USER_OP("squeeze")
     .SetGetSbpFn(SqueezeGetSbpFn);
 
 REGISTER_USER_OP_GRAD("squeeze").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                           user_op::AddOpFn AddOp) {
+                                                           user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper grad_op = builder.Op("reshape_like")
@@ -102,6 +102,7 @@ REGISTER_USER_OP_GRAD("squeeze").SetGenBackwardOpConfFn([](const user_op::UserOp
     op.BindGradTensorWithOpInput(grad_op.output("out", 0), "in", 0);
     AddOp(grad_op);
   }
+  return Maybe<void>::Ok();
 });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/test_ops.cpp b/oneflow/user/ops/test_ops.cpp
index 6cb248037..d44cdc73d 100644
--- a/oneflow/user/ops/test_ops.cpp
+++ b/oneflow/user/ops/test_ops.cpp
@@ -62,7 +62,7 @@ REGISTER_USER_OP("ccrelu_grad")
     });
 
 REGISTER_USER_OP_GRAD("ccrelu").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                          user_op::AddOpFn AddOp) {
+                                                          user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper ccrelu_grad_op =
@@ -74,6 +74,7 @@ REGISTER_USER_OP_GRAD("ccrelu").SetGenBackwardOpConfFn([](const user_op::UserOpW
     op.BindGradTensorWithOpInput(ccrelu_grad_op.output("dx", 0), "in", 0);
     AddOp(ccrelu_grad_op);
   }
+  return Maybe<void>::Ok();
 });
 
 REGISTER_USER_OP("TestReshape")
@@ -220,7 +221,8 @@ REGISTER_USER_OP("TestMultiInputGrad")
     });
 
 REGISTER_USER_OP_GRAD("TestMultiInput")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x1", 0) || op.NeedGenGradTensor4OpInput("x2", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper test_multi_input_grad_op =
@@ -235,6 +237,7 @@ REGISTER_USER_OP_GRAD("TestMultiInput")
         op.BindGradTensorWithOpInput(test_multi_input_grad_op.output("x2_diff", 0), "x2", 0);
         AddOp(test_multi_input_grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP("TestDynamicSource")
diff --git a/oneflow/user/ops/transpose_ops.cpp b/oneflow/user/ops/transpose_ops.cpp
index 307617712..05ed4bc1a 100644
--- a/oneflow/user/ops/transpose_ops.cpp
+++ b/oneflow/user/ops/transpose_ops.cpp
@@ -68,7 +68,8 @@ REGISTER_USER_OP("transpose")
     });
 
 REGISTER_USER_OP_GRAD("transpose")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("input", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         const auto& tmp = op.attr<std::vector<int32_t>>("perm");
@@ -84,5 +85,6 @@ REGISTER_USER_OP_GRAD("transpose")
         op.BindGradTensorWithOpInput(transpose_grad_op.output("output", 0), "input", 0);
         AddOp(transpose_grad_op);
       }
+      return Maybe<void>::Ok();
     });
 }  // namespace oneflow
diff --git a/oneflow/user/ops/tril_op.cpp b/oneflow/user/ops/tril_op.cpp
index 2055837de..7324a1a33 100644
--- a/oneflow/user/ops/tril_op.cpp
+++ b/oneflow/user/ops/tril_op.cpp
@@ -56,7 +56,7 @@ REGISTER_USER_OP("tril")
     });
 
 REGISTER_USER_OP_GRAD("tril").SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
-                                                        user_op::AddOpFn AddOp) {
+                                                        user_op::AddOpFn AddOp) -> Maybe<void> {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper grad_op = builder.Op("tril")
@@ -67,6 +67,7 @@ REGISTER_USER_OP_GRAD("tril").SetGenBackwardOpConfFn([](const user_op::UserOpWra
     op.BindGradTensorWithOpInput(grad_op.output("out", 0), "in", 0);
     AddOp(grad_op);
   }
+  return Maybe<void>::Ok();
 });
 
 REGISTER_USER_OP("fused_scale_tril")
@@ -111,7 +112,8 @@ REGISTER_USER_OP("fused_scale_tril")
     });
 
 REGISTER_USER_OP_GRAD("fused_scale_tril")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("in", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -126,6 +128,7 @@ REGISTER_USER_OP_GRAD("fused_scale_tril")
         op.BindGradTensorWithOpInput(grad_op.output("out", 0), "in", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/tuple_identity_op.cpp b/oneflow/user/ops/tuple_identity_op.cpp
index ec960b39e..f829f9d72 100644
--- a/oneflow/user/ops/tuple_identity_op.cpp
+++ b/oneflow/user/ops/tuple_identity_op.cpp
@@ -63,13 +63,15 @@ REGISTER_USER_OP("tuple_identity")
     .SetGetSbpFn(user_op::GetSbpFnUtil::DefaultBroadcastToBroadcast);
 
 REGISTER_USER_OP_GRAD("tuple_identity")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       int32_t in_size = op.input_size("in");
       for (int i = 0; i < in_size; ++i) {
         if (op.NeedGenGradTensor4OpInput("in", i)) {
           op.BindGradTensorWithOpInput(op.GetGradTensorWithOpOutput("out", i), "in", i);
         }
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/two_stage_reduce_ops.cpp b/oneflow/user/ops/two_stage_reduce_ops.cpp
index 722af2d5b..97ac727c2 100644
--- a/oneflow/user/ops/two_stage_reduce_ops.cpp
+++ b/oneflow/user/ops/two_stage_reduce_ops.cpp
@@ -228,8 +228,9 @@ REGISTER_REDUCE_DEVICE_STAGE_USER_OP("reduce_max_device_stage")
 REGISTER_REDUCE_DEVICE_STAGE_GRAD_USER_OP("reduce_min_device_stage_grad")
 REGISTER_REDUCE_DEVICE_STAGE_GRAD_USER_OP("reduce_max_device_stage_grad")
 
-void GenBackwardOpConf4ReduceDeviceStage(const std::string& op_type_name,
-                                         const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+Maybe<void> GenBackwardOpConf4ReduceDeviceStage(const std::string& op_type_name,
+                                                const user_op::UserOpWrapper& op,
+                                                user_op::AddOpFn AddOp) {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper grad_op =
@@ -243,13 +244,15 @@ void GenBackwardOpConf4ReduceDeviceStage(const std::string& op_type_name,
     op.BindGradTensorWithOpInput(grad_op.output("in_diff", 0), "in", 0);
     AddOp(grad_op);
   }
+  return Maybe<void>::Ok();
 }
 
-#define REGISTER_REDUCE_DEVICE_STAGE_USER_OP_GRAD(op_type_name, grad_op_type_name)           \
-  REGISTER_USER_OP_GRAD(op_type_name)                                                        \
-      .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) { \
-        return GenBackwardOpConf4ReduceDeviceStage(grad_op_type_name, op, AddOp);            \
-      });
+#define REGISTER_REDUCE_DEVICE_STAGE_USER_OP_GRAD(op_type_name, grad_op_type_name)      \
+  REGISTER_USER_OP_GRAD(op_type_name)                                                   \
+      .SetGenBackwardOpConfFn(                                                          \
+          [](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) -> Maybe<void> { \
+            return GenBackwardOpConf4ReduceDeviceStage(grad_op_type_name, op, AddOp);   \
+          });
 REGISTER_REDUCE_DEVICE_STAGE_USER_OP_GRAD("reduce_min_device_stage", "reduce_min_device_stage_grad")
 REGISTER_REDUCE_DEVICE_STAGE_USER_OP_GRAD("reduce_max_device_stage", "reduce_max_device_stage_grad")
 
@@ -298,8 +301,9 @@ REGISTER_REDUCE_GLOBAL_STAGE_USER_OP("reduce_max_global_stage")
 REGISTER_REDUCE_GLOBAL_STAGE_GRAD_USER_OP("reduce_min_global_stage_grad")
 REGISTER_REDUCE_GLOBAL_STAGE_GRAD_USER_OP("reduce_max_global_stage_grad")
 
-void GenBackwardOpConf4ReduceGlobalStage(const std::string& op_type_name,
-                                         const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+Maybe<void> GenBackwardOpConf4ReduceGlobalStage(const std::string& op_type_name,
+                                                const user_op::UserOpWrapper& op,
+                                                user_op::AddOpFn AddOp) {
   if (op.NeedGenGradTensor4OpInput("in", 0)) {
     user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
     user_op::UserOpConfWrapper grad_op =
@@ -314,13 +318,15 @@ void GenBackwardOpConf4ReduceGlobalStage(const std::string& op_type_name,
     op.BindGradTensorWithOpInput(grad_op.output("in_diff", 0), "in", 0);
     AddOp(grad_op);
   }
+  return Maybe<void>::Ok();
 }
 
-#define REGISTER_REDUCE_GLOBAL_STAGE_USER_OP_GRAD(op_type_name, grad_op_type_name)           \
-  REGISTER_USER_OP_GRAD(op_type_name)                                                        \
-      .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) { \
-        return GenBackwardOpConf4ReduceGlobalStage(grad_op_type_name, op, AddOp);            \
-      });
+#define REGISTER_REDUCE_GLOBAL_STAGE_USER_OP_GRAD(op_type_name, grad_op_type_name)      \
+  REGISTER_USER_OP_GRAD(op_type_name)                                                   \
+      .SetGenBackwardOpConfFn(                                                          \
+          [](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) -> Maybe<void> { \
+            return GenBackwardOpConf4ReduceGlobalStage(grad_op_type_name, op, AddOp);   \
+          });
 REGISTER_REDUCE_GLOBAL_STAGE_USER_OP_GRAD("reduce_min_global_stage", "reduce_min_global_stage_grad")
 REGISTER_REDUCE_GLOBAL_STAGE_USER_OP_GRAD("reduce_max_global_stage", "reduce_max_global_stage_grad")
 
diff --git a/oneflow/user/ops/unsorted_batch_segment_sum_op.cpp b/oneflow/user/ops/unsorted_batch_segment_sum_op.cpp
index 675bda4fa..bec371109 100644
--- a/oneflow/user/ops/unsorted_batch_segment_sum_op.cpp
+++ b/oneflow/user/ops/unsorted_batch_segment_sum_op.cpp
@@ -79,7 +79,8 @@ REGISTER_USER_OP("unsorted_batch_segment_sum")
     });
 
 REGISTER_USER_OP_GRAD("unsorted_batch_segment_sum")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       bool need_grad_data = op.NeedGenGradTensor4OpInput("data", 0);
       if (need_grad_data) {
         user_op::UserOpConfWrapperBuilder data_grad_builder(op.op_name() + "_grad");
@@ -92,6 +93,7 @@ REGISTER_USER_OP_GRAD("unsorted_batch_segment_sum")
         op.BindGradTensorWithOpInput(data_grad_op.output("out", 0), "data", 0);
         AddOp(data_grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
diff --git a/oneflow/user/ops/unsorted_segment_sum_op.cpp b/oneflow/user/ops/unsorted_segment_sum_op.cpp
index 6a072a911..d7899ad4d 100644
--- a/oneflow/user/ops/unsorted_segment_sum_op.cpp
+++ b/oneflow/user/ops/unsorted_segment_sum_op.cpp
@@ -84,7 +84,8 @@ REGISTER_USER_OP("unsorted_segment_sum")
     });
 
 REGISTER_USER_OP_GRAD("unsorted_segment_sum")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       bool need_grad_data = op.NeedGenGradTensor4OpInput("data", 0);
       if (need_grad_data) {
         user_op::UserOpConfWrapperBuilder data_grad_builder(op.op_name() + "_grad");
@@ -98,6 +99,7 @@ REGISTER_USER_OP_GRAD("unsorted_segment_sum")
         op.BindGradTensorWithOpInput(data_grad_op.output("out", 0), "data", 0);
         AddOp(data_grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP("unsorted_segment_sum_like")
diff --git a/oneflow/user/ops/upsample_op.cpp b/oneflow/user/ops/upsample_op.cpp
index f3f865e9b..6b8bb994e 100644
--- a/oneflow/user/ops/upsample_op.cpp
+++ b/oneflow/user/ops/upsample_op.cpp
@@ -456,7 +456,8 @@ REGISTER_USER_OP("upsample_trilinear_3d_grad")
     });
 
 REGISTER_USER_OP_GRAD("upsample_linear_1d")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -471,10 +472,12 @@ REGISTER_USER_OP_GRAD("upsample_linear_1d")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("upsample_nearest_1d")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -488,10 +491,12 @@ REGISTER_USER_OP_GRAD("upsample_nearest_1d")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("upsample_nearest_2d")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -506,10 +511,12 @@ REGISTER_USER_OP_GRAD("upsample_nearest_2d")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("upsample_bilinear_2d")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -525,10 +532,12 @@ REGISTER_USER_OP_GRAD("upsample_bilinear_2d")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("upsample_bicubic_2d")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -544,10 +553,12 @@ REGISTER_USER_OP_GRAD("upsample_bicubic_2d")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("upsample")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -564,10 +575,12 @@ REGISTER_USER_OP_GRAD("upsample")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("upsample_nearest_3d")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -583,10 +596,12 @@ REGISTER_USER_OP_GRAD("upsample_nearest_3d")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 REGISTER_USER_OP_GRAD("upsample_trilinear_3d")
-    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op, user_op::AddOpFn AddOp) {
+    .SetGenBackwardOpConfFn([](const user_op::UserOpWrapper& op,
+                               user_op::AddOpFn AddOp) -> Maybe<void> {
       if (op.NeedGenGradTensor4OpInput("x", 0)) {
         user_op::UserOpConfWrapperBuilder builder(op.op_name() + "_grad");
         user_op::UserOpConfWrapper grad_op =
@@ -603,6 +618,7 @@ REGISTER_USER_OP_GRAD("upsample_trilinear_3d")
         op.BindGradTensorWithOpInput(grad_op.output("dx", 0), "x", 0);
         AddOp(grad_op);
       }
+      return Maybe<void>::Ok();
     });
 
 }  // namespace oneflow
-- 
GitLab