diff --git a/oneflow/core/autograd/gradient_funcs/broadcast_binary_ops.cpp b/oneflow/core/autograd/gradient_funcs/broadcast_binary_ops.cpp
index 9889579db947e24d00fa003b1f66281899984c39..b3aa09940e2e89f7c4d36ce97f551d85b571baba 100644
--- a/oneflow/core/autograd/gradient_funcs/broadcast_binary_ops.cpp
+++ b/oneflow/core/autograd/gradient_funcs/broadcast_binary_ops.cpp
@@ -27,10 +27,10 @@ namespace {
 class ReduceSumLikeModule {
  public:
   ReduceSumLikeModule(const std::string& op_name) {
-    identity_op_ = op_expr_helper::IdentityOp(op_name + "_identity").GetPtrOrThrow();
-    reshape_like_op_ = op_expr_helper::ReshapeLikeOp(op_name + "_reshape_like").GetPtrOrThrow();
+    identity_op_ = CHECK_JUST(op_expr_helper::IdentityOp(op_name + "_identity"));
+    reshape_like_op_ = CHECK_JUST(op_expr_helper::ReshapeLikeOp(op_name + "_reshape_like"));
     reduce_sum_like_op_ =
-        op_expr_helper::ReduceSumLikeOp({-1}, op_name + "reduce_sum_like").GetPtrOrThrow();
+        CHECK_JUST(op_expr_helper::ReduceSumLikeOp({-1}, op_name + "reduce_sum_like"));
   }
 
   Maybe<Tensor> forward(const std::shared_ptr<Tensor>& input,
diff --git a/oneflow/core/framework/op_interpreter/op_interpreter_util.cpp b/oneflow/core/framework/op_interpreter/op_interpreter_util.cpp
index 385298fb0d4a73df540a1b33b8824225e4b9dbe6..e73112c020a5298eb390ddd12d8981bd97d57056 100644
--- a/oneflow/core/framework/op_interpreter/op_interpreter_util.cpp
+++ b/oneflow/core/framework/op_interpreter/op_interpreter_util.cpp
@@ -163,8 +163,7 @@ using Bn2BlobObjectMap = HashMap<std::string, std::shared_ptr<compatible_py::Blo
         blob_attr->shape(), dtype, device, is_lazy,
         /*requires_grad=*/false, /*is_leaf=*/false, /*retain_grad=*/false));
   } else {
-    const auto& distribute =
-        compatible_py::MakeDistribute(*(parallel_attr->sbp_parallel())).GetPtrOrThrow();
+    const auto& distribute = JUST(compatible_py::MakeDistribute(*(parallel_attr->sbp_parallel())));
     return static_cast<std::shared_ptr<Tensor>>(ConsistentTensor::MakeTensor(
         blob_attr->shape(), dtype, distribute, parallel_attr->parallel_desc_symbol(), is_lazy,
         /*requires_grad=*/false, /*is_leaf=*/false, /*retain_grad=*/false));
diff --git a/oneflow/core/framework/tensor_arg.cpp b/oneflow/core/framework/tensor_arg.cpp
index 7e057dcdb27257b8e116fb5ff86c9b98b3ba4790..e7d947d426148755a6002f86bf51791e90576d49 100644
--- a/oneflow/core/framework/tensor_arg.cpp
+++ b/oneflow/core/framework/tensor_arg.cpp
@@ -24,7 +24,7 @@ limitations under the License.
 namespace oneflow {
 namespace one {
 
-TensorArg::TensorArg() : add2_op_(op_expr_helper::AddNOp(2).GetPtrOrThrow()) {}
+TensorArg::TensorArg() : add2_op_(CHECK_JUST(op_expr_helper::AddNOp(2))) {}
 
 bool TensorArg::Empty() const { return !acc_tensor_; }
 
diff --git a/oneflow/core/job/parallel_desc.cpp b/oneflow/core/job/parallel_desc.cpp
index cfd2de702be079711a41a6acdeb9d86ff966a3ea..ace939a0f5c2ce85378b2b7138e1646c012cc7bc 100644
--- a/oneflow/core/job/parallel_desc.cpp
+++ b/oneflow/core/job/parallel_desc.cpp
@@ -84,8 +84,7 @@ Maybe<ParallelDesc> ParallelDesc::New(int64_t symbol_id, const ParallelConf& par
 Maybe<ParallelDesc> ParallelDesc::New(const std::string& device_tag,
                                       const std::vector<std::string>& machine_device_ids,
                                       const std::shared_ptr<Shape>& hierarchy) {
-  const auto parallel_conf =
-      MakeParallelConf(device_tag, machine_device_ids, hierarchy).GetPtrOrThrow();
+  const auto parallel_conf = JUST(MakeParallelConf(device_tag, machine_device_ids, hierarchy));
   std::shared_ptr<ParallelDesc> parallel_desc;
   JUST(LogicalRun([&parallel_desc, &parallel_conf](InstructionsBuilder* builder) -> Maybe<void> {
     parallel_desc = JUST(builder->GetParallelDescSymbol(parallel_conf));
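
Note on the pattern this diff migrates to: the throwing accessor Maybe<T>::GetPtrOrThrow() is replaced by the JUST and CHECK_JUST macros. Roughly, JUST unwraps a Maybe<T> on success and otherwise returns the error to the caller, so it is only usable inside functions that themselves return Maybe<...> (for example ParallelDesc::New and the op-interpreter helper above), while CHECK_JUST unwraps or hard-fails, which is why it appears in constructors such as TensorArg::TensorArg() and ReduceSumLikeModule's constructor, which cannot propagate a Maybe. The sketch below is a minimal, self-contained toy illustrating only that control flow; it is NOT OneFlow's actual maybe.h, and Maybe, JUST, CHECK_JUST, OpExpr, AddNOp, and BuildAccumulateOp here are simplified stand-ins with made-up signatures. It relies on the GCC/Clang statement-expression extension, so compile with something like g++ -std=gnu++17.

// Hypothetical sketch -- NOT OneFlow's real maybe.h; it only mimics the
// control flow of Maybe<T>, JUST, and CHECK_JUST.
#include <cstdlib>
#include <iostream>
#include <memory>
#include <optional>
#include <string>

template<typename T>
class Maybe {
 public:
  Maybe(std::shared_ptr<T> data) : data_(std::move(data)) {}  // success
  Maybe(std::string error) : error_(std::move(error)) {}      // failure
  bool IsOk() const { return !error_.has_value(); }
  std::shared_ptr<T> Data() const { return data_; }
  const std::string& Error() const { return *error_; }

 private:
  std::shared_ptr<T> data_;
  std::optional<std::string> error_;
};

// JUST: unwrap on success, otherwise return the error to the caller.
// Only usable inside a function that itself returns Maybe<...>.
#define JUST(expr)                                      \
  ({                                                    \
    auto&& maybe_val = (expr);                          \
    if (!maybe_val.IsOk()) { return maybe_val.Error(); } \
    maybe_val.Data();                                   \
  })

// CHECK_JUST: unwrap on success, otherwise abort. For places that cannot
// propagate a Maybe, e.g. constructors like TensorArg::TensorArg().
#define CHECK_JUST(expr)                                                \
  ({                                                                    \
    auto&& maybe_val = (expr);                                          \
    if (!maybe_val.IsOk()) {                                            \
      std::cerr << "CHECK_JUST failed: " << maybe_val.Error() << "\n";  \
      std::abort();                                                     \
    }                                                                   \
    maybe_val.Data();                                                   \
  })

struct OpExpr {
  std::string name;
};

// Stand-in for an op_expr_helper-style factory that can fail.
Maybe<OpExpr> AddNOp(int n) {
  if (n < 2) { return std::string("AddNOp: n must be >= 2"); }
  return std::make_shared<OpExpr>(OpExpr{"add_n_" + std::to_string(n)});
}

// A Maybe-returning caller uses JUST and lets errors bubble up.
Maybe<OpExpr> BuildAccumulateOp() {
  std::shared_ptr<OpExpr> add2 = JUST(AddNOp(2));
  return add2;
}

int main() {
  // A non-Maybe context (like the constructors in the diff) uses CHECK_JUST.
  std::shared_ptr<OpExpr> op = CHECK_JUST(BuildAccumulateOp());
  std::cout << op->name << "\n";  // prints: add_n_2
  return 0;
}

Under this toy scheme the diff's choices line up the same way: Maybe-returning call sites take JUST so the error keeps propagating, and the constructor call sites take CHECK_JUST, which fails fast instead of throwing as GetPtrOrThrow() did.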