Unverified commit 75d3fa43, authored by Yinggang Wang and committed by GitHub

style(*): rename now_grad_arg to current_grad (#5466)


Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
parent 46105b56
@@ -32,24 +32,24 @@ namespace {
 bool IsReadyToRun(const std::vector<std::shared_ptr<AutogradMeta>>& out_meta_datas) {
   return std::any_of(out_meta_datas.begin(), out_meta_datas.end(),
                      [](const std::shared_ptr<AutogradMeta>& meta_data) {
-                       return !meta_data->now_grad_arg()->Empty();
+                       return !meta_data->current_grad()->Empty();
                      });
 }
 Maybe<void> CopyOrAccGrad(AutogradMeta* autograd_meta, bool autograd_mode) {
   autograd::AutoGradMode mode(autograd_mode);
-  auto now_grad = JUST(autograd_meta->now_grad_arg()->GetAccTensor());
-  if (!now_grad) { return Maybe<void>::Ok(); }
+  auto current_grad = JUST(autograd_meta->current_grad()->GetAccTensor());
+  if (!current_grad) { return Maybe<void>::Ok(); }
   for (const auto& hook : autograd_meta->hooks()) {
-    auto new_grad = hook(now_grad);
-    if (new_grad) { now_grad = new_grad; }
+    auto new_grad = hook(current_grad);
+    if (new_grad) { current_grad = new_grad; }
   }
   if (autograd_meta->acc_grad()) {
     const auto& output =
-        JUST(functional::Add(autograd_meta->acc_grad(), now_grad, /*inplace=*/true));
+        JUST(functional::Add(autograd_meta->acc_grad(), current_grad, /*inplace=*/true));
     autograd_meta->set_acc_grad(output);
   } else {
-    autograd_meta->set_acc_grad(now_grad);
+    autograd_meta->set_acc_grad(current_grad);
   }
   return Maybe<void>::Ok();
 }
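To make the CopyOrAccGrad logic above easier to follow outside the diff, here is a minimal standalone sketch of the same accumulate-or-copy pattern. It uses plain C++ containers instead of OneFlow's Tensor, TensorArg, Maybe, and functional::Add, so every name below (Grad, MetaSketch, CopyOrAccGradSketch) is illustrative only, not OneFlow API.

#include <cstddef>
#include <functional>
#include <memory>
#include <vector>

using Grad = std::vector<float>;
using Hook = std::function<std::shared_ptr<Grad>(const std::shared_ptr<Grad>&)>;

// Hypothetical stand-in for AutogradMeta: a persistent accumulated grad, the
// temporary per-backward grad, and user-registered hooks.
struct MetaSketch {
  std::shared_ptr<Grad> acc_grad;      // survives across backward passes
  std::shared_ptr<Grad> current_grad;  // produced by the pass in flight
  std::vector<Hook> hooks;
};

void CopyOrAccGradSketch(MetaSketch* meta) {
  auto grad = meta->current_grad;
  if (!grad) { return; }  // no gradient flowed to this tensor
  for (const auto& hook : meta->hooks) {
    auto new_grad = hook(grad);         // a hook may replace the gradient...
    if (new_grad) { grad = new_grad; }  // ...or return null to keep it as is
  }
  if (meta->acc_grad) {
    // Accumulate in place (assumes matching sizes), mirroring the in-place Add.
    for (std::size_t i = 0; i < meta->acc_grad->size(); ++i) { (*meta->acc_grad)[i] += (*grad)[i]; }
  } else {
    meta->acc_grad = grad;  // first backward pass: adopt the gradient directly
  }
}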
@@ -103,7 +103,7 @@ Maybe<void> FunctionNode::AccGrad4LeafTensor(bool create_graph) {
 void FunctionNode::ReleaseOutTensorArgs() {
   for (const std::shared_ptr<AutogradMeta>& meta_data : output_meta_datas_) {
-    meta_data->now_grad_arg()->Release();
+    meta_data->current_grad()->Release();
   }
 }
@@ -121,10 +121,10 @@ Maybe<bool> FunctionNode::Apply(bool create_graph) {
   TensorTuple input_grads(input_meta_datas_.size());
   TensorTuple output_grads(output_meta_datas_.size());
   for (int i = 0; i < output_meta_datas_.size(); ++i) {
-    if (output_meta_datas_.at(i)->now_grad_arg()->Empty()) {
+    if (output_meta_datas_.at(i)->current_grad()->Empty()) {
       output_grads.at(i) = JUST(output_tensor_infos_.at(i).zeros());
     } else {
-      output_grads.at(i) = JUST(output_meta_datas_.at(i)->now_grad_arg()->GetAccTensor());
+      output_grads.at(i) = JUST(output_meta_datas_.at(i)->current_grad()->GetAccTensor());
     }
   }
   JUST((*backward_fn_)(output_grads, &input_grads, create_graph));
@@ -135,7 +135,7 @@ Maybe<bool> FunctionNode::Apply(bool create_graph) {
         << " calculate grad for tensor which requires_grad is False. Please submit an issue in "
            "`https://github.com/Oneflow-Inc/oneflow/issues` and we will fix it as soon as "
            "possible";
-      JUST(input_meta_datas_.at(i)->now_grad_arg()->PushPartialTensor(input_grads.at(i)));
+      JUST(input_meta_datas_.at(i)->current_grad()->PushPartialTensor(input_grads.at(i)));
     }
   }
   return true;
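The Apply hunk above gathers one gradient per output before calling the backward function, substituting zeros when an output's current_grad is still empty. Below is a hedged standalone sketch of just that gather step; OutputSlot and GatherOutputGrads are hypothetical stand-ins for output_meta_datas_ and output_tensor_infos_, not OneFlow types.

#include <cstddef>
#include <memory>
#include <vector>

using Grad = std::vector<float>;

// Each slot pairs an optional accumulated gradient with the element count
// needed for the zeros() fallback.
struct OutputSlot {
  std::shared_ptr<Grad> current_grad;  // null when no grad reached this output
  std::size_t num_elements = 0;
};

std::vector<Grad> GatherOutputGrads(const std::vector<OutputSlot>& outputs) {
  std::vector<Grad> grads(outputs.size());
  for (std::size_t i = 0; i < outputs.size(); ++i) {
    if (!outputs[i].current_grad) {
      grads[i] = Grad(outputs[i].num_elements, 0.0f);  // analogue of zeros()
    } else {
      grads[i] = *outputs[i].current_grad;  // the accumulated partial grads
    }
  }
  return grads;
}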
@@ -157,7 +157,7 @@ Maybe<void> StackAutogradEngine::RunBackwardAndSaveGrads4LeafTensor(const Tensor
                                                                     bool create_graph) {
   ClearReleasedFunctionNodes();
   for (int i = 0; i < outputs.size(); ++i) {
-    JUST(JUST(outputs.at(i)->now_grad_arg())->PushPartialTensor(out_grads.at(i)));
+    JUST(JUST(outputs.at(i)->current_grad())->PushPartialTensor(out_grads.at(i)));
   }
   // Runs each FunctionNode
   for (const auto& weak_func_node : node_list_) {
@@ -178,14 +178,14 @@ Maybe<TensorTuple> StackAutogradEngine::RunBackwardAndReturnInputsTensorGrad(
     const TensorTuple& outputs, const TensorTuple& inputs, const TensorTuple& out_grads,
     bool retain_graph, bool create_graph) {
   ClearReleasedFunctionNodes();
-  std::shared_ptr<TensorTuple> input_now_grads = std::make_shared<TensorTuple>(inputs.size());
+  std::shared_ptr<TensorTuple> input_current_grad = std::make_shared<TensorTuple>(inputs.size());
   std::vector<bool> ori_retain_grad(inputs.size());
   for (int i = 0; i < inputs.size(); ++i) {
     ori_retain_grad.at(i) = inputs.at(i)->retain_grad();
     JUST(inputs.at(i)->set_retain_grad(true));
   }
   for (int i = 0; i < outputs.size(); ++i) {
-    JUST(JUST(outputs.at(i)->now_grad_arg())->PushPartialTensor(out_grads.at(i)));
+    JUST(JUST(outputs.at(i)->current_grad())->PushPartialTensor(out_grads.at(i)));
   }
   // Runs each FunctionNode
   for (const auto& weak_func_node : node_list_) {
@@ -199,14 +199,14 @@ Maybe<TensorTuple> StackAutogradEngine::RunBackwardAndReturnInputsTensorGrad(
   }
   // Gets input grads and resume retain_grad
   for (int i = 0; i < inputs.size(); ++i) {
-    input_now_grads->at(i) = JUST(inputs.at(i)->acc_grad());
+    input_current_grad->at(i) = JUST(inputs.at(i)->acc_grad());
     if (!ori_retain_grad.at(i)) {
       JUST(inputs.at(i)->set_acc_grad(nullptr));
       JUST(inputs.at(i)->set_retain_grad(false));
     }
   }
   if (!retain_graph) { ClearEngine(); }
-  return input_now_grads;
+  return input_current_grad;
 }
 Maybe<FunctionNode> StackAutogradEngine::AddBackwardFuncPtr(
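RunBackwardAndReturnInputsTensorGrad temporarily forces retain_grad on the requested inputs, runs the graph, harvests their accumulated grads, then restores the original flags and drops grads the caller never asked to keep. Here is a minimal sketch of that save/enable/restore pattern under simplified assumptions; TensorSketch and CaptureInputGrads are hypothetical names, not OneFlow's Tensor API.

#include <cstddef>
#include <functional>
#include <memory>
#include <vector>

struct TensorSketch {
  bool retain_grad = false;
  std::shared_ptr<std::vector<float>> acc_grad;
};

std::vector<std::shared_ptr<std::vector<float>>> CaptureInputGrads(
    const std::vector<TensorSketch*>& inputs, const std::function<void()>& run_backward) {
  std::vector<bool> ori_retain_grad(inputs.size());
  for (std::size_t i = 0; i < inputs.size(); ++i) {
    ori_retain_grad[i] = inputs[i]->retain_grad;  // remember the caller's setting
    inputs[i]->retain_grad = true;                // force grads to be kept for now
  }
  run_backward();  // drives the FunctionNode chain (elided in this sketch)
  std::vector<std::shared_ptr<std::vector<float>>> grads(inputs.size());
  for (std::size_t i = 0; i < inputs.size(); ++i) {
    grads[i] = inputs[i]->acc_grad;
    if (!ori_retain_grad[i]) {
      inputs[i]->acc_grad = nullptr;   // the caller did not opt into keeping it
      inputs[i]->retain_grad = false;  // restore the original flag
    }
  }
  return grads;
}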
@@ -385,7 +385,7 @@ Maybe<void> GraphAutogradEngine::RunBackwardAndSaveGrads4LeafTensor(const Tensor
                                                                     bool retain_graph,
                                                                     bool create_graph) {
   for (int i = 0; i < outputs.size(); ++i) {
-    JUST(JUST(outputs.at(i)->now_grad_arg())->PushPartialTensor(out_grads.at(i)));
+    JUST(JUST(outputs.at(i)->current_grad())->PushPartialTensor(out_grads.at(i)));
   }
   GraphTask graph_task(outputs, retain_graph, create_graph);
   JUST(graph_task.ComputeDependencies());
@@ -396,7 +396,7 @@ Maybe<void> GraphAutogradEngine::RunBackwardAndSaveGrads4LeafTensor(const Tensor
 Maybe<TensorTuple> GraphAutogradEngine::RunBackwardAndReturnInputsTensorGrad(
     const TensorTuple& outputs, const TensorTuple& inputs, const TensorTuple& out_grads,
     bool retain_graph, bool create_graph) {
-  std::shared_ptr<TensorTuple> input_now_grads = std::make_shared<TensorTuple>(inputs.size());
+  std::shared_ptr<TensorTuple> input_current_grad = std::make_shared<TensorTuple>(inputs.size());
   GraphTask graph_task(outputs, retain_graph, create_graph);
   std::vector<bool> ori_retain_grad(inputs.size());
   for (int i = 0; i < inputs.size(); ++i) {
@@ -404,7 +404,7 @@ Maybe<TensorTuple> GraphAutogradEngine::RunBackwardAndReturnInputsTensorGrad(
     JUST(inputs.at(i)->set_retain_grad(true));
   }
   for (int i = 0; i < outputs.size(); ++i) {
-    JUST(JUST(outputs.at(i)->now_grad_arg())->PushPartialTensor(out_grads.at(i)));
+    JUST(JUST(outputs.at(i)->current_grad())->PushPartialTensor(out_grads.at(i)));
   }
   JUST(graph_task.ComputeDependenciesAndPruneNode(inputs));
@@ -412,13 +412,13 @@ Maybe<TensorTuple> GraphAutogradEngine::RunBackwardAndReturnInputsTensorGrad(
   // Gets input grads and resume retain_grad
   for (int i = 0; i < inputs.size(); ++i) {
-    input_now_grads->at(i) = JUST(inputs.at(i)->acc_grad());
+    input_current_grad->at(i) = JUST(inputs.at(i)->acc_grad());
     if (!ori_retain_grad.at(i)) {
       JUST(inputs.at(i)->set_acc_grad(nullptr));
       JUST(inputs.at(i)->set_retain_grad(false));
     }
   }
-  return input_now_grads;
+  return input_current_grad;
 }
 Maybe<FunctionNode> GraphAutogradEngine::AddBackwardFuncPtr(
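The GraphAutogradEngine variant schedules FunctionNodes by dependency counting (ComputeDependencies / ComputeDependenciesAndPruneNode) rather than walking a flat node list. The following is a generic illustration of that scheduling pattern under simplified assumptions; NodeSketch and RunGraphBackward are hypothetical names, and this is not a transcription of OneFlow's GraphTask.

#include <functional>
#include <queue>
#include <unordered_map>
#include <vector>

struct NodeSketch {
  std::vector<NodeSketch*> next;   // nodes that produced this node's inputs
  std::function<void()> backward;  // this node's backward computation
};

void RunGraphBackward(const std::vector<NodeSketch*>& roots) {
  // Pass 1: count, for every reachable node, how many consumers will feed it a grad.
  std::unordered_map<NodeSketch*, int> pending;
  std::unordered_map<NodeSketch*, bool> seen;
  std::vector<NodeSketch*> stack(roots.begin(), roots.end());
  while (!stack.empty()) {
    NodeSketch* node = stack.back();
    stack.pop_back();
    if (seen[node]) { continue; }
    seen[node] = true;
    for (NodeSketch* producer : node->next) {
      ++pending[producer];
      stack.push_back(producer);
    }
  }
  // Pass 2: run a node only once all of its consumers have run.
  std::queue<NodeSketch*> ready;
  for (const auto& entry : seen) {
    if (pending[entry.first] == 0) { ready.push(entry.first); }
  }
  while (!ready.empty()) {
    NodeSketch* node = ready.front();
    ready.pop();
    if (node->backward) { node->backward(); }
    for (NodeSketch* producer : node->next) {
      if (--pending[producer] == 0) { ready.push(producer); }
    }
  }
}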
@@ -39,11 +39,11 @@ class AutogradMeta final {
       : is_leaf_(is_leaf),
         requires_grad_(requires_grad),
         retain_grad_(false),
-        now_grad_arg_(new TensorArg) {}
+        current_grad_(new TensorArg) {}
   // Getters
   const std::shared_ptr<Tensor>& acc_grad() const { return acc_grad_; }
-  const std::shared_ptr<TensorArg>& now_grad_arg() const { return now_grad_arg_; }
+  const std::shared_ptr<TensorArg>& current_grad() const { return current_grad_; }
   bool requires_grad() const { return requires_grad_; }
   bool is_leaf() const { return is_leaf_; }
   bool retain_grad() const { return retain_grad_; }
@@ -68,7 +68,7 @@ class AutogradMeta final {
   bool retain_grad_;
   std::shared_ptr<Tensor> acc_grad_;
-  std::shared_ptr<TensorArg> now_grad_arg_;
+  std::shared_ptr<TensorArg> current_grad_;
   std::vector<Hook> hooks_;
 };
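As renamed above, AutogradMeta keeps acc_grad_ (the gradient a user observes across backward calls) alongside current_grad_, formerly now_grad_arg_, which only buffers the partial gradients of the backward pass in flight and is released afterwards (see ReleaseOutTensorArgs). Here is a minimal analogue of those two slots, assuming plain C++ types in place of Tensor and TensorArg; AutogradMetaSketch is hypothetical.

#include <memory>
#include <utility>
#include <vector>

class AutogradMetaSketch {
 public:
  // The accumulated gradient: what a tensor's grad would expose after backward.
  const std::shared_ptr<std::vector<float>>& acc_grad() const { return acc_grad_; }
  // The temporary gradient of the current backward pass.
  const std::shared_ptr<std::vector<float>>& current_grad() const { return current_grad_; }

  void set_acc_grad(std::shared_ptr<std::vector<float>> grad) { acc_grad_ = std::move(grad); }
  void set_current_grad(std::shared_ptr<std::vector<float>> grad) { current_grad_ = std::move(grad); }
  void release_current_grad() { current_grad_.reset(); }  // analogue of ReleaseOutTensorArgs()

 private:
  std::shared_ptr<std::vector<float>> acc_grad_;      // persists across backward passes
  std::shared_ptr<std::vector<float>> current_grad_;  // cleared after each pass
};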
@@ -82,7 +82,7 @@ class Tensor {
   virtual bool retain_grad() const = 0;
   virtual std::shared_ptr<const FunctionNode> grad_fn_node() const = 0;
   virtual Maybe<Tensor> acc_grad() const = 0;
-  virtual Maybe<TensorArg> now_grad_arg() const = 0;
+  virtual Maybe<TensorArg> current_grad() const = 0;
   virtual Maybe<Tensor> detach() const = 0;
   virtual Maybe<Tensor> clone() const = 0;
   virtual std::shared_ptr<Tensor> data() const = 0;
@@ -116,7 +116,7 @@ class TensorIf : public Tensor {
   // Getters for autograd
   // acc_grad is tensor's accumulated grad in more than once backward operation,
-  // and now_grad_arg is temporary grad to shared data with different FunctionNode
+  // and current_grad is temporary grad to shared data with different FunctionNode
   std::shared_ptr<const FunctionNode> grad_fn_node() const override { return grad_fn_node_; }
   // Setters for autograd
@@ -168,7 +168,7 @@ class MirroredTensor final : public TensorIf<MirroredTensor>,
   // Getters for autograd
   Maybe<Tensor> acc_grad() const override { return impl_->acc_grad(); }
-  Maybe<TensorArg> now_grad_arg() const override { return impl_->now_grad_arg(); }
+  Maybe<TensorArg> current_grad() const override { return impl_->current_grad(); }
   bool requires_grad() const override { return impl_->requires_grad(); }
   bool is_leaf() const override { return impl_->is_leaf(); }
   bool retain_grad() const override { return impl_->retain_grad(); }
@@ -261,7 +261,7 @@ class ConsistentTensor final : public TensorIf<ConsistentTensor> {
   // Getters for autograd
   Maybe<Tensor> acc_grad() const override { return impl_->acc_grad(); }
-  Maybe<TensorArg> now_grad_arg() const override { return impl_->now_grad_arg(); }
+  Maybe<TensorArg> current_grad() const override { return impl_->current_grad(); }
   bool requires_grad() const override { return impl_->requires_grad(); }
   bool is_leaf() const override { return impl_->is_leaf(); }
   bool retain_grad() const override { return impl_->retain_grad(); }
@@ -37,9 +37,9 @@ Maybe<Tensor> TensorImpl::acc_grad() const {
   return autograd_meta_->acc_grad();
 }
-Maybe<TensorArg> TensorImpl::now_grad_arg() const {
+Maybe<TensorArg> TensorImpl::current_grad() const {
   CHECK_NOTNULL_OR_RETURN(autograd_meta_);
-  return autograd_meta_->now_grad_arg();
+  return autograd_meta_->current_grad();
 }
 Maybe<void> TensorImpl::set_acc_grad(const std::shared_ptr<Tensor>& grad) {
@@ -66,7 +66,7 @@ class TensorImpl {
   // Getters for autograd
   Maybe<Tensor> acc_grad() const;
-  Maybe<TensorArg> now_grad_arg() const;
+  Maybe<TensorArg> current_grad() const;
   bool requires_grad() const { return requires_grad_; }
   bool is_leaf() const { return is_leaf_; }
   bool retain_grad() const { return autograd_meta_->retain_grad(); }