Commit 7363e57d authored by zhouneng

[gnmt_v2] enable scalar summary only in graph mode

parent c24158c7
@@ -79,7 +79,7 @@ class DynamicRNNCell(nn.Cell):
         if self.is_ascend:
             w = self.cast(self.dynamicRNN_w, self.compute_type)
             b = self.cast(self.dynamicRNN_b, self.compute_type)
-            output, hn, cn = self.rnn(x, w, b, None, init_h, init_c)
+            output, hn, cn, _, _, _, _, _ = self.rnn(x, w, b, None, init_h, init_c)
         else:
             output, (hn, cn) = self.lstm(x, (init_h, init_c))
         return output, hn, cn
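For context on the hunk above: on Ascend, the mindspore.ops.DynamicRNN primitive returns eight tensors (y, output_h, output_c, i, j, f, o, tanhc), so the call site must unpack all eight and keep only the first three. Below is a minimal, self-contained sketch of that pattern; the shapes (num_step=2, batch_size=16, input_size=hidden_size=32) are illustrative assumptions, not values from this repository, and the op itself only executes on Ascend devices.

# Sketch: unpacking the eight outputs of DynamicRNN (Ascend-only op).
import numpy as np
from mindspore import Tensor
import mindspore.ops.operations as P

num_step, batch_size, input_size, hidden_size = 2, 16, 32, 32
rnn = P.DynamicRNN()
# x: (num_step, batch_size, input_size); w/b: packed LSTM weight and bias.
x = Tensor(np.random.rand(num_step, batch_size, input_size).astype(np.float16))
w = Tensor(np.random.rand(input_size + hidden_size, 4 * hidden_size).astype(np.float16))
b = Tensor(np.random.rand(4 * hidden_size).astype(np.float16))
init_h = Tensor(np.random.rand(1, batch_size, hidden_size).astype(np.float16))
init_c = Tensor(np.random.rand(1, batch_size, hidden_size).astype(np.float16))

# Keep the output sequence and final hidden/cell states; discard the
# per-gate intermediates (i, j, f, o, tanhc) with `_` placeholders.
output, hn, cn, _, _, _, _, _ = rnn(x, w, b, None, init_h, init_c)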
@@ -16,6 +16,7 @@
 import numpy as np
+from mindspore import context
 from mindspore.common import dtype as mstype
 from mindspore.common.initializer import initializer
 from mindspore.nn import Optimizer
@@ -230,6 +231,8 @@ class Adam(Optimizer):
         self.lr_scalar = P.ScalarSummary()
+        self.exec_mode = context.get_context("mode")
+
     def construct(self, gradients):
         """Adam optimizer."""
         params = self.parameters
@@ -239,7 +242,9 @@
         gradients = self.scale_grad(gradients)
         lr = self.get_lr()
-        self.lr_scalar("learning_rate", lr)
+        # currently, Summary operators only support graph mode
+        if self.exec_mode == context.GRAPH_MODE:
+            self.lr_scalar("learning_rate", lr)
         beta1_power = self.beta1_power * self.beta1
         self.beta1_power = beta1_power
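The guard above exists because Summary primitives such as P.ScalarSummary are only supported when the network is compiled and run in graph mode; in PyNative mode the call is simply skipped. Here is a condensed sketch of the same pattern in a standalone cell, where MySummaryCell is a hypothetical name and not code from this repository:

# Sketch: record a scalar summary only when running in graph mode.
import mindspore.nn as nn
import mindspore.ops.operations as P
from mindspore import context

class MySummaryCell(nn.Cell):
    def __init__(self):
        super(MySummaryCell, self).__init__()
        self.lr_scalar = P.ScalarSummary()
        # Read the execution mode once, at construction time.
        self.exec_mode = context.get_context("mode")

    def construct(self, lr):
        # Emit the summary only when compiled as a graph; PyNative
        # mode does not support Summary operators.
        if self.exec_mode == context.GRAPH_MODE:
            self.lr_scalar("learning_rate", lr)
        return lr

For the scalar to actually be written out, the cell would also have to run under context.set_context(mode=context.GRAPH_MODE) with a summary recorder (e.g. SummaryRecord or the SummaryCollector callback) attached.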