Commit 3d689058 authored by zhaoting

change to_tensor to init_data

parent bb7f21ac
Showing 65 additions and 65 deletions
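
This commit renames every call on the lazily-initialized tensor returned by mindspore.common.initializer.initializer(...): older MindSpore releases materialized it with to_tensor(), which newer releases replace with init_data(). A minimal sketch of the pattern migrated in every hunk below (the shape and dtype here are illustrative, not taken from any one file):

    import mindspore as ms
    from mindspore.common.initializer import initializer

    # Illustrative conv weight shape: (out_channels, in_channels, kH, kW).
    shape = (64, 3, 3, 3)

    # Before: to_tensor() materialized the initializer into a Tensor.
    # weights = initializer("XavierUniform", shape=shape, dtype=ms.float32).to_tensor()

    # After: init_data() generates the data in place and returns the Tensor.
    weights = initializer("XavierUniform", shape=shape, dtype=ms.float32).init_data()
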
@@ -30,7 +30,7 @@ def bias_init_zeros(shape):
 def _conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0, pad_mode='pad'):
     """Conv2D wrapper."""
     shape = (out_channels, in_channels, kernel_size, kernel_size)
-    weights = ms.common.initializer.initializer("XavierUniform", shape=shape, dtype=ms.float32).to_tensor()
+    weights = ms.common.initializer.initializer("XavierUniform", shape=shape, dtype=ms.float32).init_data()
     shape_bias = (out_channels,)
     biass = bias_init_zeros(shape_bias)
     return nn.Conv2d(in_channels, out_channels,

@@ -95,18 +95,18 @@ class Rcnn(nn.Cell):
         shape_0 = (self.rcnn_fc_out_channels, representation_size)
         weights_0 = ms.common.initializer.initializer("XavierUniform", shape=shape_0[::-1], \
-                                                      dtype=self.ms_type).to_tensor()
+                                                      dtype=self.ms_type).init_data()
         shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
         weights_1 = ms.common.initializer.initializer("XavierUniform", shape=shape_1[::-1], \
-                                                      dtype=self.ms_type).to_tensor()
+                                                      dtype=self.ms_type).init_data()
         self.shared_fc_0 = DenseNoTranpose(representation_size, self.rcnn_fc_out_channels, weights_0)
         self.shared_fc_1 = DenseNoTranpose(self.rcnn_fc_out_channels, self.rcnn_fc_out_channels, weights_1)
         cls_weight = ms.common.initializer.initializer('Normal', shape=[num_classes, self.rcnn_fc_out_channels][::-1],
-                                                       dtype=self.ms_type).to_tensor()
+                                                       dtype=self.ms_type).init_data()
         reg_weight = ms.common.initializer.initializer('Normal', shape=[self.num_classes_fronted * 4,
                                                                         self.rcnn_fc_out_channels][::-1],
-                                                       dtype=self.ms_type).to_tensor()
+                                                       dtype=self.ms_type).init_data()
         self.cls_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes, cls_weight)
         self.reg_scores = DenseNoTranpose(self.rcnn_fc_out_channels, self.num_classes_fronted * 4, reg_weight)

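The weight shapes above are reversed with [::-1] before initialization: DenseNoTranpose (the repo's spelling) evidently stores its weight as (in_channels, out_channels) and multiplies without a transpose. A plausible reconstruction of that cell, consistent with its usage in these hunks but not taken from this diff:

    import mindspore as ms
    import mindspore.nn as nn
    import mindspore.ops as ops
    from mindspore.common.initializer import initializer

    class DenseNoTranpose(nn.Cell):
        """Hypothetical sketch: a dense layer whose weight is stored
        (in_channels, out_channels), so construct() needs no transpose --
        hence the [::-1] reversal of the (out, in) init shapes above."""
        def __init__(self, input_channels, output_channels, weight_init):
            super().__init__()
            self.weight = ms.Parameter(weight_init, name="weight")
            self.bias = ms.Parameter(
                initializer("zeros", [output_channels], ms.float32).init_data(),
                name="bias")
            self.matmul = ops.MatMul(transpose_b=False)
            self.bias_add = ops.BiasAdd()

        def construct(self, x):
            return self.bias_add(self.matmul(x, self.weight), self.bias)
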
@@ -165,18 +165,18 @@ class RPN(nn.Cell):
         shp_weight_conv = (feat_channels, in_channels, 3, 3)
         shp_bias_conv = (feat_channels,)
-        weight_conv = ms.common.initializer.initializer('Normal', shape=shp_weight_conv, dtype=self.ms_type).to_tensor()
-        bias_conv = ms.common.initializer.initializer(0, shape=shp_bias_conv, dtype=self.ms_type).to_tensor()
+        weight_conv = ms.common.initializer.initializer('Normal', shape=shp_weight_conv, dtype=self.ms_type).init_data()
+        bias_conv = ms.common.initializer.initializer(0, shape=shp_bias_conv, dtype=self.ms_type).init_data()
         shp_weight_cls = (num_anchors * cls_out_channels, feat_channels, 1, 1)
         shp_bias_cls = (num_anchors * cls_out_channels,)
-        weight_cls = ms.common.initializer.initializer('Normal', shape=shp_weight_cls, dtype=self.ms_type).to_tensor()
-        bias_cls = ms.common.initializer.initializer(0, shape=shp_bias_cls, dtype=self.ms_type).to_tensor()
+        weight_cls = ms.common.initializer.initializer('Normal', shape=shp_weight_cls, dtype=self.ms_type).init_data()
+        bias_cls = ms.common.initializer.initializer(0, shape=shp_bias_cls, dtype=self.ms_type).init_data()
         shp_weight_reg = (num_anchors * 4, feat_channels, 1, 1)
         shp_bias_reg = (num_anchors * 4,)
-        weight_reg = ms.common.initializer.initializer('Normal', shape=shp_weight_reg, dtype=self.ms_type).to_tensor()
-        bias_reg = ms.common.initializer.initializer(0, shape=shp_bias_reg, dtype=self.ms_type).to_tensor()
+        weight_reg = ms.common.initializer.initializer('Normal', shape=shp_weight_reg, dtype=self.ms_type).init_data()
+        bias_reg = ms.common.initializer.initializer(0, shape=shp_bias_reg, dtype=self.ms_type).init_data()
         for i in range(num_layers):
             rpn_reg_cls_block = RpnRegClsBlock(in_channels, feat_channels, num_anchors, cls_out_channels, \

@@ -30,7 +30,7 @@ def bias_init_zeros(shape):
 def _conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0, pad_mode='pad'):
     """Conv2D wrapper."""
     shape = (out_channels, in_channels, kernel_size, kernel_size)
-    weights = initializer("XavierUniform", shape=shape, dtype=mstype.float32).to_tensor()
+    weights = initializer("XavierUniform", shape=shape, dtype=mstype.float32).init_data()
     shape_bias = (out_channels,)
     biass = bias_init_zeros(shape_bias)
     return nn.Conv2d(in_channels, out_channels,

@@ -88,18 +88,18 @@ class Rcnn(nn.Cell):
         self.test_batch_size = cfg.test_batch_size
         shape_0 = (self.rcnn_fc_out_channels, representation_size)
-        weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=self.ms_type).to_tensor()
+        weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=self.ms_type).init_data()
         shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
-        weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=self.ms_type).to_tensor()
+        weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=self.ms_type).init_data()
         self.shared_fc_0 = DenseNoTranpose(representation_size, self.rcnn_fc_out_channels, weights_0)
         self.shared_fc_1 = DenseNoTranpose(self.rcnn_fc_out_channels, self.rcnn_fc_out_channels, weights_1)
         cls_weight = initializer('Normal', shape=[num_classes, self.rcnn_fc_out_channels][::-1],
-                                 dtype=self.ms_type).to_tensor()
+                                 dtype=self.ms_type).init_data()
         reg_weight = initializer('Normal', shape=[num_classes * 4, self.rcnn_fc_out_channels][::-1],
-                                 dtype=self.ms_type).to_tensor()
+                                 dtype=self.ms_type).init_data()
         reg_weight_agn = initializer('Normal', shape=[4, self.rcnn_fc_out_channels][::-1],
-                                     dtype=mstype.float32).to_tensor()
+                                     dtype=mstype.float32).init_data()
         self.cls_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes, cls_weight)
         self.reg_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes * 4, reg_weight)
         self.reg_scores_class_ang = DenseNoTranpose(self.rcnn_fc_out_channels, 4, reg_weight_agn)

@@ -88,18 +88,18 @@ class Rcnn_1(nn.Cell):
         self.test_batch_size = cfg.test_batch_size
         shape_0 = (self.rcnn_fc_out_channels, representation_size)
-        weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=self.ms_type).to_tensor()
+        weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=self.ms_type).init_data()
         shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
-        weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=self.ms_type).to_tensor()
+        weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=self.ms_type).init_data()
         self.shared_fc_0 = DenseNoTranpose(representation_size, self.rcnn_fc_out_channels, weights_0)
         self.shared_fc_1 = DenseNoTranpose(self.rcnn_fc_out_channels, self.rcnn_fc_out_channels, weights_1)
         cls_weight = initializer('Normal', shape=[num_classes, self.rcnn_fc_out_channels][::-1],
-                                 dtype=self.ms_type).to_tensor()
+                                 dtype=self.ms_type).init_data()
         reg_weight = initializer('Normal', shape=[num_classes * 4, self.rcnn_fc_out_channels][::-1],
-                                 dtype=self.ms_type).to_tensor()
+                                 dtype=self.ms_type).init_data()
         reg_weight_agn = initializer('Normal', shape=[4, self.rcnn_fc_out_channels][::-1],
-                                     dtype=mstype.float32).to_tensor()
+                                     dtype=mstype.float32).init_data()
         self.cls_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes, cls_weight)
         self.reg_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes * 4, reg_weight)
         self.reg_scores_class_ang = DenseNoTranpose(self.rcnn_fc_out_channels, 4, reg_weight_agn)

@@ -88,16 +88,16 @@ class Rcnn_2(nn.Cell):
         self.test_batch_size = cfg.test_batch_size
         shape_0 = (self.rcnn_fc_out_channels, representation_size)
-        weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=self.ms_type).to_tensor()
+        weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=self.ms_type).init_data()
         shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
-        weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=self.ms_type).to_tensor()
+        weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=self.ms_type).init_data()
         self.shared_fc_0 = DenseNoTranpose(representation_size, self.rcnn_fc_out_channels, weights_0)
         self.shared_fc_1 = DenseNoTranpose(self.rcnn_fc_out_channels, self.rcnn_fc_out_channels, weights_1)
         cls_weight = initializer('Normal', shape=[num_classes, self.rcnn_fc_out_channels][::-1],
-                                 dtype=self.ms_type).to_tensor()
+                                 dtype=self.ms_type).init_data()
         reg_weight = initializer('Normal', shape=[num_classes * 4, self.rcnn_fc_out_channels][::-1],
-                                 dtype=self.ms_type).to_tensor()
+                                 dtype=self.ms_type).init_data()
         self.cls_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes, cls_weight)
         self.reg_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes * 4, reg_weight)

@@ -167,18 +167,18 @@ class RPN(nn.Cell):
         shp_weight_conv = (feat_channels, in_channels, 3, 3)
         shp_bias_conv = (feat_channels,)
-        weight_conv = initializer('Normal', shape=shp_weight_conv, dtype=self.ms_type).to_tensor()
-        bias_conv = initializer(0, shape=shp_bias_conv, dtype=self.ms_type).to_tensor()
+        weight_conv = initializer('Normal', shape=shp_weight_conv, dtype=self.ms_type).init_data()
+        bias_conv = initializer(0, shape=shp_bias_conv, dtype=self.ms_type).init_data()
         shp_weight_cls = (num_anchors * cls_out_channels, feat_channels, 1, 1)
         shp_bias_cls = (num_anchors * cls_out_channels,)
-        weight_cls = initializer('Normal', shape=shp_weight_cls, dtype=self.ms_type).to_tensor()
-        bias_cls = initializer(0, shape=shp_bias_cls, dtype=self.ms_type).to_tensor()
+        weight_cls = initializer('Normal', shape=shp_weight_cls, dtype=self.ms_type).init_data()
+        bias_cls = initializer(0, shape=shp_bias_cls, dtype=self.ms_type).init_data()
         shp_weight_reg = (num_anchors * 4, feat_channels, 1, 1)
         shp_bias_reg = (num_anchors * 4,)
-        weight_reg = initializer('Normal', shape=shp_weight_reg, dtype=self.ms_type).to_tensor()
-        bias_reg = initializer(0, shape=shp_bias_reg, dtype=self.ms_type).to_tensor()
+        weight_reg = initializer('Normal', shape=shp_weight_reg, dtype=self.ms_type).init_data()
+        bias_reg = initializer(0, shape=shp_bias_reg, dtype=self.ms_type).init_data()
         for i in range(num_layers):
             rpn_reg_cls_block = RpnRegClsBlock(in_channels, feat_channels, num_anchors, cls_out_channels, \

@@ -52,7 +52,7 @@ def calculate_fan_in_and_fan_out(shape):
 def get_conv_bias(cell):
     """Bias initializer for conv."""
     weight = initializer.initializer(initializer.HeUniform(negative_slope=math.sqrt(5)),
-                                     cell.weight.shape, cell.weight.dtype).to_tensor()
+                                     cell.weight.shape, cell.weight.dtype).init_data()
     fan_in, _ = calculate_fan_in_and_fan_out(weight.shape)
     bound = 1 / math.sqrt(fan_in)
     return initializer.initializer(initializer.Uniform(scale=bound),

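get_conv_bias follows the familiar Kaiming-style conv bias scheme: sample HeUniform weights, then draw the bias uniformly from [-bound, bound] with bound = 1/sqrt(fan_in), where fan_in for a conv weight of shape (out_channels, in_channels, kH, kW) is in_channels * kH * kW. A self-contained sketch of the same computation; fan_in_from_shape below is a stand-in for the repo's calculate_fan_in_and_fan_out, inferred from its usage here:

    import math
    from mindspore.common import initializer

    def fan_in_from_shape(shape):
        # (out, in[, kH, kW]): fan-in is in_channels times the receptive field size.
        receptive = 1
        for dim in shape[2:]:
            receptive *= dim
        return shape[1] * receptive

    weight_shape = (64, 3, 3, 3)  # illustrative conv weight shape
    weight = initializer.initializer(initializer.HeUniform(negative_slope=math.sqrt(5)),
                                     weight_shape).init_data()
    bound = 1 / math.sqrt(fan_in_from_shape(weight.shape))  # here 1/sqrt(3*3*3)
    bias = initializer.initializer(initializer.Uniform(scale=bound),
                                   (weight_shape[0],)).init_data()
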
@@ -554,7 +554,7 @@ def calculate_fan_in_and_fan_out(shape):
 def get_conv_bias(cell):
     """Bias initializer for conv."""
     weight = initializer.initializer(initializer.HeUniform(negative_slope=math.sqrt(5)),
-                                     cell.weight.shape, cell.weight.dtype).to_tensor()
+                                     cell.weight.shape, cell.weight.dtype).init_data()
     fan_in, _ = calculate_fan_in_and_fan_out(weight.shape)
     bound = 1 / math.sqrt(fan_in)
     return initializer.initializer(initializer.Uniform(scale=bound),

@@ -47,7 +47,7 @@ def calculate_fan_in_and_fan_out(shape):
 def get_conv_bias(cell):
     weight = initializer.initializer(initializer.HeUniform(negative_slope=math.sqrt(5)),
-                                     cell.weight.shape, cell.weight.dtype).to_tensor()
+                                     cell.weight.shape, cell.weight.dtype).init_data()
     fan_in, _ = calculate_fan_in_and_fan_out(weight.shape)
     bound = 1 / math.sqrt(fan_in)
     return initializer.initializer(initializer.Uniform(scale=bound),

@@ -38,7 +38,7 @@ def _make_layer(base, args, batch_norm):
             weight = 'ones'
             if args.initialize_mode == "XavierUniform":
                 weight_shape = (v, in_channels, 3, 3)
-                weight = initializer('XavierUniform', shape=weight_shape, dtype=mstype.float32).to_tensor()
+                weight = initializer('XavierUniform', shape=weight_shape, dtype=mstype.float32).init_data()
             conv2d = nn.Conv2d(in_channels=in_channels,
                                out_channels=v,

@@ -52,7 +52,7 @@ def calculate_fan_in_and_fan_out(shape):
 def get_conv_bias(cell):
     """Bias initializer for conv."""
     weight = initializer.initializer(initializer.HeUniform(negative_slope=math.sqrt(5)),
-                                     cell.weight.shape, cell.weight.dtype).to_tensor()
+                                     cell.weight.shape, cell.weight.dtype).init_data()
     fan_in, _ = calculate_fan_in_and_fan_out(weight.shape)
     bound = 1 / math.sqrt(fan_in)
     return initializer.initializer(initializer.Uniform(scale=bound),

@@ -30,7 +30,7 @@ def bias_init_zeros(shape):
 def _conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0, pad_mode='pad'):
     """Conv2D wrapper."""
     shape = (out_channels, in_channels, kernel_size, kernel_size)
-    weights = initializer("XavierUniform", shape=shape, dtype=mstype.float32).to_tensor()
+    weights = initializer("XavierUniform", shape=shape, dtype=mstype.float32).init_data()
     shape_bias = (out_channels,)
     biass = bias_init_zeros(shape_bias)
     return nn.Conv2d(in_channels, out_channels,

@@ -88,16 +88,16 @@ class Rcnn(nn.Cell):
         self.test_batch_size = cfg.test_batch_size
         shape_0 = (self.rcnn_fc_out_channels, representation_size)
-        weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=self.ms_type).to_tensor()
+        weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=self.ms_type).init_data()
         shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
-        weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=self.ms_type).to_tensor()
+        weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=self.ms_type).init_data()
         self.shared_fc_0 = DenseNoTranpose(representation_size, self.rcnn_fc_out_channels, weights_0)
         self.shared_fc_1 = DenseNoTranpose(self.rcnn_fc_out_channels, self.rcnn_fc_out_channels, weights_1)
         cls_weight = initializer('Normal', shape=[num_classes, self.rcnn_fc_out_channels][::-1],
-                                 dtype=self.ms_type).to_tensor()
+                                 dtype=self.ms_type).init_data()
         reg_weight = initializer('Normal', shape=[num_classes * 4, self.rcnn_fc_out_channels][::-1],
-                                 dtype=self.ms_type).to_tensor()
+                                 dtype=self.ms_type).init_data()
         self.cls_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes, cls_weight)
         self.reg_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes * 4, reg_weight)

@@ -168,18 +168,18 @@ class RPN(nn.Cell):
         shp_weight_conv = (feat_channels, in_channels, 3, 3)
         shp_bias_conv = (feat_channels,)
-        weight_conv = initializer('Normal', shape=shp_weight_conv, dtype=self.ms_type).to_tensor()
-        bias_conv = initializer(0, shape=shp_bias_conv, dtype=self.ms_type).to_tensor()
+        weight_conv = initializer('Normal', shape=shp_weight_conv, dtype=self.ms_type).init_data()
+        bias_conv = initializer(0, shape=shp_bias_conv, dtype=self.ms_type).init_data()
         shp_weight_cls = (num_anchors * cls_out_channels, feat_channels, 1, 1)
         shp_bias_cls = (num_anchors * cls_out_channels,)
-        weight_cls = initializer('Normal', shape=shp_weight_cls, dtype=self.ms_type).to_tensor()
-        bias_cls = initializer(0, shape=shp_bias_cls, dtype=self.ms_type).to_tensor()
+        weight_cls = initializer('Normal', shape=shp_weight_cls, dtype=self.ms_type).init_data()
+        bias_cls = initializer(0, shape=shp_bias_cls, dtype=self.ms_type).init_data()
         shp_weight_reg = (num_anchors * 4, feat_channels, 1, 1)
         shp_bias_reg = (num_anchors * 4,)
-        weight_reg = initializer('Normal', shape=shp_weight_reg, dtype=self.ms_type).to_tensor()
-        bias_reg = initializer(0, shape=shp_bias_reg, dtype=self.ms_type).to_tensor()
+        weight_reg = initializer('Normal', shape=shp_weight_reg, dtype=self.ms_type).init_data()
+        bias_reg = initializer(0, shape=shp_bias_reg, dtype=self.ms_type).init_data()
         for i in range(num_layers):
             rpn_reg_cls_block = RpnRegClsBlock(in_channels, feat_channels, num_anchors, cls_out_channels, \

@@ -34,7 +34,7 @@ def _conv(
     shape = (out_channels, in_channels, kernel_size, kernel_size)
     weights = initializer(
         "XavierUniform", shape=shape, dtype=mstype.float32
-    ).to_tensor()
+    ).init_data()
     shape_bias = (out_channels,)
     biass = bias_init_zeros(shape_bias)
     return nn.Conv2d(

@@ -101,11 +101,11 @@ class Rcnn(nn.Cell):
         shape_0 = (self.rcnn_fc_out_channels, representation_size)
         weights_0 = initializer(
             "XavierUniform", shape=shape_0[::-1], dtype=self.ms_type
-        ).to_tensor()
+        ).init_data()
         shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
         weights_1 = initializer(
             "XavierUniform", shape=shape_1[::-1], dtype=self.ms_type
-        ).to_tensor()
+        ).init_data()
         self.shared_fc_0 = DenseNoTranpose(
             representation_size, self.rcnn_fc_out_channels, weights_0
         )
@@ -117,12 +117,12 @@ class Rcnn(nn.Cell):
             "Normal",
             shape=[num_classes, self.rcnn_fc_out_channels][::-1],
             dtype=self.ms_type,
-        ).to_tensor()
+        ).init_data()
         reg_weight = initializer(
             "Normal",
             shape=[num_classes * 4, self.rcnn_fc_out_channels][::-1],
             dtype=self.ms_type,
-        ).to_tensor()
+        ).init_data()
         self.cls_scores = DenseNoTranpose(
             self.rcnn_fc_out_channels, num_classes, cls_weight
         )

@@ -220,22 +220,22 @@ class RPN(nn.Cell):
         shp_bias_conv = (feat_channels,)
         weight_conv = initializer(
             "Normal", shape=shp_weight_conv, dtype=self.ms_type
-        ).to_tensor()
-        bias_conv = initializer(0, shape=shp_bias_conv, dtype=self.ms_type).to_tensor()
+        ).init_data()
+        bias_conv = initializer(0, shape=shp_bias_conv, dtype=self.ms_type).init_data()
         shp_weight_cls = (num_anchors * cls_out_channels, feat_channels, 1, 1)
         shp_bias_cls = (num_anchors * cls_out_channels,)
         weight_cls = initializer(
             "Normal", shape=shp_weight_cls, dtype=self.ms_type
-        ).to_tensor()
-        bias_cls = initializer(0, shape=shp_bias_cls, dtype=self.ms_type).to_tensor()
+        ).init_data()
+        bias_cls = initializer(0, shape=shp_bias_cls, dtype=self.ms_type).init_data()
         shp_weight_reg = (num_anchors * 4, feat_channels, 1, 1)
         shp_bias_reg = (num_anchors * 4,)
         weight_reg = initializer(
             "Normal", shape=shp_weight_reg, dtype=self.ms_type
-        ).to_tensor()
-        bias_reg = initializer(0, shape=shp_bias_reg, dtype=self.ms_type).to_tensor()
+        ).init_data()
+        bias_reg = initializer(0, shape=shp_bias_reg, dtype=self.ms_type).init_data()
         for i in range(num_layers):
             rpn_reg_cls_block = RpnRegClsBlock(

@@ -166,18 +166,18 @@ class RPN(nn.Cell):
         shp_weight_conv = (feat_channels, in_channels, 3, 3)
         shp_bias_conv = (feat_channels,)
-        weight_conv = initializer('Normal', shape=shp_weight_conv, dtype=self.ms_type).to_tensor()
-        bias_conv = initializer(0, shape=shp_bias_conv, dtype=self.ms_type).to_tensor()
+        weight_conv = initializer('Normal', shape=shp_weight_conv, dtype=self.ms_type).init_data()
+        bias_conv = initializer(0, shape=shp_bias_conv, dtype=self.ms_type).init_data()
         shp_weight_cls = (num_anchors * cls_out_channels, feat_channels, 1, 1)
         shp_bias_cls = (num_anchors * cls_out_channels,)
-        weight_cls = initializer('Normal', shape=shp_weight_cls, dtype=self.ms_type).to_tensor()
-        bias_cls = initializer(0, shape=shp_bias_cls, dtype=self.ms_type).to_tensor()
+        weight_cls = initializer('Normal', shape=shp_weight_cls, dtype=self.ms_type).init_data()
+        bias_cls = initializer(0, shape=shp_bias_cls, dtype=self.ms_type).init_data()
         shp_weight_reg = (num_anchors * 4, feat_channels, 1, 1)
         shp_bias_reg = (num_anchors * 4,)
-        weight_reg = initializer('Normal', shape=shp_weight_reg, dtype=self.ms_type).to_tensor()
-        bias_reg = initializer(0, shape=shp_bias_reg, dtype=self.ms_type).to_tensor()
+        weight_reg = initializer('Normal', shape=shp_weight_reg, dtype=self.ms_type).init_data()
+        bias_reg = initializer(0, shape=shp_bias_reg, dtype=self.ms_type).init_data()
         for i in range(num_layers):
             rpn_reg_cls_block = RpnRegClsBlock(in_channels, feat_channels, num_anchors, cls_out_channels, \