diff --git a/research/cv/NFNet/README_CN.md b/research/cv/NFNet/README_CN.md
index 86be4a43bd2385818a6a9d9e25a92d64413bf689..d46125b2b0d7ebda8f1d8f4fde4424ec918beb7d 100644
--- a/research/cv/NFNet/README_CN.md
+++ b/research/cv/NFNet/README_CN.md
@@ -51,7 +51,7 @@
 └─dataset
     ├─train                 # 训练数据集
     └─val                   # 评估数据集
 ```
 
 # [特性](#目录)
 
@@ -154,7 +154,6 @@
     # ===== Hardware setup ===== #
     num_parallel_workers: 16                # 数据预处理线程数
     device_target: Ascend                   # 硬件选择
-
     # ===== Model config ===== #
     input_size: 192                         # 训练图像大小
     test_input_size: 256                    # 测试图像大小
@@ -182,7 +181,7 @@
 
   # 使用python启动单卡运行评估示例
   python eval.py --device_id 0 --device_target Ascend --config ./src/configs/dm_nfnet_f0.yaml
-  --pretrained ./ckpt_0/dm_nfnet_f0.ckpt  --inference True > ./eval.log 2>&1 &
+  --pretrained ./ckpt_0/dm_nfnet_f0.ckpt > ./eval.log 2>&1 &
 
   # 使用脚本启动单卡运行评估示例
   bash ./scripts/run_eval_ascend.sh [DEVICE_ID] [CONFIG_PATH] [CHECKPOINT_PATH]
@@ -211,7 +210,7 @@
 
   # 使用python启动单卡运行评估示例
   python eval.py --device_id 0 --device_target GPU --config ./src/configs/dm_nfnet_f0.yaml \
-  --pretrained ./ckpt_0/dm_nfnet_f0.ckpt  --inference True > ./eval.log 2>&1 &
+  --pretrained ./ckpt_0/dm_nfnet_f0.ckpt > ./eval.log 2>&1 &
 
   # 使用脚本启动单卡运行评估示例
   bash ./scripts/run_eval_gpu.sh [DEVICE_ID] [CONFIG_PATH] [CHECKPOINT_PATH]
@@ -222,7 +221,7 @@
 ### 导出
 
   ```shell
-  python export.py --pretrained [CKPT_FILE] --config [CONFIG_PATH] --device_target [DEVICE_TARGET] --inference True
+  python export.py --pretrained [CKPT_FILE] --config [CONFIG_PATH] --device_target [DEVICE_TARGET]
   ```
 
 导出的模型会以模型的结构名字命名并且保存在当前目录下
diff --git a/research/cv/NFNet/eval.py b/research/cv/NFNet/eval.py
index f140fb39294d953f417268ba23c2b0c4b77abb8e..f26694e15847a535f5cb222e004b1c1009921b2f 100644
--- a/research/cv/NFNet/eval.py
+++ b/research/cv/NFNet/eval.py
@@ -20,7 +20,6 @@ from mindspore import nn
 from mindspore.common import set_seed
 
 from src.args import args
-from src.models.NFNet.std_conv import ScaledStdConv2dUnit
 from src.tools.cell import cast_amp
 from src.tools.criterion import get_criterion, NetWithLoss
 from src.tools.get_misc import get_dataset, set_device, get_model, pretrained, get_train_one_step
@@ -48,10 +47,7 @@ def main():
     net_with_loss = NetWithLoss(net, criterion)
     if args.pretrained:
         pretrained(args, net)
-    for name, cell in net.cells_and_names():
-        if isinstance(cell, ScaledStdConv2dUnit):
-            cell.weight.set_data(cell.wise_normalize())
-            print(f"=> uniwise {name}'s weight for inference")
+
     data = get_dataset(args, training=False)
     batch_num = data.val_dataset.get_dataset_size()
     optimizer = get_optimizer(args, net, batch_num)
diff --git a/research/cv/NFNet/export.py b/research/cv/NFNet/export.py
index d68151fa296689588ee1c2aa74f2b6d64be0ab48..62b06e62dcf37f2df2c1d5844e81a67ba56ce799 100644
--- a/research/cv/NFNet/export.py
+++ b/research/cv/NFNet/export.py
@@ -22,7 +22,6 @@ from mindspore import Tensor, load_checkpoint, load_param_into_net, export, cont
 from mindspore import dtype as mstype
 
 from src.args import args
-from src.models.NFNet.std_conv import ScaledStdConv2dUnit
 from src.tools.cell import cast_amp
 from src.tools.criterion import get_criterion, NetWithLoss
 from src.tools.get_misc import get_model
@@ -44,10 +43,6 @@ if __name__ == '__main__':
 
     net.set_train(False)
     net.to_float(mstype.float32)
-    for name, cell in net.cells_and_names():
-        if isinstance(cell, ScaledStdConv2dUnit):
-            cell.weight.set_data(cell.wise_normalize())
-            print(f"=> uniwise {name}'s weight for inference")
 
     input_arr = Tensor(np.zeros([1, 3, args.test_input_size, args.test_input_size], np.float32))
     export(net, input_arr, file_name=args.arch, file_format=args.file_format)
diff --git a/research/cv/NFNet/scripts/run_eval_ascend.sh b/research/cv/NFNet/scripts/run_eval_ascend.sh
index c553726f4531edbaac8853ea93b8f800da813271..bfcb023afc9de6b3d5e09261d24b15766a53f423 100644
--- a/research/cv/NFNet/scripts/run_eval_ascend.sh
+++ b/research/cv/NFNet/scripts/run_eval_ascend.sh
@@ -31,5 +31,5 @@ cd ./evaluation_ascend || exit
 echo  "start training for device id $DEVICE_ID"
 env > env.log
 python ../eval.py --device_target=Ascend --device_id=$DEVICE_ID --config=$CONFIG_PATH \
---pretrained=$CHECKPOINT_PATH --inference=True > eval.log 2>&1 &
+--pretrained=$CHECKPOINT_PATH > eval.log 2>&1 &
 cd ../
diff --git a/research/cv/NFNet/scripts/run_eval_gpu.sh b/research/cv/NFNet/scripts/run_eval_gpu.sh
index ea3e903821b3fb0783aa9f5dc1c2e460893c21dc..854b72604c95392f8d15652b9e681aa8ec56ca67 100644
--- a/research/cv/NFNet/scripts/run_eval_gpu.sh
+++ b/research/cv/NFNet/scripts/run_eval_gpu.sh
@@ -15,7 +15,7 @@
 # ============================================================================
 if [ $# -lt 3 ]
 then
-    echo "Usage: bash ./scripts/run_standalone_train_ascend.sh [DEVICE_ID] [CONFIG_PATH] [CHECKPOINT_PATH]"
+    echo "Usage: bash ./scripts/run_standalone_train_gpu.sh [DEVICE_ID] [CONFIG_PATH] [CHECKPOINT_PATH]"
 exit 1
 fi
 
@@ -25,11 +25,11 @@ CHECKPOINT_PATH=$3
 export RANK_SIZE=1
 export DEVICE_NUM=1
 
-rm -rf evaluation_ascend
-mkdir ./evaluation_ascend
-cd ./evaluation_ascend || exit
+rm -rf evaluation_gpu
+mkdir ./evaluation_gpu
+cd ./evaluation_gpu || exit
 echo  "start training for device id $DEVICE_ID"
 env > env.log
 python ../eval.py --device_target=GPU --device_id=$DEVICE_ID --config=$CONFIG_PATH \
---pretrained=$CHECKPOINT_PATH --inference=True > eval.log 2>&1 &
+--pretrained=$CHECKPOINT_PATH > eval.log 2>&1 &
 cd ../
diff --git a/research/cv/NFNet/src/args.py b/research/cv/NFNet/src/args.py
index 8125aa3f510b751d1cc6eb57e5c64f8edf1a1c46..323b4052b9c62a66092e4cefc1c73bb58401826a 100644
--- a/research/cv/NFNet/src/args.py
+++ b/research/cv/NFNet/src/args.py
@@ -51,8 +51,6 @@ def parse_arguments():
     parser.add_argument("--file_format", type=str, choices=["AIR", "MINDIR"], default="MINDIR", help="file format")
     parser.add_argument("--in_channel", default=3, type=int)
     parser.add_argument("--is_dynamic_loss_scale", default=1, type=int, help="is_dynamic_loss_scale ")
-    parser.add_argument("--inference", default=False, type=ast.literal_eval,
-                        help="inference mode")
     parser.add_argument("--keep_checkpoint_max", default=20, type=int, help="keep checkpoint max num")
     parser.add_argument("--optimizer", help="Which optimizer to use", default="sgd")
     parser.add_argument("--set", help="name of dataset", type=str, default="ImageNet")
diff --git a/research/cv/NFNet/src/models/NFNet/std_conv.py b/research/cv/NFNet/src/models/NFNet/std_conv.py
index 70b725f4962a37119e2d0d4306c10c263bafdcc3..cfc6c389e8b1b53e16245226421cff19e97b96f9 100644
--- a/research/cv/NFNet/src/models/NFNet/std_conv.py
+++ b/research/cv/NFNet/src/models/NFNet/std_conv.py
@@ -26,6 +26,7 @@ Code: https://github.com/joe-siyuan-qiao/WeightStandardization
 ScaledStdConv:
 Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets`
     - https://arxiv.org/abs/2101.08692
+
 Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets
 
 Hacked together by / copyright Ross Wightman, 2021.
@@ -58,25 +59,15 @@ class ScaledStdConv2dUnit(nn.Conv2d):
         self.fan_in = Tensor(np.prod(self.weight[0].shape), mstype.float32)  # gamma * 1 / sqrt(fan-in)
         self.gamma = Tensor(gamma, mstype.float32)
         self.eps = eps
-        self.inference = args.inference
 
-    def wise_normalize(self):
+    def construct(self, x):
+        """ScaledStdConv2dUnit Construct"""
+
         mean = ops.ReduceMean(True)(self.weight, (1, 2, 3))
         var = ops.ReduceMean(True)(ops.Square()(self.weight - mean), (1, 2, 3))
         scale = ops.Rsqrt()(ops.Maximum()(var * self.fan_in, self.eps))
         weight = (self.weight - mean) * self.gain * scale
-        return weight
-
-    def construct(self, x):
-        """ScaledStdConv2dUnit Construct"""
-        if self.inference:
-            x = self.conv2d(x, self.weight)
-        else:
-            mean = ops.ReduceMean(True)(self.weight, (1, 2, 3))
-            var = ops.ReduceMean(True)(ops.Square()(self.weight - mean), (1, 2, 3))
-            scale = ops.Rsqrt()(ops.Maximum()(var * self.fan_in, self.eps))
-            weight = (self.weight - mean) * self.gain * scale
-            x = self.conv2d(x, weight)
+        x = self.conv2d(x, weight)
         if self.has_bias:
             x = self.bias_add(x, self.bias)
         return x