diff --git a/research/cv/repvgg/README_CN.md b/research/cv/repvgg/README_CN.md
index e96a15118963e1df601dc739120cbb45f15cb998..43065d7249bb5ca7f4f80dafdeba072e9462a2bf 100644
--- a/research/cv/repvgg/README_CN.md
+++ b/research/cv/repvgg/README_CN.md
@@ -76,6 +76,7 @@ RepVGG is a novel CNN design paradigm proposed by Tsinghua University & Megvii Technology
      ├──run_distribute_train_ascend.sh   // multi-device Ascend 910 training script
      ├──run_infer_310.sh                 // Ascend 310 inference script
      ├──run_eval_ascend.sh               // evaluation script
+      ├──run_infer_onnx.sh                // ONNX inference script
   ├── src
      ├──configs                          // RepVGG configuration files
      ├──data                             // dataset configuration files
@@ -99,6 +100,7 @@ RepVGG is a novel CNN design paradigm proposed by Tsinghua University & Megvii Technology
  ├── export.py                           // model export script
  ├── preprocess.py                       // inference dataset preprocessing script
  ├── postprocess.py                      // inference accuracy postprocessing script
+  ├── infer_onnx.py                       // ONNX inference script
 ```
 
 ## Script Parameters
@@ -111,7 +113,7 @@ RepVGG is a novel CNN design paradigm proposed by Tsinghua University & Megvii Technology
     # Architecture
    arch: RepVGG-A0-A0tiny              # RepVGG architecture selection
     # ===== Dataset ===== #
-    data_url: ./data/imagenet           # dataset path
+    data_url: ./dataset                 # dataset path
    set: ImageNet                       # dataset name
    num_classes: 1000                   # number of dataset classes
    mix_up: 0.0                         # MixUp data augmentation parameter
@@ -173,6 +175,9 @@ RepVGG is a novel CNN design paradigm proposed by Tsinghua University & Megvii Technology
 
  # launch a single-device evaluation run with the script
   bash ./scripts/run_eval_ascend.sh [DEVICE_ID] [CONFIG_PATH] [CHECKPOINT_PATH]
+
+  # launch a single-device ONNX evaluation run with the script
+  bash ./scripts/run_infer_onnx.sh [ONNX_PATH] [DATASET_PATH] [DEVICE_TARGET] [DEVICE_ID]
   ```
 
  For distributed training, an hccl configuration file in JSON format needs to be created in advance.
@@ -189,13 +194,13 @@ RepVGG is a novel CNN design paradigm proposed by Tsinghua University & Megvii Technology
   python export.py --pretrained [CKPT_FILE] --config [CONFIG_PATH] --device_target [DEVICE_TARGET] --file_format [FILE_FORMAT]
   ```
 
-The exported model is named after the model architecture and saved in the current directory. Note: FILE_FORMAT must be one of ["AIR", "MINDIR"].
+The exported model is named after the model architecture and saved in the current directory. Note: FILE_FORMAT must be one of ["AIR", "MINDIR", "ONNX"].
 
 ## Inference Process
 
 ### Inference
 
-Before running inference, the model must be exported first. MINDIR models can be exported in any environment, while AIR models can only be exported in an Ascend 910 environment. The following shows an example of running inference with a MINDIR model.
+Before running inference, the model must be exported first. MINDIR models can be exported in any environment, AIR models can only be exported in an Ascend 910 environment, and ONNX models can be exported in CPU/GPU/Ascend environments. The following shows an example of running inference with a MINDIR model.
 
 - Inference on Ascend 310 with the ImageNet-1k dataset
 
@@ -208,6 +213,17 @@ RepVGG is a novel CNN design paradigm proposed by Tsinghua University & Megvii Technology
     Top5 acc: 0.90734
     ```
 
+- ONNX inference on GPU/CPU with the ImageNet-1k dataset
+
+  The inference results are saved in the main directory and can be found in the infer_onnx.log log file.
+
+    ```bash
+    # onnx inference
+    bash run_infer_onnx.sh [ONNX_PATH] [DATASET_PATH] [DEVICE_TARGET] [DEVICE_ID]
+    top-1 accuracy: 0.72024
+    top-5 accuracy: 0.90394
+    ```
+
 # [Model Description](#目录)
 
 ## Performance
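
A minimal end-to-end sketch of the ONNX flow documented above, assuming a trained checkpoint `RepVGG-A0.ckpt` and an ImageNet validation set under `./dataset` (both paths are placeholders):

```bash
# export the checkpoint to ONNX (FILE_FORMAT now accepts "ONNX")
python export.py --pretrained ./RepVGG-A0.ckpt --config ./src/configs/RepVGG-A0.yaml \
    --device_target GPU --file_format ONNX

# run the ONNX evaluation; the results are written to infer_onnx.log
bash ./scripts/run_infer_onnx.sh ./RepVGG-A0.onnx ./dataset GPU 0
cat infer_onnx.log
```
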
diff --git a/research/cv/repvgg/infer_onnx.py b/research/cv/repvgg/infer_onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..d01128fb6c3bb6edadae3ab52fd7f41d3e6f1684
--- /dev/null
+++ b/research/cv/repvgg/infer_onnx.py
@@ -0,0 +1,56 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""eval"""
+import onnxruntime as ort
+from mindspore import nn
+from src.args import args
+from src.data.imagenet import create_dataset_imagenet
+
+
+def create_session(onnx_path, target_device):
+    if target_device == 'GPU':
+        providers = ['CUDAExecutionProvider']
+    elif target_device == 'CPU':
+        providers = ['CPUExecutionProvider']
+    else:
+        raise ValueError(
+            f'Unsupported target device {target_device}; '
+            f'expected one of: "CPU", "GPU"'
+        )
+    session = ort.InferenceSession(onnx_path, providers=providers)
+    input_name = session.get_inputs()[0].name
+    return session, input_name
+
+
+def run_eval(onnx_path, data_dir, target_device):
+    session, input_name = create_session(onnx_path, target_device)
+    args.batch_size = 1
+    dataset = create_dataset_imagenet(data_dir, args, training=False)
+    metrics = {
+        'top-1 accuracy': nn.Top1CategoricalAccuracy(),
+        'top-5 accuracy': nn.Top5CategoricalAccuracy(),
+    }
+    for batch in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+        y_pred = session.run(None, {input_name: batch['image']})[0]
+        for metric in metrics.values():
+            metric.update(y_pred, batch['label'])
+    return {name: metric.eval() for name, metric in metrics.items()}
+
+
+if __name__ == '__main__':
+
+    results = run_eval(args.onnx_path, args.dataset_path, args.device_target)
+    for name, value in results.items():
+        print(f'{name}: {value:.5f}')
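
The run_infer_onnx.sh wrapper added below forwards its arguments to this entry point; for quick local checks it can also be invoked directly. A minimal sketch, with placeholder paths for the ONNX model and dataset:

```bash
# direct invocation; values passed on the command line take precedence over the
# YAML config (see get_config in src/args.py)
python infer_onnx.py --config=./src/configs/RepVGG-A0.yaml \
    --onnx_path=./RepVGG-A0.onnx --dataset_path=./dataset \
    --device_target=GPU --device_id=0
```
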
diff --git a/research/cv/repvgg/scripts/run_infer_onnx.sh b/research/cv/repvgg/scripts/run_infer_onnx.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f1695ae29af7a1d6af4cb85b37601285927ddfe0
--- /dev/null
+++ b/research/cv/repvgg/scripts/run_infer_onnx.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [[ $# -lt 2 || $# -gt 4 ]]; then
+    echo "Usage: bash run_infer_onnx.sh [ONNX_PATH] [DATASET_PATH] [DEVICE_TARGET(optional)] [DEVICE_ID(optional)]"
+    exit 1
+fi
+
+get_real_path(){
+  if [ "${1:0:1}" == "/" ]; then
+    echo "$1"
+  else
+    echo "$(realpath -m $PWD/$1)"
+  fi
+}
+onnx_path=$(get_real_path $1)
+dataset_path=$(get_real_path $2)
+# defaults for the optional arguments: GPU target, device 0
+device_target="GPU"
+device_id=0
+if [ $# -ge 3 ]; then
+  device_target=$3
+fi
+if [ $# -eq 4 ]; then
+  device_id=$4
+fi
+
+echo "onnx_path: "$onnx_path
+echo "dataset_path: "$dataset_path
+echo "device_target: "$device_target
+echo "device_id: "$device_id
+
+function infer()
+{
+    python ./infer_onnx.py --onnx_path=$onnx_path \
+                          --dataset_path=$dataset_path \
+                          --device_target=$device_target \
+                          --device_id=$device_id &> infer_onnx.log
+}
+infer
+if [ $? -ne 0 ]; then
+    echo " execute inference failed"
+    exit 1
+fi
\ No newline at end of file
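
Because DEVICE_TARGET and DEVICE_ID are optional, a two-argument invocation is also possible (assuming the defaults resolve to GPU and device 0); the paths are placeholders and the accuracy values are those reported in the README above:

```bash
bash ./scripts/run_infer_onnx.sh ./RepVGG-A0.onnx ./dataset
# infer_onnx.log should end with lines of the form:
#   top-1 accuracy: 0.72024
#   top-5 accuracy: 0.90394
```
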
diff --git a/research/cv/repvgg/src/args.py b/research/cv/repvgg/src/args.py
index 2112dfe2eefee15290c3bb0cfb4b8448a6de9ff3..57ba5389e3870f9d048a2216880d44377fa92a20 100644
--- a/research/cv/repvgg/src/args.py
+++ b/research/cv/repvgg/src/args.py
@@ -1,124 +1,129 @@
-# Copyright 2022 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""global args for Transformer in Transformer(TNT)"""
-import argparse
-import ast
-import os
-import sys
-
-import yaml
-
-from src.configs import parser as _parser
-
-args = None
-
-
-def parse_arguments():
-    """parse_arguments"""
-    global args
-    parser = argparse.ArgumentParser(description="MindSpore TNT Training")
-
-    parser.add_argument("-a", "--arch", metavar="ARCH", default="ResNet18", help="model architecture")
-    parser.add_argument("--accumulation_step", default=1, type=int, help="accumulation step")
-    parser.add_argument("--amp_level", default="O2", choices=["O0", "O1", "O2", "O3"], help="AMP Level")
-    parser.add_argument("--batch_size", default=256, type=int, metavar="N",
-                        help="mini-batch size (default: 256), this is the total "
-                             "batch size of all Devices on the current node when "
-                             "using Data Parallel or Distributed Data Parallel")
-    parser.add_argument("--beta", default=[0.9, 0.999], type=lambda x: [float(a) for a in x.split(",")],
-                        help="beta for optimizer")
-    parser.add_argument("--with_ema", default=False, type=ast.literal_eval, help="training with ema")
-    parser.add_argument("--ema_decay", default=0.9999, type=float, help="ema decay")
-    parser.add_argument('--data_url', default="./data", help='location of data.')
-    parser.add_argument("--device_id", default=0, type=int, help="device id")
-    parser.add_argument("--device_num", default=1, type=int, help="device num")
-    parser.add_argument("--device_target", default="Ascend", choices=["GPU", "Ascend"], type=str)
-    parser.add_argument("--epochs", default=300, type=int, metavar="N", help="number of total epochs to run")
-    parser.add_argument("--eps", default=1e-8, type=float)
-    parser.add_argument("--file_format", type=str, choices=["AIR", "MINDIR"], default="MINDIR", help="file format")
-    parser.add_argument("--in_chans", default=3, type=int)
-    parser.add_argument("--is_dynamic_loss_scale", default=1, type=int, help="is_dynamic_loss_scale ")
-    parser.add_argument("--keep_checkpoint_max", default=20, type=int, help="keep checkpoint max num")
-    parser.add_argument("--optimizer", help="Which optimizer to use", default="sgd")
-    parser.add_argument("--set", help="name of dataset", type=str, default="ImageNet")
-    parser.add_argument("--mix_up", default=0., type=float, help="mix up")
-    parser.add_argument("--mlp_ratio", help="mlp ", default=4., type=float)
-    parser.add_argument("-j", "--num_parallel_workers", default=20, type=int, metavar="N",
-                        help="number of data loading workers (default: 20)")
-    parser.add_argument("--start_epoch", default=0, type=int, metavar="N",
-                        help="manual epoch number (useful on restarts)")
-    parser.add_argument("--warmup_length", default=0, type=int, help="number of warmup iterations")
-    parser.add_argument("--warmup_lr", default=5e-7, type=float, help="warm up learning rate")
-    parser.add_argument("--wd", "--weight_decay", default=0.05, type=float, metavar="W",
-                        help="weight decay (default: 0.05)", dest="weight_decay")
-    parser.add_argument("--loss_scale", default=1024, type=int, help="loss_scale")
-    parser.add_argument("--lr", "--learning_rate", default=5e-4, type=float, help="initial lr", dest="lr")
-    parser.add_argument("--lr_scheduler", default="cosine_annealing", help="schedule for the learning rate.")
-    parser.add_argument("--lr_adjust", default=30, type=float, help="interval to drop lr")
-    parser.add_argument("--lr_gamma", default=0.97, type=int, help="multistep multiplier")
-    parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
-    parser.add_argument("--num_classes", default=1000, type=int)
-    parser.add_argument("--pretrained", dest="pretrained", default=None, type=str, help="use pre-trained model")
-    parser.add_argument("--config", help="Config file to use (see configs dir)", default=None, required=True)
-    parser.add_argument("--seed", default=0, type=int, help="seed for initializing training. ")
-    parser.add_argument("--save_every", default=2, type=int, help="save every ___ epochs(default:2)")
-    parser.add_argument("--label_smoothing", type=float, help="label smoothing to use, default 0.1", default=0.1)
-    parser.add_argument("--image_size", default=224, help="image Size.", type=int)
-    parser.add_argument('--train_url', default="./", help='location of training outputs.')
-    parser.add_argument("--run_modelarts", type=ast.literal_eval, default=False, help="whether run on modelarts")
-    parser.add_argument("--deploy", type=ast.literal_eval, default=False, help="whether run deploy")
-    args = parser.parse_args()
-
-    get_config()
-
-
-def get_config():
-    """get_config"""
-    global args
-    override_args = _parser.argv_to_vars(sys.argv)
-    # load yaml file
-    if args.run_modelarts:
-        import moxing as mox
-        if not args.config.startswith("obs:/"):
-            args.config = "obs:/" + args.config
-        with mox.file.File(args.config, 'r') as f:
-            yaml_txt = f.read()
-    else:
-        yaml_txt = open(args.config).read()
-
-    # override args
-    loaded_yaml = yaml.load(yaml_txt, Loader=yaml.FullLoader)
-
-    for v in override_args:
-        loaded_yaml[v] = getattr(args, v)
-
-    print(f"=> Reading YAML config from {args.config}")
-
-    args.__dict__.update(loaded_yaml)
-    print(args)
-    os.environ["DEVICE_TARGET"] = args.device_target
-    if "DEVICE_NUM" not in os.environ.keys():
-        os.environ["DEVICE_NUM"] = str(args.device_num)
-    if "RANK_SIZE" not in os.environ.keys():
-        os.environ["RANK_SIZE"] = str(args.device_num)
-
-def run_args():
-    """run and get args"""
-    global args
-    if args is None:
-        parse_arguments()
-
-
-run_args()
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""global args for Transformer in Transformer(TNT)"""
+import argparse
+import ast
+import os
+import sys
+
+import yaml
+
+from src.configs import parser as _parser
+
+args = None
+
+
+def parse_arguments():
+    """parse_arguments"""
+    global args
+    parser = argparse.ArgumentParser(description="MindSpore RepVGG Training")
+
+    parser.add_argument("-a", "--arch", metavar="ARCH", default="ResNet18", help="model architecture")
+    parser.add_argument("--accumulation_step", default=1, type=int, help="accumulation step")
+    parser.add_argument("--amp_level", default="O0", choices=["O0", "O1", "O2", "O3"], help="AMP Level")
+    parser.add_argument("--batch_size", default=256, type=int, metavar="N",
+                        help="mini-batch size (default: 256), this is the total "
+                             "batch size of all Devices on the current node when "
+                             "using Data Parallel or Distributed Data Parallel")
+    parser.add_argument("--beta", default=[0.9, 0.999], type=lambda x: [float(a) for a in x.split(",")],
+                        help="beta for optimizer")
+    parser.add_argument("--with_ema", default=False, type=ast.literal_eval, help="training with ema")
+    parser.add_argument("--ema_decay", default=0.9999, type=float, help="ema decay")
+    parser.add_argument('--data_url', default="./data", help='location of data.')
+    parser.add_argument("--device_id", default=0, type=int, help="device id")
+    parser.add_argument("--device_num", default=1, type=int, help="device num")
+    parser.add_argument("--device_target", default="Ascend", choices=["GPU", "Ascend"], type=str)
+    parser.add_argument("--epochs", default=300, type=int, metavar="N", help="number of total epochs to run")
+    parser.add_argument("--eps", default=1e-8, type=float)
+    parser.add_argument("--file_format", type=str, choices=["AIR", "MINDIR", "ONNX"], default="MINDIR",
+                        help="file format")
+    parser.add_argument("--in_chans", default=3, type=int)
+    parser.add_argument("--is_dynamic_loss_scale", default=1, type=int, help="is_dynamic_loss_scale ")
+    parser.add_argument("--keep_checkpoint_max", default=20, type=int, help="keep checkpoint max num")
+    parser.add_argument("--optimizer", help="Which optimizer to use", default="sgd")
+    parser.add_argument("--set", help="name of dataset", type=str, default="ImageNet")
+    parser.add_argument("--mix_up", default=0., type=float, help="mix up")
+    parser.add_argument("--mlp_ratio", help="mlp ", default=4., type=float)
+    parser.add_argument("-j", "--num_parallel_workers", default=20, type=int, metavar="N",
+                        help="number of data loading workers (default: 20)")
+    parser.add_argument("--start_epoch", default=0, type=int, metavar="N",
+                        help="manual epoch number (useful on restarts)")
+    parser.add_argument("--warmup_length", default=0, type=int, help="number of warmup iterations")
+    parser.add_argument("--warmup_lr", default=5e-7, type=float, help="warm up learning rate")
+    parser.add_argument("--wd", "--weight_decay", default=0.05, type=float, metavar="W",
+                        help="weight decay (default: 0.05)", dest="weight_decay")
+    parser.add_argument("--loss_scale", default=1024, type=int, help="loss_scale")
+    parser.add_argument("--lr", "--learning_rate", default=5e-4, type=float, help="initial lr", dest="lr")
+    parser.add_argument("--lr_scheduler", default="cosine_annealing", help="schedule for the learning rate.")
+    parser.add_argument("--lr_adjust", default=30, type=float, help="interval to drop lr")
+    parser.add_argument("--lr_gamma", default=0.97, type=int, help="multistep multiplier")
+    parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
+    parser.add_argument("--num_classes", default=1000, type=int)
+    parser.add_argument("--pretrained", dest="pretrained", default=None, type=str,
+                        help="use pre-trained model")
+    parser.add_argument("--config", help="Config file to use (see configs dir)",
+                        default="./src/configs/RepVGG-A0.yaml", required=False)
+    parser.add_argument("--seed", default=0, type=int, help="seed for initializing training. ")
+    parser.add_argument("--save_every", default=2, type=int, help="save every ___ epochs(default:2)")
+    parser.add_argument("--label_smoothing", type=float, help="label smoothing to use, default 0.1", default=0.1)
+    parser.add_argument("--image_size", default=224, help="image Size.", type=int)
+    parser.add_argument('--train_url', default="./", help='location of training outputs.')
+    parser.add_argument("--run_modelarts", type=ast.literal_eval, default=False, help="whether run on modelarts")
+    parser.add_argument("--deploy", type=ast.literal_eval, default=False, help="whether run deploy")
+    parser.add_argument("--onnx_path", type=str, default=None, help="ONNX file path")
+    parser.add_argument("--dataset_path", type=str, default=None, help="Dataset path")
+    args = parser.parse_args()
+
+    get_config()
+
+
+def get_config():
+    """get_config"""
+    global args
+    override_args = _parser.argv_to_vars(sys.argv)
+    # load yaml file
+    if args.run_modelarts:
+        import moxing as mox
+        if not args.config.startswith("obs:/"):
+            args.config = "obs:/" + args.config
+        with mox.file.File(args.config, 'r') as f:
+            yaml_txt = f.read()
+    else:
+        yaml_txt = open(args.config).read()
+
+    # override args
+    loaded_yaml = yaml.load(yaml_txt, Loader=yaml.FullLoader)
+
+    for v in override_args:
+        loaded_yaml[v] = getattr(args, v)
+
+    print(f"=> Reading YAML config from {args.config}")
+
+    args.__dict__.update(loaded_yaml)
+    print(args)
+    os.environ["DEVICE_TARGET"] = args.device_target
+    if "DEVICE_NUM" not in os.environ.keys():
+        os.environ["DEVICE_NUM"] = str(args.device_num)
+    if "RANK_SIZE" not in os.environ.keys():
+        os.environ["RANK_SIZE"] = str(args.device_num)
+
+def run_args():
+    """run and get args"""
+    global args
+    if args is None:
+        parse_arguments()
+
+
+run_args()
diff --git a/research/cv/repvgg/src/configs/RepVGG-A0.yaml b/research/cv/repvgg/src/configs/RepVGG-A0.yaml
index 76348592d55f081663a7acb4b732509513e6623c..641152d5b71f38b7128d359cddf28510c6b20059 100644
--- a/research/cv/repvgg/src/configs/RepVGG-A0.yaml
+++ b/research/cv/repvgg/src/configs/RepVGG-A0.yaml
@@ -45,4 +45,4 @@ ema_decay: 0.9999
 
 # ===== Hardware setup ===== #
 num_parallel_workers: 16
-device_target: Ascend
\ No newline at end of file
+device_target: Ascend