diff --git a/research/cv/EDSR/DIV2K_config.yaml b/research/cv/EDSR/DIV2K_config.yaml
index 3be2f73f124c4ce8c51905eddcaf16758c227b08..b5bb17a385f2dbb7833e30da43fd4b0320ba64b4 100644
--- a/research/cv/EDSR/DIV2K_config.yaml
+++ b/research/cv/EDSR/DIV2K_config.yaml
@@ -22,6 +22,7 @@ epoch_size: 6000
 eval_epoch_frq: 20
 self_ensemble: True
 save_sr: True
+eval_type: ""
 
 # Adam opt options
 opt_type: Adam
diff --git a/research/cv/EDSR/README_CN.md b/research/cv/EDSR/README_CN.md
index 34638ca943c83ebe4a4660f9df2555b6fc27d5d7..cf5459758411b8727b03cd5aedd94d3c8d0d72ef 100644
--- a/research/cv/EDSR/README_CN.md
+++ b/research/cv/EDSR/README_CN.md
@@ -21,6 +21,7 @@
         - [鎺ㄧ悊](#鎺ㄧ悊)
             - [鍦ㄦ槆鑵�310涓婁娇鐢―IV2K鏁版嵁闆嗚繘琛屾帹鐞哴(#鍦ㄦ槆鑵�310涓婁娇鐢―IV2K鏁版嵁闆嗚繘琛屾帹鐞�)
             - [鍦ㄦ槆鑵�310涓婁娇鐢ㄥ叾浠栨暟鎹泦杩涜鎺ㄧ悊](#鍦ㄦ槆鑵�310涓婁娇鐢ㄥ叾浠栨暟鎹泦杩涜鎺ㄧ悊)
+            - [杩涜onnx鎺ㄧ悊](#杩涜onnx鎺ㄧ悊)
 - [妯″瀷鎻忚堪](#妯″瀷鎻忚堪)
     - [鎬ц兘](#鎬ц兘)
         - [璁粌鎬ц兘](#璁粌鎬ц兘)
@@ -218,7 +219,7 @@ EDSR鏄敱澶氫釜浼樺寲鍚庣殑residual blocks涓茶仈鑰屾垚锛岀浉姣斿師濮嬬増鏈殑r
 
 ## 鑴氭湰鍙婃牱渚嬩唬鐮�
 
-```bash
+```text
 鈹溾攢鈹€ model_zoo
     鈹溾攢鈹€ README.md                       // 鎵€鏈夋ā鍨嬬浉鍏宠鏄�
     鈹溾攢鈹€ EDSR
@@ -229,6 +230,7 @@ EDSR鏄敱澶氫釜浼樺寲鍚庣殑residual blocks涓茶仈鑰屾垚锛岀浉姣斿師濮嬬増鏈殑r
         鈹�   鈹溾攢鈹€run_train.sh             // 鍒嗗竷寮忓埌Ascend鐨剆hell鑴氭湰
         鈹�   鈹溾攢鈹€run_eval.sh              // Ascend璇勪及鐨剆hell鑴氭湰
         鈹�   鈹溾攢鈹€run_infer_310.sh         // Ascend-310鎺ㄧ悊shell鑴氭湰
+        鈹�   鈹斺攢鈹€ run_eval_onnx.sh        // 鐢ㄤ簬ONNX璇勪及鐨剆hell鑴氭湰
         鈹溾攢鈹€ src
         鈹�   鈹溾攢鈹€dataset.py               // 鍒涘缓鏁版嵁闆�
         鈹�   鈹溾攢鈹€edsr.py                  // edsr缃戠粶鏋舵瀯
@@ -237,7 +239,8 @@ EDSR鏄敱澶氫釜浼樺寲鍚庣殑residual blocks涓茶仈鑰屾垚锛岀浉姣斿師濮嬬増鏈殑r
         鈹�   鈹溾攢鈹€utils.py                 // train.py/eval.py鍏敤鐨勪唬鐮佹
         鈹溾攢鈹€ train.py                    // 璁粌鑴氭湰
         鈹溾攢鈹€ eval.py                     // 璇勪及鑴氭湰
-        鈹溾攢鈹€ export.py                   // 灏哻heckpoint鏂囦欢瀵煎嚭鍒癮ir/mindir
+        鈹溾攢鈹€ eval_onnx.py                // ONNX璇勪及鑴氭湰
+        鈹溾攢鈹€ export.py                   // 灏哻heckpoint鏂囦欢瀵煎嚭鍒皁nnx/air/mindir
         鈹溾攢鈹€ preprocess.py               // Ascend-310鎺ㄧ悊鐨勬暟鎹澶勭悊鑴氭湰
         鈹溾攢鈹€ ascend310_infer
         鈹�   鈹溾攢鈹€src                      // 瀹炵幇Ascend-310鎺ㄧ悊婧愪唬鐮�
@@ -310,7 +313,8 @@ EDSR鏄敱澶氫釜浼樺寲鍚庣殑residual blocks涓茶仈鑰屾垚锛岀浉姣斿師濮嬬増鏈殑r
 
 ## 瀵煎嚭
 
-鍦ㄨ繍琛屾帹鐞嗕箣鍓嶆垜浠渶瑕佸厛瀵煎嚭妯″瀷銆侫ir妯″瀷鍙兘鍦ㄦ槆鑵�910鐜涓婂鍑猴紝mindir鍙互鍦ㄤ换鎰忕幆澧冧笂瀵煎嚭銆俠atch_size鍙敮鎸�1銆�
+鍦ㄨ繍琛屾帹鐞嗕箣鍓嶆垜浠渶瑕佸厛瀵煎嚭妯″瀷銆侫ir妯″瀷鍙兘鍦ㄦ槆鑵�910鐜涓婂鍑猴紝mindir/onnx鍙互鍦ㄤ换鎰忕幆澧冧笂瀵煎嚭銆俠atch_size鍙敮鎸�1銆�
+娉ㄦ剰锛氳嫢瑕佸鍑簅nnx闇€瑕佸皢export.py浠g爜涓璮ile_format = 'MINDIR'淇敼涓篺ile_format = 'ONNX'
 
 ### 瀵煎嚭鑴氭湰
 
@@ -356,14 +360,44 @@ python export.py --config_path DIV2K_config.yaml --output_path [dir to save mode
 
 - 鎺ㄧ悊娴佺▼
 
-```bash
-# (1) 鏁寸悊鏁版嵁闆嗭紝lr鍥剧墖缁熶竴padding鍒颁竴涓浐瀹氬昂瀵搞€傚弬鑰僷reprocess.py
-# (2) 鏍规嵁鍥哄畾灏哄瀵煎嚭妯″瀷锛屽弬鑰僥xport.py
-# (3) 浣跨敤build.sh鍦╝scend310_infer鏂囦欢澶瑰唴缂栬瘧鎺ㄧ悊绋嬪簭锛屽緱鍒扮▼搴廰scend310_infer/out/main
-# (4) 閰嶇疆鏁版嵁闆嗗浘鐗囪矾寰勶紝妯″瀷璺緞锛岃緭鍑鸿矾寰勭瓑锛屼娇鐢╩ain鎺ㄧ悊寰楀埌瓒呭垎杈ㄧ巼閲嶅缓鍥剧墖銆�
-./ascend310_infer/out/main --mindir_path=[model] --dataset_path=[read_data_path] --device_id=[device_id] --save_dir=[save_data_path]
-# (5) 鍚庡鐞嗗浘鐗囷紝鍘婚櫎padding鐨勬棤鏁堝尯鍩熴€傚拰hr鍥句竴璧风粺璁℃寚鏍囥€傚弬鑰僷reprocess.py
-```
+  ```bash
+  # (1) 鏁寸悊鏁版嵁闆嗭紝lr鍥剧墖缁熶竴padding鍒颁竴涓浐瀹氬昂瀵搞€傚弬鑰僷reprocess.py
+  # (2) 鏍规嵁鍥哄畾灏哄瀵煎嚭妯″瀷锛屽弬鑰僥xport.py
+  # (3) 浣跨敤build.sh鍦╝scend310_infer鏂囦欢澶瑰唴缂栬瘧鎺ㄧ悊绋嬪簭锛屽緱鍒扮▼搴廰scend310_infer/out/main
+  # (4) 閰嶇疆鏁版嵁闆嗗浘鐗囪矾寰勶紝妯″瀷璺緞锛岃緭鍑鸿矾寰勭瓑锛屼娇鐢╩ain鎺ㄧ悊寰楀埌瓒呭垎杈ㄧ巼閲嶅缓鍥剧墖銆�
+  ./ascend310_infer/out/main --mindir_path=[model] --dataset_path=[read_data_path] --device_id=[device_id] --save_dir=[save_data_path]
+  # (5) 鍚庡鐞嗗浘鐗囷紝鍘婚櫎padding鐨勬棤鏁堝尯鍩熴€傚拰hr鍥句竴璧风粺璁℃寚鏍囥€傚弬鑰僷reprocess.py
+  ```
+
+#### 杩涜onnx鎺ㄧ悊
+
+- 鎺ㄧ悊娴佺▼
+
+  ```bash
+  # (1) 鏁寸悊鏁版嵁闆嗭紝lr鍥剧墖缁熶竴padding鍒颁竴涓浐瀹氬昂瀵搞€傚弬鑰僷reprocess.py
+  # (2) 鏍规嵁鍥哄畾灏哄瀵煎嚭妯″瀷锛屽弬鑰僥xport.py
+  # (3) 鎵ц鎺ㄧ悊鑴氭湰
+  ```
+
+- 鍦℅PU鐜涓繍琛孫NNX璇勪及
+
+  ```bash
+  # 杩愯X2璇勪及绀轰緥(EDSR(x2) in the paper)
+  bash scripts/run_eval_onnx.sh ./DIV2K_config.yaml  2  DIV2K_path  output_path  pre_trained_model_path  ONNX
+  # 杩愯X3璇勪及绀轰緥(EDSR(x3) in the paper)
+  bash scripts/run_eval_onnx.sh ./DIV2K_config.yaml  3  DIV2K_path  output_path  pre_trained_model_path  ONNX
+  # 杩愯X4璇勪及绀轰緥(EDSR(x4) in the paper)
+  bash scripts/run_eval_onnx.sh ./DIV2K_config.yaml  4  DIV2K_path  output_path  pre_trained_model_path  ONNX
+  ```
+
+  涓婅堪python鍛戒护灏嗗湪鍚庡彴杩愯锛屾偍鍙互閫氳繃eval_onnx.log鏂囦欢鏌ョ湅缁撴灉銆傛祴璇曟暟鎹泦鐨勫噯纭€у涓嬶細
+
+  ```bash
+  .....
+  [100/100] rank = 0 result = {'psnr': 29.297856984107398, 'num_sr': 100.0, 'time': 5.842652082443237}
+  evaluation result = {'psnr': 29.297856984107398, 'num_sr': 100.0, 'time': 2905.9808044433594}
+  eval success
+  ```
 
 # 妯″瀷鎻忚堪
 
diff --git a/research/cv/EDSR/eval_onnx.py b/research/cv/EDSR/eval_onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..47bc4e689d423057c14fa4a90e46184ac26ca93b
--- /dev/null
+++ b/research/cv/EDSR/eval_onnx.py
@@ -0,0 +1,104 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+#################evaluate EDSR example on DIV2K########################
+"""
+import os
+import time
+
+import mindspore
+from mindspore.common import set_seed
+from mindspore import Tensor
+import onnxruntime as ort
+
+from src.metric import PSNR, SaveSrHr
+from src.utils import init_env, init_dataset
+from model_utils.config import config
+
+set_seed(2021)
+
+def create_session(checkpoint_path, target_device):
+    """Create ONNX runtime session"""
+    if target_device == 'GPU':
+        providers = ['CUDAExecutionProvider']
+    elif target_device in ('CPU', 'Ascend'):
+        providers = ['CPUExecutionProvider']
+    else:
+        raise ValueError(f"Unsupported target device '{target_device}'. Expected one of: 'CPU', 'GPU', 'Ascend'")
+    session = ort.InferenceSession(checkpoint_path, providers=providers)
+    input_names = [x.name for x in session.get_inputs()]
+    return session, input_names
+
+def unpadding(img, target_shape):
+    h, w = target_shape[2], target_shape[3]
+    _, _, img_h, img_w = img.shape
+    if img_h > h:
+        img = img[:, :, :h, :]
+    if img_w > w:
+        img = img[:, :, :, :w]
+    return img
+
+def do_eval(session, input_names, ds_val, metrics, cur_epoch=None):
+    """
+    do eval for psnr and save hr, sr
+    """
+    total_step = ds_val.get_dataset_size()
+    setw = len(str(total_step))
+    begin = time.time()
+    step_begin = time.time()
+    rank_id = 0
+    for i, (lr, hr) in enumerate(ds_val):
+        input_data = [lr.asnumpy()]
+        sr = session.run(None, dict(zip(input_names, input_data)))
+        sr = Tensor(unpadding(sr[0], hr.shape), mindspore.float32)
+        _ = [m.update(sr, hr) for m in metrics.values()]
+        result = {k: m.eval(sync=False) for k, m in metrics.items()}
+        result["time"] = time.time() - step_begin
+        step_begin = time.time()
+        print(f"[{i+1:>{setw}}/{total_step:>{setw}}] rank = {rank_id} result = {result}", flush=True)
+    result = {k: m.eval(sync=True) for k, m in metrics.items()}
+    result["time"] = time.time() - begin
+    print(f"evaluation result = {result}", flush=True)
+    return result
+
+def run_eval():
+    """
+    run eval
+    """
+    print(config, flush=True)
+    cfg = config
+    cfg.lr_type = "bicubic_AUG_self_ensemble"
+
+    init_env(cfg)
+    session, input_names = create_session(cfg.pre_trained, cfg.device_target)
+
+    if cfg.dataset_name == "DIV2K":
+        cfg.batch_size = 1
+        cfg.patch_size = -1
+        ds_val = init_dataset(cfg, "valid")
+        metrics = {
+            "psnr": PSNR(rgb_range=cfg.rgb_range, shave=6 + cfg.scale),
+        }
+        if config.save_sr:
+            save_img_dir = os.path.join(cfg.output_path, "HrSr")
+            os.makedirs(save_img_dir, exist_ok=True)
+            metrics["num_sr"] = SaveSrHr(save_img_dir)
+        do_eval(session, input_names, ds_val, metrics)
+        print("eval success", flush=True)
+    else:
+        raise RuntimeError("Unsupported dataset.")
+
+if __name__ == '__main__':
+    run_eval()
diff --git a/research/cv/EDSR/requirements.txt b/research/cv/EDSR/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fc2dde7d393645f11405e9ea84670961c42a7713
--- /dev/null
+++ b/research/cv/EDSR/requirements.txt
@@ -0,0 +1,4 @@
+onnxruntime-gpu
+pillow
+numpy
+pyyaml
\ No newline at end of file
diff --git a/research/cv/EDSR/scripts/run_eval_onnx.sh b/research/cv/EDSR/scripts/run_eval_onnx.sh
new file mode 100644
index 0000000000000000000000000000000000000000..da58aac3bd1e9847a2920da85ae0c7661ea6f26c
--- /dev/null
+++ b/research/cv/EDSR/scripts/run_eval_onnx.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+echo "=============================================================================================================="
+echo "Please run the script as: "
+echo "bash scripts/run_eval_onnx.sh [config_path] [scale] [data_path] [output_path] [pre_trained_model_path] [eval_type]"
+echo "For example: bash scripts/run_eval_onnx.sh ./DIV2K_config.yaml  2  DIV2K_path  output_path  pre_trained_model_path  ONNX"
+echo "It is better to use the absolute path."
+echo "=============================================================================================================="
+
+if [ $# != 6 ]
+then
+    echo "Usage:  bash scripts/run_eval_onnx.sh [config_path]  [scale]  [data_path] [output_path]  [pre_trained_model_path]  [eval_type]"
+exit 1
+fi
+
+export args=${*:1}
+python eval_onnx.py --config_path $1 --scale $2 --data_path $3 --output_path $4 --pre_trained $5 --eval_type $6 > eval_onnx.log 2>&1 &
diff --git a/research/cv/EDSR/src/dataset.py b/research/cv/EDSR/src/dataset.py
index a1f4e7fd3a61e2c3a932088bac03d9faba1e304a..93be90c6548e6ebd74af9223b7a3efc8ae67cfc3 100644
--- a/research/cv/EDSR/src/dataset.py
+++ b/research/cv/EDSR/src/dataset.py
@@ -241,7 +241,10 @@ def create_dataset_DIV2K(config, dataset_type="train", num_parallel_workers=10,
     """
     dataset_path = config["dataset_path"]
     lr_scale = config["scale"]
-    lr_type = config.get("lr_type", "bicubic")
+    if config.get("eval_type") == "ONNX":
+        lr_type = config.get("lr_type", "bicubic_AUG_self_ensemble")
+    else:
+        lr_type = config.get("lr_type", "bicubic")
     batch_size = config.get("batch_size", 1)
     patch_size = config.get("patch_size", -1)
     epoch_size = config.get("epoch_size", None)
@@ -261,7 +264,10 @@ def create_dataset_DIV2K(config, dataset_type="train", num_parallel_workers=10,
     lrs_pattern = []
     for lr_scale in multi_lr_scale:
         dir_lr = os.path.join(dataset_path, f"DIV2K_{dataset_type}_LR_{lr_type}", f"X{lr_scale}")
-        lr_pattern = os.path.join(dir_lr, f"*x{lr_scale}.png")
+        if config.get("eval_type") == "ONNX":
+            lr_pattern = os.path.join(dir_lr, f"*x{lr_scale}_0.png")
+        else:
+            lr_pattern = os.path.join(dir_lr, f"*x{lr_scale}.png")
         lrs_pattern.append(lr_pattern)
         column_names.append(f"lrx{lr_scale}")
     column_names.append("hr")  # ["lrx2","lrx3","lrx4",..., "hr"]
diff --git a/research/cv/EDSR/src/metric.py b/research/cv/EDSR/src/metric.py
index e6f11794b89a2d9049dfb5ea866279b47699ac45..a5412313bbfabfa8ec47f211204d98bbf035bcc1 100644
--- a/research/cv/EDSR/src/metric.py
+++ b/research/cv/EDSR/src/metric.py
@@ -199,13 +199,12 @@ class Quantizer(nn.Cell):
     """
     def __init__(self, _min=0.0, _max=255.0):
         super(Quantizer, self).__init__()
-        self.round = ops.Round()
         self._min = _min
         self._max = _max
 
     def construct(self, x):
         x = ops.clip_by_value(x, self._min, self._max)
-        x = self.round(x)
+        x = (x + 0.5).astype("Int32")
         return x
 
 
@@ -239,6 +238,7 @@ class _DistMetric(nn.Metric):
         if get_device_num is not None and get_device_num() > 1:
             self.all_reduce_sum = TensorSyncer(_type="sum")
+        self.sum = None
         self.clear()
 
     def _accumulate(self, value):
         if isinstance(value, (list, tuple)):
@@ -293,7 +293,7 @@ class PSNR(_DistMetric):
         diff = (sr - hr) / self.rgb_range
         valid = diff
         if self.shave is not None and self.shave != 0:
-            valid = valid[..., self.shave:(-self.shave), self.shave:(-self.shave)]
+            valid = valid[..., int(self.shave):int(-self.shave), int(self.shave):int(-self.shave)]
         mse_list = (valid ** 2).mean(axis=(1, 2, 3))
         mse_list = self._convert_data(mse_list).tolist()
         psnr_list = [float(1e32) if mse == 0 else(- 10.0 * math.log10(mse)) for mse in mse_list]
diff --git a/research/cv/EDSR/src/utils.py b/research/cv/EDSR/src/utils.py
index b842876e330b969a216b6e0417e8232845ad297b..65eea98a77a15d8e1cddd551f211eef08f2556c0 100644
--- a/research/cv/EDSR/src/utils.py
+++ b/research/cv/EDSR/src/utils.py
@@ -65,6 +65,7 @@ def init_dataset(cfg, dataset_type="train"):
         "lr_type": cfg.lr_type,
         "batch_size": cfg.batch_size,
         "patch_size": cfg.patch_size,
+        "eval_type": cfg.eval_type,
     }
     if cfg.dataset_name == "DIV2K":
         dataset = create_dataset_DIV2K(config=ds_cfg,