diff --git a/official/cv/brdnet/README_CN.md b/official/cv/brdnet/README_CN.md
index 9c49d57b7b738a9b5515cffcc6a0f208f885180d..8ef04571c3c1d2ae68e90cb52f2b429dd70e4585 100644
--- a/official/cv/brdnet/README_CN.md
+++ b/official/cv/brdnet/README_CN.md
@@ -62,7 +62,7 @@ BRDNet 包含上下两个分支。上分支仅仅包含残差学习与 BRN;下
 通过官方网站安装 MindSpore 后,您可以按照如下步骤进行训练和评估:
 
 ```shell
-#通过 python 命令行运行单卡训练脚本。 请注意 train_data 需要以"/"结尾
+#通过 python 命令行运行单卡训练脚本。
 python train.py \
 --train_data=xxx/dataset/waterloo5050step40colorimage/ \
 --sigma=15 \
@@ -74,11 +74,11 @@ python train.py \
 --is_distributed=0 \
 --epoch=50 > log.txt 2>&1 &
 
-#通过 sh 命令启动单卡训练。(对 train_data 等参数的路径格式无要求,内部会自动转为绝对路径以及以"/"结尾)
-sh ./scripts/run_train.sh [train_code_path] [train_data] [batch_size] [sigma] [channel] [epoch] [lr]
+#通过 bash 命令启动单卡训练。
+bash ./scripts/run_train.sh [train_code_path] [train_data] [batch_size] [sigma] [channel] [epoch] [lr]
 
 #Ascend多卡训练。
-sh run_distribute_train.sh [train_code_path] [train_data] [batch_size] [sigma] [channel] [epoch] [lr] [rank_table_file_path]
+bash run_distribute_train.sh [train_code_path] [train_data] [batch_size] [sigma] [channel] [epoch] [lr] [rank_table_file_path]
 
 # 通过 python 命令行运行推理脚本。请注意 test_dir 需要以"/"结尾;
 # pretrain_path 指 ckpt 所在目录,为了兼容 modelarts,将其拆分为了 “路径” 与 “文件名”
@@ -92,8 +92,8 @@ python eval.py \
 --output_path=./output/ \
 --is_distributed=0 > log.txt 2>&1 &
 
-#通过 sh 命令启动推理。(对 test_dir 等参数的路径格式无要求,内部会自动转为绝对路径以及以"/"结尾)
-sh run_eval.sh [train_code_path] [test_dir] [sigma] [channel] [pretrain_path] [ckpt_name]
+#通过 bash 命令启动推理。(对 test_dir 等参数的路径格式无要求,内部会自动转为绝对路径以及以"/"结尾)
+bash run_eval.sh [train_code_path] [test_dir] [sigma] [channel] [pretrain_path] [ckpt_name]
 ```
 
 Ascend训练:生成[RANK_TABLE_FILE](https://gitee.com/mindspore/models/tree/master/utils/hccl_tools)
@@ -122,7 +122,6 @@ Ascend训练:生成[RANK_TABLE_FILE](https://gitee.com/mindspore/models/tree/m
         │   ├──run_infer_310.sh                // 启动310推理的脚本
         ├── src
         │   ├──dataset.py                      // 数据集处理
-        │   ├──distributed_sampler.py          // 8卡并行时的数据集切分操作
         │   ├──logger.py                       // 日志打印文件
         │   ├──models.py                       // 模型结构
         ├── export.py                          // 将权重文件导出为 MINDIR 等格式的脚本
@@ -143,7 +142,8 @@ train.py 中的主要参数如下:
 --epoch: 训练次数
 --lr: 初始学习率
 --save_every: 权重保存频率(每 N 个 epoch 保存一次)
---pretrain: 预训练文件(接着该文件继续训练)
+--resume_path: 预训练文件路径(接着该文件继续训练)
+--resume_name: 预训练文件名
 --use_modelarts: 是否使用 modelarts(1 for True, 0 for False; 设置为 1 时将使用 moxing 从 obs 拷贝数据)
 --train_url: ( modelsarts 需要的参数,但因该名称存在歧义而在代码中未使用)
 --data_url: ( modelsarts 需要的参数,但因该名称存在歧义而在代码中未使用)
@@ -201,7 +201,7 @@ cal_psnr.py 中的主要参数如下:
 - Ascend处理器环境运行
 
   ```shell
-  #通过 python 命令行运行单卡训练脚本。 请注意 train_data 需要以"/"结尾
+  #通过 python 命令行运行单卡训练脚本。
   python train.py \
   --train_data=xxx/dataset/waterloo5050step40colorimage/ \
   --sigma=15 \
@@ -213,13 +213,13 @@ cal_psnr.py 中的主要参数如下:
   --is_distributed=0 \
   --epoch=50 > log.txt 2>&1 &
 
-  #通过 sh 命令启动单卡训练。(对 train_data 等参数的路径格式无要求,内部会自动转为绝对路径以及以"/"结尾)
-  sh ./scripts/run_train.sh [train_code_path] [train_data] [batch_size] [sigma] [channel] [epoch] [lr]
+  #通过 bash 命令启动单卡训练。
+  bash ./scripts/run_train.sh [train_code_path] [train_data] [batch_size] [sigma] [channel] [epoch] [lr]
 
   #上述命令均会使脚本在后台运行,日志将输出到 log.txt,可通过查看该文件了解训练详情
 
   #Ascend多卡训练(2、4、8卡配置请自行修改run_distribute_train.sh,默认8卡)
-  sh run_distribute_train.sh [train_code_path] [train_data] [batch_size] [sigma] [channel] [epoch] [lr] [rank_table_file_path]
+  bash run_distribute_train.sh [train_code_path] [train_data] [batch_size] [sigma] [channel] [epoch] [lr] [rank_table_file_path]
   ```
 
   注意:第一次运行时可能会较长时间停留在如下界面,这是因为当一个 epoch 运行完成后才会打印日志,请耐心等待。
@@ -401,8 +401,8 @@ cal_psnr.py 中的主要参数如下:
   --output_path=./output/ \
   --is_distributed=0 > log.txt 2>&1 &
 
-  #通过 sh 命令启动评估 (对 test_dir 等参数的路径格式无要求,内部会自动转为绝对路径以及以"/"结尾)
-  sh run_eval.sh [train_code_path] [test_dir] [sigma] [channel] [pretrain_path] [ckpt_name]
+  #通过 bash 命令启动评估 (对 test_dir 等参数的路径格式无要求,内部会自动转为绝对路径以及以"/"结尾)
+  bash run_eval.sh [train_code_path] [test_dir] [sigma] [channel] [pretrain_path] [ckpt_name]
   ```
 
   ```python
@@ -493,8 +493,8 @@ cal_psnr.py 中的主要参数如下:
 - 在 Ascend 310 处理器环境运行
 
   ```python
-  #通过 sh 命令启动推理
-  sh run_infer_310.sh [model_path] [data_path] [noise_image_path] [sigma] [channel] [device_id]
+  #通过 bash 命令启动推理
+  bash run_infer_310.sh [model_path] [data_path] [noise_image_path] [sigma] [channel] [device_id]
   #上述命令将完成推理所需的全部工作。执行完成后,将产生 preprocess.log、infer.log、psnr.log 三个日志文件。
   #如果您需要单独执行各部分代码,可以参照 run_infer_310.sh 内的流程分别进行编译、图片预处理、推理和 PSNR 计算,请注意核对各部分所需参数!
   ```
diff --git a/official/cv/brdnet/cal_psnr.py b/official/cv/brdnet/cal_psnr.py
index 5344d7ad21011c22438105e02f1dad461f7ecb26..04b20ee9146f5df2e6011ba5ec1486f84f0641b3 100644
--- a/official/cv/brdnet/cal_psnr.py
+++ b/official/cv/brdnet/cal_psnr.py
@@ -41,20 +41,23 @@ def calculate_psnr(image1, image2):
     rmse = math.sqrt(np.mean(diff**2.))
     return 20*math.log10(1.0/rmse)
 
-def cal_psnr(output_path, image_path):
-    file_list = glob.glob(image_path+'*') # image_path must end by '/'
+def cal_psnr():
+    image_list = glob.glob(os.path.join(args.image_path, '*'))
     psnr = []   #after denoise
 
     start_time = time.time()
-    for file in file_list:
-        filename = file.split('/')[-1].split('.')[0]    # get the name of image file
+    for image in sorted(image_list):
+        filename = image.split('/')[-1].split('.')[0]    # get the name of image file
         # read image
         if args.channel == 3:
-            img_clean = np.array(Image.open(file), dtype='float32') / 255.0
+            img_clean = np.array(Image.open(image).resize((args.image_width, \
+                                 args.image_height), Image.ANTIALIAS), dtype='float32') / 255.0
         else:
-            img_clean = np.expand_dims(np.array(Image.open(file).convert('L'), dtype='float32') / 255.0, axis=2)
+            assert args.channel == 1
+            img_clean = np.expand_dims(np.array(Image.open(image).resize((args.image_width, \
+                        args.image_height), Image.ANTIALIAS).convert('L'), dtype='float32') / 255.0, axis=2)
 
-        result_file = os.path.join(output_path, filename+"_noise_0.bin")
+        result_file = os.path.join(args.output_path, filename+"_noise_0.bin")
         y_predict = np.fromfile(result_file, dtype=np.float32)
         y_predict = y_predict.reshape(args.channel, args.image_height, args.image_width)
         img_out = y_predict.transpose((1, 2, 0))#HWC
@@ -70,4 +73,4 @@ def cal_psnr(output_path, image_path):
     print("Time cost:"+str(time_used)+" seconds!")
 
 if __name__ == '__main__':
-    cal_psnr(args.output_path, args.image_path)
+    cal_psnr()
diff --git a/official/cv/brdnet/eval.py b/official/cv/brdnet/eval.py
index 0573f2df969ae698d3c482da2d253ab9e741ae54..911aff666b12a36497a8804cc476d2d050bdd845 100644
--- a/official/cv/brdnet/eval.py
+++ b/official/cv/brdnet/eval.py
@@ -87,7 +87,7 @@ def test(model_path):
         args.logger.info("copying test dataset finished....")
         args.test_dir = 'cache/test/'
 
-    file_list = glob.glob(args.test_dir+'*') # args.test_dir must end by '/'
+    file_list = glob.glob(os.path.join(args.test_dir, "*"))
     model.set_train(False)
 
     cast = P.Cast()
@@ -104,7 +104,8 @@ def test(model_path):
         if args.channel == 3:
             img_clean = np.array(Image.open(file), dtype='float32') / 255.0
         else:
-            img_clean = np.expand_dims(np.array(Image.open(file).convert('L'), dtype='float32') / 255.0, axis=2)
+            img_clean = np.expand_dims(np.array(Image.open(file).convert('L'), \
+                                       dtype='float32') / 255.0, axis=2)
 
         np.random.seed(0) #obtain the same random data when it is in the test phase
         img_test = img_clean + np.random.normal(0, args.sigma/255.0, img_clean.shape)
diff --git a/official/cv/brdnet/export.py b/official/cv/brdnet/export.py
index 1bb99788bd74735cac92ffb40a2698bbff5d756b..32c92bdc70712705b015da690d512918b327213f 100644
--- a/official/cv/brdnet/export.py
+++ b/official/cv/brdnet/export.py
@@ -28,11 +28,11 @@ from src.models import BRDNet
 ## Params
 parser = argparse.ArgumentParser()
 
-parser.add_argument('--batch_size', default=32, type=int, help='batch size')
+parser.add_argument('--batch_size', default=1, type=int, help='batch size')
 parser.add_argument('--channel', default=3, type=int
                     , help='image channel, 3 for color, 1 for gray')
-parser.add_argument("--image_height", type=int, default=50, help="Image height.")
-parser.add_argument("--image_width", type=int, default=50, help="Image width.")
+parser.add_argument("--image_height", type=int, default=500, help="Image height.")
+parser.add_argument("--image_width", type=int, default=500, help="Image width.")
 parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.")
 parser.add_argument("--file_name", type=str, default="brdnet", help="output file name.")
 parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format")
diff --git a/official/cv/brdnet/infer/Dockerfile b/official/cv/brdnet/infer/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..053bf80cd2309a41b6033b3e8d5ab4f87d41bd5e
--- /dev/null
+++ b/official/cv/brdnet/infer/Dockerfile
@@ -0,0 +1,5 @@
+ARG FROM_IMAGE_NAME
+FROM ${FROM_IMAGE_NAME}
+
+COPY requirements.txt .
+RUN pip3.7 install -r requirements.txt
\ No newline at end of file
diff --git a/official/cv/brdnet/infer/README_CN.md b/official/cv/brdnet/infer/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..c539398516b4833f0421ba1ce60a6641b5b36c67
--- /dev/null
+++ b/official/cv/brdnet/infer/README_CN.md
@@ -0,0 +1,269 @@
+# BRDNet MindX推理及mx Base推理
+
+<!-- TOC -->
+
+- [脚本说明](#脚本说明)
+    - [脚本及样例代码](#脚本及样例代码)
+    - [模型转换](#模型转换)
+    - [MindX SDK 启动流程](#mindx-sdk-启动流程)
+    - [mx Base 推理流程](#mx-base-推理流程)
+
+<!-- /TOC -->
+
+## 脚本说明
+
+### 脚本及样例代码
+
+```tex
+├── brdnet
+    ├── README_CN.md                           // brdnet 的说明文件
+    ├── infer
+        ├── README_CN.md                       // brdnet 的 MindX SDK 推理及 mx Base 推理的说明文件
+        ├── convert
+        │   ├──ATC_AIR_2_OM.sh                 // 将 air 模型转换为 om 模型的脚本
+        ├── data
+        │   ├──config
+        │   │   ├──brdnet.pipeline             // MindX SDK运行所需的 pipline 配置文件
+        ├── mxbase                             // mx Base 推理目录(C++)
+        │   ├──src
+        │   │   ├──BRDNet.h                    // 头文件
+        │   │   ├──BRDNet.cpp                  // 详细实现
+        │   │   ├──main.cpp                    // mx Base 主函数
+        │   ├──build.sh                        // 代码编译脚本
+        │   ├──CMakeLists.txt                  // 代码编译设置
+        ├── sdk
+        │   ├──main.py                         // MindX SDK 运行脚本
+        │   ├──run.sh                          // 启动 main.py 的 sh 文件
+    ├── ......                                 // 其他代码文件
+```
+
+### 模型转换
+
+1、首先执行 brdnet 目录下的 export.py 将准备好的权重文件转换为 air 模型文件。
+
+```python
+# 此处简要举例
+python export.py \
+--batch_size=1 \
+--channel=3 \
+--image_height=500 \
+--image_width=500 \
+--ckpt_file=xxx/brdnet.ckpt \
+--file_name=brdnet \
+--file_format=AIR \
+--device_target=Ascend \
+--device_id=0 \
+```
+
+2、然后执行 convert 目录下的 ATC_AIR_2_OM.sh 将刚刚转换好的 air 模型文件转换为 om 模型文件以备后续使用。
+
+```bash
+# bash ./ATC_AIR_2_OM.sh -h 或者 bash ./ATC_AIR_2_OM.sh --help 可以查看帮助信息
+bash ATC_AIR_2_OM.sh [--model --output --soc_version --input_shape]
+```
+
+### MindX SDK 启动流程
+
+```shell
+# 通过 bash 脚本启动 MindX SDK 推理
+# bash ./run.sh -h 或者 bash ./run.sh --help 可以查看帮助信息
+bash ./run.sh [--pipeline --clean_image_path --image_width --image_height --channel --sigma]
+# 注意: data/config/brdnet.pipeline 中默认 MindX SDK 推理所需的模型文件为 "channel_3_sigma_15.om",且放在 data/model/ 目录下,具体可以修改该文件中的 "modelPath" 属性进行配置。
+
+# 通过 python 命令启动 MindX SDK 推理
+python main.py \
+--pipeline=../data/config/brdnet.pipeline \
+--clean_image_path=../Test/Kodak24 \
+--image_width=500 \
+--image_height=500 \
+--channel=3 \
+--sigma=15
+# 注意: data/config/brdnet.pipeline 中默认 MindX SDK 推理所需的模型文件为 "channel_3_sigma_15.om",且放在 data/model/ 目录下,具体可以修改该文件中的 "modelPath" 属性进行配置。
+```
+
+推理结果示例:
+
+```tex
+Begin to initialize Log.
+The output directory of logs file exist.
+Save logs information to specified directory.
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim01.png
+../../BRDNet/Test_dataset/Kodak24/kodim01.png : psnr_denoised:    32.28033733366724
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim02.png
+../../BRDNet/Test_dataset/Kodak24/kodim02.png : psnr_denoised:    35.018032807200164
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim03.png
+../../BRDNet/Test_dataset/Kodak24/kodim03.png : psnr_denoised:    37.80273057933442
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim04.png
+../../BRDNet/Test_dataset/Kodak24/kodim04.png : psnr_denoised:    35.60892146774801
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim05.png
+../../BRDNet/Test_dataset/Kodak24/kodim05.png : psnr_denoised:    33.336266095083175
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim06.png
+../../BRDNet/Test_dataset/Kodak24/kodim06.png : psnr_denoised:    33.738780427944974
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim07.png
+../../BRDNet/Test_dataset/Kodak24/kodim07.png : psnr_denoised:    37.10481992981783
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim08.png
+../../BRDNet/Test_dataset/Kodak24/kodim08.png : psnr_denoised:    33.126510144521
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim09.png
+../../BRDNet/Test_dataset/Kodak24/kodim09.png : psnr_denoised:    37.23759544848104
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim10.png
+../../BRDNet/Test_dataset/Kodak24/kodim10.png : psnr_denoised:    36.954513882215366
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim11.png
+../../BRDNet/Test_dataset/Kodak24/kodim11.png : psnr_denoised:    33.961228532687855
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim12.png
+../../BRDNet/Test_dataset/Kodak24/kodim12.png : psnr_denoised:    35.90416546419448
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim13.png
+../../BRDNet/Test_dataset/Kodak24/kodim13.png : psnr_denoised:    31.199819472546324
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim14.png
+../../BRDNet/Test_dataset/Kodak24/kodim14.png : psnr_denoised:    33.176099577560706
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim15.png
+../../BRDNet/Test_dataset/Kodak24/kodim15.png : psnr_denoised:    34.62721601846573
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim16.png
+../../BRDNet/Test_dataset/Kodak24/kodim16.png : psnr_denoised:    35.10364930038219
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim17.png
+../../BRDNet/Test_dataset/Kodak24/kodim17.png : psnr_denoised:    35.14010929192525
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim18.png
+../../BRDNet/Test_dataset/Kodak24/kodim18.png : psnr_denoised:    33.19858405097709
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim19.png
+../../BRDNet/Test_dataset/Kodak24/kodim19.png : psnr_denoised:    34.92669369486534
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim20.png
+../../BRDNet/Test_dataset/Kodak24/kodim20.png : psnr_denoised:    36.63018276423998
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim21.png
+../../BRDNet/Test_dataset/Kodak24/kodim21.png : psnr_denoised:    34.170664959208786
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim22.png
+../../BRDNet/Test_dataset/Kodak24/kodim22.png : psnr_denoised:    34.182998599615985
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim23.png
+../../BRDNet/Test_dataset/Kodak24/kodim23.png : psnr_denoised:    36.84838635349549
+Denosing image: ../../BRDNet/Test_dataset/Kodak24/kodim24.png
+../../BRDNet/Test_dataset/Kodak24/kodim24.png : psnr_denoised:    34.334034305678436
+Average PSNR: 34.81718085424403
+Testing finished....
+=======================================
+The total time of inference is 2.731436252593994 s
+=======================================
+```
+
+### mx Base 推理流程
+
+mx Base 推理并不直接处理图片和计算去噪后的 PSNR 值。
+
+1、首先执行 brdnet 目录下的 preprocess.py 为测试图片添加噪声并以 "bin" 文件形式进行存储
+
+```python
+# 此处简要举例
+python preprocess.py \
+--out_dir=xx/noise_data_Kodak24 \
+--image_path=xx/Kodak24 \
+--image_height=500 \
+--image_width=500 \
+--channel=3 \
+--sigma=15
+```
+
+2、编译 mx Base
+
+```shell
+bash ./build.sh
+# 编译后的可执行文件 "brdnet" 将保存在当前目录下
+```
+
+3、执行 mx Base 推理
+
+```tex
+./brdnet [model_path input_data_path output_data_path]
+# 按顺序传入模型路径、噪声图像路径、输出路径(需要提前创建)
+例如:
+ ./brdnet ../data/model/channel_3_sigma_15.om ../noise_data_Kodak24/ ./result/
+```
+
+mx Base 推理结果示例:
+
+```tex
+I1106 10:09:26.438470 79291 main.cpp:53] =======================================  !!!Parameters setting!!! ========================================
+I1106 10:09:26.438516 79291 main.cpp:55] ==========  loading model weights from: ../data/model/channel_3_sigma_15.om
+I1106 10:09:26.438527 79291 main.cpp:58] ==========  input data path = ../noise_data_Kodak24/
+I1106 10:09:26.438536 79291 main.cpp:61] ==========  output data path = ./result/ WARNING: please make sure that this folder is created in advance!!!
+I1106 10:09:26.438544 79291 main.cpp:63] ========================================  !!!Parameters setting!!! ========================================
+I1106 10:09:26.798825 79291 ModelInferenceProcessor.cpp:22] Begin to ModelInferenceProcessor init
+I1106 10:09:26.863025 79291 ModelInferenceProcessor.cpp:69] End to ModelInferenceProcessor init
+I1106 10:09:26.863147 79291 main.cpp:82] Processing: 1/24 ---> kodim24_noise.bin
+I1106 10:09:26.980234 79291 main.cpp:82] Processing: 2/24 ---> kodim18_noise.bin
+I1106 10:09:27.096143 79291 main.cpp:82] Processing: 3/24 ---> kodim07_noise.bin
+I1106 10:09:27.213531 79291 main.cpp:82] Processing: 4/24 ---> kodim19_noise.bin
+I1106 10:09:27.328680 79291 main.cpp:82] Processing: 5/24 ---> kodim02_noise.bin
+I1106 10:09:27.444927 79291 main.cpp:82] Processing: 6/24 ---> kodim20_noise.bin
+I1106 10:09:27.558817 79291 main.cpp:82] Processing: 7/24 ---> kodim12_noise.bin
+I1106 10:09:27.675061 79291 main.cpp:82] Processing: 8/24 ---> kodim21_noise.bin
+I1106 10:09:27.791473 79291 main.cpp:82] Processing: 9/24 ---> kodim14_noise.bin
+I1106 10:09:27.906719 79291 main.cpp:82] Processing: 10/24 ---> kodim16_noise.bin
+I1106 10:09:28.023947 79291 main.cpp:82] Processing: 11/24 ---> kodim01_noise.bin
+I1106 10:09:28.140027 79291 main.cpp:82] Processing: 12/24 ---> kodim23_noise.bin
+I1106 10:09:28.255630 79291 main.cpp:82] Processing: 13/24 ---> kodim17_noise.bin
+I1106 10:09:28.369719 79291 main.cpp:82] Processing: 14/24 ---> kodim05_noise.bin
+I1106 10:09:28.485267 79291 main.cpp:82] Processing: 15/24 ---> kodim22_noise.bin
+I1106 10:09:28.600522 79291 main.cpp:82] Processing: 16/24 ---> kodim13_noise.bin
+I1106 10:09:28.716308 79291 main.cpp:82] Processing: 17/24 ---> kodim09_noise.bin
+I1106 10:09:28.830880 79291 main.cpp:82] Processing: 18/24 ---> kodim06_noise.bin
+I1106 10:09:28.945564 79291 main.cpp:82] Processing: 19/24 ---> kodim03_noise.bin
+I1106 10:09:29.061424 79291 main.cpp:82] Processing: 20/24 ---> kodim04_noise.bin
+I1106 10:09:29.176980 79291 main.cpp:82] Processing: 21/24 ---> kodim10_noise.bin
+I1106 10:09:29.292285 79291 main.cpp:82] Processing: 22/24 ---> kodim11_noise.bin
+I1106 10:09:29.406962 79291 main.cpp:82] Processing: 23/24 ---> kodim15_noise.bin
+I1106 10:09:29.521801 79291 main.cpp:82] Processing: 24/24 ---> kodim08_noise.bin
+I1106 10:09:29.637691 79291 main.cpp:91] infer succeed and write the result data with binary file !
+I1106 10:09:29.771848 79291 DeviceManager.cpp:83] DestroyDevices begin
+I1106 10:09:29.771868 79291 DeviceManager.cpp:85] destroy device:0
+I1106 10:09:29.954421 79291 DeviceManager.cpp:91] aclrtDestroyContext successfully!
+I1106 10:09:31.532470 79291 DeviceManager.cpp:99] DestroyDevices successfully
+I1106 10:09:31.532511 79291 main.cpp:98] Infer images sum 24, cost total time: 2535.4 ms.
+I1106 10:09:31.532536 79291 main.cpp:99] The throughput: 9.46598 bin/sec.
+I1106 10:09:31.532541 79291 main.cpp:100] ==========  The infer result has been saved in ---> ./result/
+```
+
+mx Base 的推理结果为 "去噪后的图片",将以 "bin" 文件的形式存储在指定路径下。
+
+4、计算去噪后的 PSNR 值
+
+如果需要计算去噪后的 PSNR 值,请执行 brdnet 目录下的 cal_psnr.py 脚本
+
+```python
+# 此处简要举例
+python cal_psnr.py \
+--image_path=xx/Kodak24 \
+--output_path=xx/Kodak24 \
+--image_height=500 \
+--image_width=500 \
+--channel=3
+# image_path 指原不含噪声的图片路径,output_path 指 mx Base 推理后的结果保存路径
+```
+
+PSNR 计算结果示例:
+
+```tex
+kodim01 : psnr_denoised:    32.28033733366724
+kodim02 : psnr_denoised:    35.018032807200164
+kodim03 : psnr_denoised:    37.80273057933442
+kodim04 : psnr_denoised:    35.60892146774801
+kodim05 : psnr_denoised:    33.336266095083175
+kodim06 : psnr_denoised:    33.738780427944974
+kodim07 : psnr_denoised:    37.10481992981783
+kodim08 : psnr_denoised:    33.126510144521
+kodim09 : psnr_denoised:    37.23759544848104
+kodim10 : psnr_denoised:    36.954513882215366
+kodim11 : psnr_denoised:    33.961228532687855
+kodim12 : psnr_denoised:    35.90416546419448
+kodim13 : psnr_denoised:    31.199819472546324
+kodim14 : psnr_denoised:    33.176099577560706
+kodim15 : psnr_denoised:    34.62721601846573
+kodim16 : psnr_denoised:    35.10364930038219
+kodim17 : psnr_denoised:    35.14010929192525
+kodim18 : psnr_denoised:    33.19858405097709
+kodim19 : psnr_denoised:    34.92669369486534
+kodim20 : psnr_denoised:    36.63018276423998
+kodim21 : psnr_denoised:    34.170664959208786
+kodim22 : psnr_denoised:    34.182998599615985
+kodim23 : psnr_denoised:    36.84838635349549
+kodim24 : psnr_denoised:    34.334034305678436
+Average PSNR: 34.81718085424403
+Testing finished....
+Time cost:1.0340161323547363 seconds!
+```
diff --git a/official/cv/brdnet/infer/convert/ATC_AIR_2_OM.sh b/official/cv/brdnet/infer/convert/ATC_AIR_2_OM.sh
new file mode 100644
index 0000000000000000000000000000000000000000..18bf449d04e193e2319cb386953be45537db7d41
--- /dev/null
+++ b/official/cv/brdnet/infer/convert/ATC_AIR_2_OM.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+soc_version=Ascend310
+input_shape="x:1,3,500,500"
+# help message
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage:bash ./ATC_AIR_2_OM.sh <args>"
+    echo "parameter explain:
+    --model                  set model place, e.g. --model=../8p_channel_3_sigma_15_rank_0-50_4556.air
+    --output                 set the name and place of OM model, e.g. --output=../data/model/channel_3_sigma_15
+    --soc_version            set the soc_version, default: --soc_version=Ascend310
+    --input_shape            set the input node and shape, default: --input_shape='x:1,3,500,500'
+    -h/--help                show help message
+    "
+    exit 1
+fi
+
+for para in "$@"
+do
+    if [[ $para == --model* ]];then
+        model=`echo ${para#*=}`
+    elif [[ $para == --output* ]];then
+        output=`echo ${para#*=}`
+    elif [[ $para == --soc_version* ]];then
+        soc_version=`echo ${para#*=}`
+    elif [[ $para == --input_shape* ]];then
+        input_shape=`echo ${para#*=}`
+    fi
+done
+
+if [[ $model  == "" ]];then
+   echo "[Error] para \"model \" must be config"
+   exit 1
+fi
+
+if [[ $output  == "" ]];then
+   echo "[Error] para \"output \" must be config"
+   exit 1
+fi
+
+atc \
+    --model=${model} \
+    --output=${output} \
+    --soc_version=${soc_version} \
+    --input_shape=${input_shape} \
+    --framework=1 \
+    --input_format=NCHW
\ No newline at end of file
diff --git a/official/cv/brdnet/infer/data/config/brdnet.pipeline b/official/cv/brdnet/infer/data/config/brdnet.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..f8b76af5029405f932208651ddd31883e7866a01
--- /dev/null
+++ b/official/cv/brdnet/infer/data/config/brdnet.pipeline
@@ -0,0 +1,26 @@
+{
+"brdnet": {
+    "appsrc0": {
+        "factory": "appsrc",
+        "next": "modelInfer"
+        },
+    "modelInfer": {
+        "props": {
+            "modelPath": "../data/model/channel_3_sigma_15.om",
+            "dataSource": "appsrc0"
+        },
+        "factory": "mxpi_tensorinfer",
+        "next": "dataserialize"
+        },
+    "dataserialize": {
+        "props": {
+             "outputDataKeys": "modelInfer"
+        },
+        "factory": "mxpi_dataserialize",
+        "next": "appsink0"
+    },
+    "appsink0": {
+        "factory": "appsink"
+    }
+  }
+}
diff --git a/official/cv/brdnet/infer/docker_start_infer.sh b/official/cv/brdnet/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..64cf90a2311bdfb21d68a4e90e08602670fdf632
--- /dev/null
+++ b/official/cv/brdnet/infer/docker_start_infer.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+data_dir=$2
+
+function show_help() {
+    echo "Usage: docker_start.sh docker_image data_dir"
+}
+
+function param_check() {
+    if [ -z "${docker_image}" ]; then
+        echo "please input docker_image"
+        show_help
+        exit 1
+    fi
+
+    if [ -z "${data_dir}" ]; then
+        echo "please input data_dir"
+        show_help
+        exit 1
+    fi
+}
+
+param_check
+
+docker run -it \
+  --device=/dev/davinci0 \
+  --device=/dev/davinci_manager \
+  --device=/dev/devmm_svm \
+  --device=/dev/hisi_hdc \
+  -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+  -v ${data_dir}:${data_dir} \
+  ${docker_image} \
+  /bin/bash
diff --git a/official/cv/brdnet/infer/mxbase/CMakeLists.txt b/official/cv/brdnet/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..957e9ad873dc337728c321b9704cecec997b5436
--- /dev/null
+++ b/official/cv/brdnet/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,49 @@
+cmake_minimum_required(VERSION 3.14.0)
+project(brdnet)
+set(TARGET brdnet)
+add_definitions(-DENABLE_DVPP_INTERFACE)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_definitions(-Dgoogle=mindxsdk_private)
+add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall)
+add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -s -pie)
+# Check environment variable
+if(NOT DEFINED ENV{ASCEND_HOME})
+    message(FATAL_ERROR "please define environment variable:ASCEND_HOME")
+endif()
+if(NOT DEFINED ENV{ASCEND_VERSION})
+    message(WARNING "please define environment variable:ASCEND_VERSION")
+endif()
+if(NOT DEFINED ENV{ARCH_PATTERN})
+    message(WARNING "please define environment variable:ARCH_PATTERN")
+endif()
+set(ACL_INC_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/include)
+set(ACL_LIB_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/lib64)
+set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
+set(MXBASE_INC ${MXBASE_ROOT_DIR}/include)
+set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib)
+set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors)
+set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/include/MxBase/postprocess/include)
+
+if(NOT DEFINED ENV{MXSDK_OPENSOURCE_DIR})
+    message(WARNING "please define environment variable:MXSDK_OPENSOURCE_DIR")
+endif()
+
+set(OPENSOURCE_DIR $ENV{MXSDK_OPENSOURCE_DIR})
+
+include_directories(src)
+include_directories(${ACL_INC_DIR})
+include_directories(${OPENSOURCE_DIR}/include)
+include_directories(${OPENSOURCE_DIR}/include/opencv4)
+
+
+include_directories(${MXBASE_INC})
+include_directories(${MXBASE_POST_PROCESS_DIR})
+
+link_directories(${ACL_LIB_DIR})
+link_directories(${OPENSOURCE_DIR}/lib)
+link_directories(${MXBASE_LIB_DIR})
+link_directories(${MXBASE_POST_LIB_DIR})
+
+add_executable(${TARGET} src/main.cpp src/BRDNet.cpp)
+target_link_libraries(${TARGET} glog cpprest mxbase opencv_world stdc++fs)
+install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/)
\ No newline at end of file
diff --git a/official/cv/brdnet/infer/mxbase/build.sh b/official/cv/brdnet/infer/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..925f6b1a8bcf253215a28d18a55fa8344e25ae14
--- /dev/null
+++ b/official/cv/brdnet/infer/mxbase/build.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+export ASCEND_HOME=/usr/local/Ascend
+export ASCEND_VERSION=nnrt/latest
+export ARCH_PATTERN=.
+export MXSDK_OPENSOURCE_DIR=/usr/local/sdk_home/mxManufacture/opensource
+export LD_LIBRARY_PATH="${MX_SDK_HOME}/lib/plugins:${MX_SDK_HOME}/opensource/lib64:${MX_SDK_HOME}/lib:${MX_SDK_HOME}/lib/modelpostprocessors:${MX_SDK_HOME}/opensource/lib:/usr/local/Ascend/nnae/latest/fwkacllib/lib64:${LD_LIBRARY_PATH}"
+export ASCEND_OPP_PATH="/usr/local/Ascend/nnae/latest/opp"
+export ASCEND_AICPU_PATH="/usr/local/Ascend/nnae/latest"
+
+function check_env()
+{
+    # set ASCEND_VERSION to ascend-toolkit/latest when it was not specified by user
+    if [ ! "${ASCEND_VERSION}" ]; then
+        export ASCEND_VERSION=ascend-toolkit/latest
+        echo "Set ASCEND_VERSION to the default value: ${ASCEND_VERSION}"
+    else
+        echo "ASCEND_VERSION is set to ${ASCEND_VERSION} by user"
+    fi
+
+    if [ ! "${ARCH_PATTERN}" ]; then
+        # set ARCH_PATTERN to ./ when it was not specified by user
+        export ARCH_PATTERN=./
+        echo "ARCH_PATTERN is set to the default value: ${ARCH_PATTERN}"
+    else
+        echo "ARCH_PATTERN is set to ${ARCH_PATTERN} by user"
+    fi
+}
+
+function build_brdnet()
+{
+    cd .
+    rm -rf build
+    mkdir -p build
+    cd build
+    cmake ..
+    make
+    ret=$?
+    if [ ${ret} -ne 0 ]; then
+        echo "Failed to build brdnet."
+        exit ${ret}
+    fi
+    make install
+}
+
+rm -rf ./result
+mkdir -p ./result
+
+check_env
+build_brdnet
\ No newline at end of file
diff --git a/official/cv/brdnet/infer/mxbase/src/BRDNet.cpp b/official/cv/brdnet/infer/mxbase/src/BRDNet.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2ec0b49c340265c004155ff609dcae2a344408e1
--- /dev/null
+++ b/official/cv/brdnet/infer/mxbase/src/BRDNet.cpp
@@ -0,0 +1,177 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "BRDNet.h"
+#include <unistd.h>
+#include <sys/stat.h>
+#include <map>
+#include <fstream>
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+
// Initialize the Ascend device, bind the tensor context to it, load the .om
// model, and cache the model's input shape / element count for later reads.
// Returns the first non-OK MxBase error code encountered.
APP_ERROR BRDNet::Init(const InitParam &initParam) {
    this->deviceId_ = initParam.deviceId;
    this->outputDataPath_ = initParam.outputDataPath;
    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
    if (ret != APP_ERR_OK) {
        LogError << "Init devices failed, ret=" << ret << ".";
        return ret;
    }

    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
    if (ret != APP_ERR_OK) {
        LogError << "Set context failed, ret=" << ret << ".";
        return ret;
    }

    this->model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
    ret = this->model_->Init(initParam.modelPath, this->modelDesc_);
    if (ret != APP_ERR_OK) {
        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
        return ret;
    }
    // Overwrite the default input shape with the loaded model's actual input
    // tensor dims, and derive the total element count used by file reads.
    // NOTE(review): assumes the model's input rank fits inputDataShape_'s
    // preallocated 4 entries — confirm for non-NCHW models.
    uint32_t input_data_size = 1;
    for (size_t j = 0; j < this->modelDesc_.inputTensors[0].tensorDims.size(); ++j) {
        this->inputDataShape_[j] = (uint32_t)this->modelDesc_.inputTensors[0].tensorDims[j];
        input_data_size *= this->inputDataShape_[j];
    }
    this->inputDataSize_ = input_data_size;

    return APP_ERR_OK;
}
+
+APP_ERROR BRDNet::DeInit() {
+    this->model_->DeInit();
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+APP_ERROR BRDNet::ReadTensorFromFile(const std::string &file, float *data) {
+    if (data == NULL) {
+        LogError << "input data is invalid.";
+        return APP_ERR_COMM_INVALID_POINTER;
+    }
+
+    std::ifstream infile;
+    // open data file
+    infile.open(file, std::ios_base::in | std::ios_base::binary);
+    // check data file validity
+    if (infile.fail()) {
+        LogError << "Failed to open data file: " << file << ".";
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    infile.read(reinterpret_cast<char*>(data), sizeof(float) * this->inputDataSize_);
+    infile.close();
+    return APP_ERR_OK;
+}
+
+APP_ERROR BRDNet::ReadInputTensor(const std::string &fileName, std::vector<MxBase::TensorBase> *inputs) {
+    float data[this->inputDataSize_] = {0};
+    APP_ERROR ret = ReadTensorFromFile(fileName, data);
+    if (ret != APP_ERR_OK) {
+        LogError << "ReadTensorFromFile failed.";
+        return ret;
+    }
+    const uint32_t dataSize = this->modelDesc_.inputTensors[0].tensorSize;
+    MxBase::MemoryData memoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE, this->deviceId_);
+    MxBase::MemoryData memoryDataSrc(reinterpret_cast<void*>(data), dataSize, MxBase::MemoryData::MEMORY_HOST_MALLOC);
+
+    ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc and copy failed.";
+        return ret;
+    }
+
+    inputs->push_back(MxBase::TensorBase(memoryDataDst, false, this->inputDataShape_, MxBase::TENSOR_DTYPE_FLOAT32));
+    return APP_ERR_OK;
+}
+
+
// Allocate device-side output tensors matching the model description, then
// run one synchronous static-batch inference and record its latency.
APP_ERROR BRDNet::Inference(const std::vector<MxBase::TensorBase> &inputs,
                                 std::vector<MxBase::TensorBase> *outputs) {
    auto dtypes = this->model_->GetOutputDataType();
    // One output TensorBase per model output, shaped from the model desc.
    for (size_t i = 0; i < this->modelDesc_.outputTensors.size(); ++i) {
        std::vector<uint32_t> shape = {};
        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
            shape.push_back((uint32_t)this->modelDesc_.outputTensors[i].tensorDims[j]);
        }
        MxBase::TensorBase tensor(shape, dtypes[i], MxBase::MemoryData::MemoryType::MEMORY_DEVICE, this->deviceId_);
        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
        if (ret != APP_ERR_OK) {
            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
            return ret;
        }
        outputs->push_back(tensor);
    }

    MxBase::DynamicInfo dynamicInfo = {};
    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
    // Wall-clock the inference; main() aggregates g_inferCost for throughput.
    auto startTime = std::chrono::high_resolution_clock::now();
    APP_ERROR ret = this->model_->ModelInference(inputs, *outputs, dynamicInfo);
    auto endTime = std::chrono::high_resolution_clock::now();
    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
    // NOTE(review): latency is recorded even when inference fails, so failed
    // runs are counted in the timing statistics.
    g_inferCost.push_back(costMs);

    if (ret != APP_ERR_OK) {
        LogError << "ModelInference failed, ret=" << ret << ".";
        return ret;
    }
    return APP_ERR_OK;
}
+
+
+APP_ERROR  BRDNet::WriteResult(const std::string &imageFile, std::vector<MxBase::TensorBase> outputs) {
+    for (size_t i = 0; i < outputs.size(); ++i) {
+        APP_ERROR ret = outputs[i].ToHost();
+        if (ret != APP_ERR_OK) {
+            LogError << GetError(ret) << "tohost fail.";
+            return ret;
+        }
+        void *netOutput = outputs[i].GetBuffer();
+        std::vector<uint32_t> out_shape = outputs[i].GetShape();
+        int pos = imageFile.rfind('/');
+        std::string fileName(imageFile, pos + 1);
+        fileName.replace(fileName.find('.'), fileName.size() - fileName.find('.'), "_0.bin");
+        std::string outFileName = this->outputDataPath_ + "/" + fileName;
+        FILE *outputFile_ = fopen(outFileName.c_str(), "wb");
+        fwrite(netOutput, out_shape[0]*out_shape[1]*out_shape[2]*out_shape[3], sizeof(float), outputFile_);
+        fclose(outputFile_);
+    }
+    return APP_ERR_OK;
+}
+
+
+APP_ERROR BRDNet::Process(const std::string &inferPath, const std::string &fileName) {
+    std::vector<MxBase::TensorBase> inputs = {};
+    std::string inputIdsFile = inferPath + fileName;
+    APP_ERROR ret = ReadInputTensor(inputIdsFile, &inputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "Read input ids failed, ret=" << ret << ".";
+        return ret;
+    }
+    std::vector<MxBase::TensorBase> outputs = {};
+    ret = Inference(inputs, &outputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    ret = WriteResult(fileName, outputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "Write result failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
diff --git a/official/cv/brdnet/infer/mxbase/src/BRDNet.h b/official/cv/brdnet/infer/mxbase/src/BRDNet.h
new file mode 100644
index 0000000000000000000000000000000000000000..808f1ecfb43806f83edaddb91ee6c4c7601a6ef0
--- /dev/null
+++ b/official/cv/brdnet/infer/mxbase/src/BRDNet.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#ifndef MXBASE_BRDNET_H
#define MXBASE_BRDNET_H

#include <memory>
#include <utility>
#include <vector>
#include <string>
#include <map>
#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
#include "MxBase/Tensor/TensorContext/TensorContext.h"

// Per-inference latencies in milliseconds, appended by BRDNet::Inference()
// and summarized in main() (defined in main.cpp).
extern std::vector<double> g_inferCost;

// Configuration required to initialize a BRDNet instance.
struct InitParam {
    uint32_t deviceId;           // Ascend device to run inference on
    std::string modelPath;       // path to the compiled .om model file
    std::string outputDataPath;  // directory receiving the result .bin files
};

// Thin wrapper around MxBase inference for the BRDNet denoising model:
// reads a preprocessed float32 tensor from a binary file, runs the model,
// and writes the raw output tensor back to disk.
class BRDNet {
 public:
    APP_ERROR Init(const InitParam &initParam);
    APP_ERROR DeInit();
    APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> *outputs);
    APP_ERROR Process(const std::string &inferPath, const std::string &fileName);

 protected:
    APP_ERROR ReadTensorFromFile(const std::string &file, float *data);
    APP_ERROR ReadInputTensor(const std::string &fileName, std::vector<MxBase::TensorBase> *inputs);
    APP_ERROR WriteResult(const std::string &imageFile, std::vector<MxBase::TensorBase> outputs);

 private:
    std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
    MxBase::ModelDesc modelDesc_ = {};
    uint32_t deviceId_ = 0;
    std::string outputDataPath_ = "./result";
    // Defaults match the released model (NCHW, 1x3x500x500); Init() overwrites
    // both from the loaded model's actual input tensor description.
    std::vector<uint32_t> inputDataShape_ = {1, 3, 500, 500};
    uint32_t inputDataSize_ = 750000;
};

#endif
diff --git a/official/cv/brdnet/infer/mxbase/src/main.cpp b/official/cv/brdnet/infer/mxbase/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..37bd33313e57d177b88cbb48caf62dfd5cc1bc62
--- /dev/null
+++ b/official/cv/brdnet/infer/mxbase/src/main.cpp
@@ -0,0 +1,105 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <dirent.h>
+#include <iostream>
+#include <fstream>
+#include <vector>
+#include "BRDNet.h"
+#include "MxBase/Log/Log.h"
+
+std::vector<double> g_inferCost;
+
+void InitProtonetParam(InitParam* initParam, const std::string &model_path, const std::string &output_data_path) {
+    initParam->deviceId = 0;
+    initParam->modelPath = model_path;
+    initParam->outputDataPath = output_data_path;
+}
+
+APP_ERROR ReadFilesFromPath(const std::string &path, std::vector<std::string> *files) {
+    DIR *dir = NULL;
+    struct dirent *ptr = NULL;
+
+    if ((dir=opendir(path.c_str())) == NULL) {
+        LogError << "Open dir error: " << path;
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+
+    while ((ptr=readdir(dir)) != NULL) {
+        if (ptr->d_type == 8) {
+            files->push_back(ptr->d_name);
+        }
+    }
+    closedir(dir);
+    return APP_ERR_OK;
+}
+
+
+int main(int argc, char* argv[]) {
+    LogInfo << "=======================================  !!!Parameters setting!!!" << \
+               "========================================";
+    std::string model_path = argv[1];
+    LogInfo << "==========  loading model weights from: " << model_path;
+
+    std::string input_data_path = argv[2];
+    LogInfo << "==========  input data path = " << input_data_path;
+
+    std::string output_data_path = argv[3];
+    LogInfo << "==========  output data path = " << output_data_path << \
+               " WARNING: please make sure that this folder is created in advance!!!";
+
+    LogInfo << "========================================  !!!Parameters setting!!! " << \
+               "========================================";
+
+    InitParam initParam;
+    InitProtonetParam(&initParam, model_path, output_data_path);
+    auto brdnet = std::make_shared<BRDNet>();
+    APP_ERROR ret = brdnet->Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "BRDNet init failed, ret=" << ret << ".";
+        return ret;
+    }
+    std::vector<std::string> files;
+    ret = ReadFilesFromPath(input_data_path, &files);
+    if (ret != APP_ERR_OK) {
+        LogError << "Read files from path failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    // do infer
+    for (uint32_t i = 0; i < files.size(); i++) {
+        LogInfo << "Processing: " + std::to_string(i+1) + "/" + std::to_string(files.size()) + " ---> " + files[i];
+        ret = brdnet->Process(input_data_path, files[i]);
+        if (ret != APP_ERR_OK) {
+            LogError << "BRDNet process failed, ret=" << ret << ".";
+            brdnet->DeInit();
+            return ret;
+        }
+    }
+
+    LogInfo << "infer succeed and write the result data with binary file !";
+
+    brdnet->DeInit();
+    double costSum = 0;
+    for (uint32_t i = 0; i < g_inferCost.size(); i++) {
+        costSum += g_inferCost[i];
+    }
+    LogInfo << "Infer images sum " << g_inferCost.size() << ", cost total time: " << costSum << " ms.";
+    LogInfo << "The throughput: " << g_inferCost.size() * 1000 / costSum << " bin/sec.";
+    LogInfo << "==========  The infer result has been saved in ---> " << output_data_path;
+    return APP_ERR_OK;
+}
diff --git a/official/cv/brdnet/infer/requirements.txt b/official/cv/brdnet/infer/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..354fd77807a01877d28d75ff2cda3825c7aa7cb5
--- /dev/null
+++ b/official/cv/brdnet/infer/requirements.txt
@@ -0,0 +1,2 @@
+pillow
+numpy
\ No newline at end of file
diff --git a/official/cv/brdnet/infer/sdk/main.py b/official/cv/brdnet/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e34a3ee4a65e0ac70b41bf3151a26c8b3ce1fce
--- /dev/null
+++ b/official/cv/brdnet/infer/sdk/main.py
@@ -0,0 +1,165 @@
+'''
+The scripts to execute sdk infer
+'''
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import argparse
+import os
+import glob
+import time
+import math
+import PIL.Image as Image
+
+import MxpiDataType_pb2 as MxpiDataType
+import numpy as np
+from StreamManagerApi import StreamManagerApi, InProtobufVector, \
+    MxProtobufIn, StringVector
+
+
def parse_args():
    """Parse the command-line options controlling SDK inference."""
    arg_parser = argparse.ArgumentParser(description="BRDNet process")
    arg_parser.add_argument("--pipeline", type=str, default=None, help="SDK infer pipeline")
    arg_parser.add_argument("--clean_image_path", type=str, default=None, help="root path of image without noise")
    arg_parser.add_argument('--image_width', type=int, default=500, help='resized image width')
    arg_parser.add_argument('--image_height', type=int, default=500, help='resized image height')
    arg_parser.add_argument('--channel', type=int, default=3, help='image channel, 3 for color, 1 for gray')
    arg_parser.add_argument('--sigma', type=int, default=15, help='level of noise')
    return arg_parser.parse_args()
+
def calculate_psnr(image1, image2):
    """Return the PSNR (dB) between two images scaled to [0, 1].

    Args:
        image1, image2: array-likes of identical shape whose pixel values
            lie in [0, 1] (the peak signal value is taken to be 1.0).

    Returns:
        float: ``20 * log10(1.0 / RMSE)``; ``math.inf`` when the images are
        identical (RMSE == 0) — the original raised ZeroDivisionError there.
    """
    image1 = np.float64(image1)
    image2 = np.float64(image2)
    diff = (image1 - image2).flatten('C')
    rmse = math.sqrt(np.mean(diff ** 2.))
    if rmse == 0:
        # Identical inputs: noise power is zero, so PSNR is unbounded.
        return math.inf
    return 20 * math.log10(1.0 / rmse)
+
def send_source_data(appsrc_id, tensor, stream_name, stream_manager):
    """
    Construct the input of the stream,
    send inputs data to a specified stream based on streamName.

    Returns:
        bool: send data success or not
    """
    # Wrap the numpy tensor in a MxpiTensorPackageList protobuf: one package
    # holding one tensor whose shape and raw bytes mirror the array exactly.
    tensor_package_list = MxpiDataType.MxpiTensorPackageList()
    tensor_package = tensor_package_list.tensorPackageVec.add()
    array_bytes = tensor.tobytes()
    tensor_vec = tensor_package.tensorVec.add()
    tensor_vec.deviceId = 0
    tensor_vec.memType = 0
    for i in tensor.shape:
        tensor_vec.tensorShape.append(i)
    tensor_vec.dataStr = array_bytes
    tensor_vec.tensorDataSize = len(array_bytes)
    # Route the serialized protobuf to the appsrc element chosen by appsrc_id.
    key = "appsrc{}".format(appsrc_id).encode('utf-8')
    protobuf_vec = InProtobufVector()
    protobuf = MxProtobufIn()
    protobuf.key = key
    protobuf.type = b'MxTools.MxpiTensorPackageList'
    protobuf.protobuf = tensor_package_list.SerializeToString()
    protobuf_vec.push_back(protobuf)

    # SendProtobuf returns a negative code on failure.
    ret = stream_manager.SendProtobuf(stream_name, appsrc_id, protobuf_vec)
    if ret < 0:
        print("Failed to send data to stream.")
        return False
    return True
+
+
def main():
    """
    read pipeline and do infer

    For every image under --clean_image_path: add synthetic Gaussian noise,
    send the noisy image through the SDK stream, collect the denoised output,
    and record per-image and average PSNR in ./outputs/denoise_results.txt.
    """

    args = parse_args()

    # init stream manager
    stream_manager_api = StreamManagerApi()
    ret = stream_manager_api.InitManager()
    if ret != 0:
        print("Failed to init Stream manager, ret=%s" % str(ret))
        return

    # create streams by pipeline config file
    with open(os.path.realpath(args.pipeline), 'rb') as f:
        pipeline_str = f.read()
    ret = stream_manager_api.CreateMultipleStreams(pipeline_str)
    if ret != 0:
        print("Failed to create Stream, ret=%s" % str(ret))
        return

    # stream name must match the key used in the .pipeline config
    stream_name = b'brdnet'
    infer_total_time = 0
    psnr = []   #after denoise
    image_list = glob.glob(os.path.join(args.clean_image_path, '*'))
    if not os.path.exists("./outputs"):
        os.makedirs("./outputs")
    with open("./outputs/denoise_results.txt", 'w') as f:
        for image in sorted(image_list):
            print("Denosing image:", image)# read image
            if args.channel == 3:
                img_clean = np.array(Image.open(image).resize((args.image_width, args.image_height), \
                                     Image.ANTIALIAS), dtype='float32') / 255.0
            else:
                img_clean = np.expand_dims(np.array(Image.open(image).resize((args.image_width, \
                            args.image_height), Image.ANTIALIAS).convert('L'), dtype='float32') / 255.0, axis=2)
            np.random.seed(0) #obtain the same random data when it is in the test phase
            # synthesize the noisy input with Gaussian noise of std sigma/255
            img_test = img_clean + np.random.normal(0, args.sigma/255.0, img_clean.shape).astype(np.float32)#HWC
            noise_image = np.expand_dims(img_test.transpose((2, 0, 1)), 0)#NCHW

            if not send_source_data(0, noise_image, stream_name, stream_manager_api):
                return
            # Obtain the inference result by specifying streamName and uniqueId.
            key_vec = StringVector()
            key_vec.push_back(b'modelInfer')
            start_time = time.time()
            infer_result = stream_manager_api.GetProtobuf(stream_name, 0, key_vec)
            infer_total_time += time.time() - start_time
            if infer_result.size() == 0:
                print("inferResult is null")
                return
            if infer_result[0].errorCode != 0:
                print("GetProtobuf error. errorCode=%d" % (infer_result[0].errorCode))
                return
            # deserialize the raw little-endian float32 output back to CHW
            result = MxpiDataType.MxpiTensorPackageList()
            result.ParseFromString(infer_result[0].messageBuf)
            res = np.frombuffer(result.tensorPackageVec[0].tensorVec[0].dataStr, dtype='<f4')
            y_predict = res.reshape(args.channel, args.image_height, args.image_width)
            img_out = y_predict.transpose((1, 2, 0))#HWC
            img_out = np.clip(img_out, 0, 1)
            psnr_denoised = calculate_psnr(img_clean, img_out)
            psnr.append(psnr_denoised)
            print(image, ": psnr_denoised: ", " ", psnr_denoised)
            print(image, ": psnr_denoised: ", " ", psnr_denoised, file=f)
            filename = image.split('/')[-1].split('.')[0]    # get the name of image file
            img_out.tofile(os.path.join("./outputs", filename+'_denoise.bin'))
        psnr_avg = sum(psnr)/len(psnr)
        print("Average PSNR:", psnr_avg)
        print("Average PSNR:", psnr_avg, file=f)
    print("Testing finished....")
    print("=======================================")
    print("The total time of inference is {} s".format(infer_total_time))
    print("=======================================")

    # destroy streams
    stream_manager_api.DestroyAllStreams()
+
+if __name__ == '__main__':
+    main()
diff --git a/official/cv/brdnet/infer/sdk/run.sh b/official/cv/brdnet/infer/sdk/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b65909af354b9c836b6f22458f09ede4943cff8a
--- /dev/null
+++ b/official/cv/brdnet/infer/sdk/run.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
# Default inference parameters; each may be overridden via --<name>=<value>.
# Fixes: dropped the useless `echo` inside backtick substitutions and quoted
# all expansions so paths containing spaces no longer break the script.
image_width=500
image_height=500
channel=3
sigma=15
# help message
if [[ $1 == --help || $1 == -h ]];then
    echo "usage:bash ./run.sh <args>"
    echo "parameter explain:
    --pipeline            set SDK infer pipeline, e.g. --pipeline=../data/config/brdnet.pipeline
    --clean_image_path    set the root path of image without noise, e.g. --clean_image_path=./Kodak24
    --image_width         set the resized image width, default: --image_width=500
    --image_height        set the resized image height, default: --image_height=500
    --channel             set the image channel, 3 for color, 1 for gray, default: --channel=3
    --sigma               set the level of noise, default: --sigma=15
    -h/--help             show help message
    "
    exit 1
fi

# Parse --key=value style arguments.
for para in "$@"
do
    if [[ $para == --pipeline* ]];then
        pipeline="${para#*=}"
    elif [[ $para == --clean_image_path* ]];then
        clean_image_path="${para#*=}"
    elif [[ $para == --image_width* ]];then
        image_width="${para#*=}"
    elif [[ $para == --image_height* ]];then
        image_height="${para#*=}"
    elif [[ $para == --channel* ]];then
        channel="${para#*=}"
    elif [[ $para == --sigma* ]];then
        sigma="${para#*=}"
    fi
done

# The pipeline file and the clean-image directory are both mandatory.
if [[ -z $pipeline ]];then
   echo "[Error] para \"pipeline \" must be config"
   exit 1
fi
if [[ -z $clean_image_path ]];then
   echo "[Error] para \"clean_image_path \" must be config"
   exit 1
fi

python3 main.py --pipeline="$pipeline" \
                  --clean_image_path="$clean_image_path" \
                  --image_width="$image_width" \
                  --image_height="$image_height" \
                  --channel="$channel" \
                  --sigma="$sigma"

exit 0
diff --git a/official/cv/brdnet/modelarts/start_train.py b/official/cv/brdnet/modelarts/start_train.py
new file mode 100644
index 0000000000000000000000000000000000000000..953a541225adb5749a8addf3167dff68a3082135
--- /dev/null
+++ b/official/cv/brdnet/modelarts/start_train.py
@@ -0,0 +1,252 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+'''training script for modelarts'''
+import os
+import glob
+import datetime
+import argparse
+import moxing as mox
+import numpy as np
+import PIL.Image as Image
+
+import mindspore
+import mindspore.nn as nn
+from mindspore import context, export
+from mindspore.train import Model
+from mindspore.common import set_seed
+from mindspore.ops import operations as P
+from mindspore.ops import composite as C
+from mindspore.context import ParallelMode
+from mindspore.common.tensor import Tensor
+from mindspore.train.callback import TimeMonitor, LossMonitor
+from mindspore import load_checkpoint, load_param_into_net
+from mindspore.train.callback import CheckpointConfig, ModelCheckpoint
+from mindspore.communication.management import init, get_rank, get_group_size
+
+from src.logger import get_logger
+from src.dataset import create_BRDNetDataset
+from src.models import BRDNet, BRDWithLossCell, TrainingWrapper
+
+
## Params
# Command-line options for ModelArts training: dataset locations, noise
# level, training hyper-parameters, export shape, and distributed settings.
parser = argparse.ArgumentParser()

parser.add_argument('--batch_size', default=32, type=int, help='batch size')
parser.add_argument('--train_data', default='../dataset/waterloo5050step40colorimage/'
                    , type=str, help='path of train data')
parser.add_argument('--test_dir', default='./Test/Kodak24/'
                    , type=str, help='directory of test dataset')
parser.add_argument('--sigma', default=15, type=int, help='noise level')
parser.add_argument('--channel', default=3, type=int
                    , help='image channel, 3 for color, 1 for gray')
parser.add_argument('--epoch', default=50, type=int, help='number of train epoches')
parser.add_argument('--lr', default=1e-3, type=float, help='initial learning rate for Adam')
parser.add_argument('--save_every', default=1, type=int, help='save model at every x epoches')
parser.add_argument('--resume_path', type=str, default=None,
                    help='put the path to resuming file if needed')
parser.add_argument('--resume_name', type=str, default=None,
                    help='resuming file name')
parser.add_argument("--image_height", type=int, default=500, help="Image height for exporting model.")
parser.add_argument("--image_width", type=int, default=500, help="Image width for exporting model.")
parser.add_argument('--train_url', type=str, default='train_url/'
                    , help='needed by modelarts, but we donot use it because the name is ambiguous')
parser.add_argument('--data_url', type=str, default='data_url/'
                    , help='needed by modelarts, but we donot use it because the name is ambiguous')
parser.add_argument('--output_path', type=str, default='./output/'
                    , help='output_path,when use_modelarts is set True, it will be cache/output/')
parser.add_argument('--outer_path', type=str, default='s3://output/'
                    , help='obs path,to store e.g ckpt files ')
parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR"\
                    , help="file format")

parser.add_argument('--device_target', type=str, default='Ascend'
                    , help='device where the code will be implemented. (Default: Ascend)')
parser.add_argument('--is_distributed', type=int, default=0, help='if multi device')
parser.add_argument('--rank', type=int, default=0, help='local rank of distributed')
parser.add_argument('--group_size', type=int, default=1, help='world size of distributed')
parser.add_argument('--is_save_on_master', type=int, default=1, help='save ckpt on master or all rank')
parser.add_argument('--ckpt_save_max', type=int, default=20
                    , help='Maximum number of checkpoint files can be saved. Default: 20.')

# Fix the global seed so data shuffling and noise generation are reproducible.
set_seed(1)
args = parser.parse_args()
# Per-run output directory, stamped with the noise level and start time.
save_dir = os.path.join(args.output_path, 'sigma_' + str(args.sigma) \
           + '_' + datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
+
def get_lr(steps_per_epoch, max_epoch, init_lr):
    """Build a per-step learning-rate schedule.

    The rate starts at ``init_lr`` and is divided by 10 after every block of
    30 epochs (the last block may be shorter), producing one entry for each
    training step.
    """
    schedule = []
    remaining = max_epoch
    rate = init_lr
    while remaining > 0:
        block = min(30, remaining)
        schedule.extend([rate] * (steps_per_epoch * block))
        remaining -= block
        rate /= 10
    return schedule
+
+device_id = int(os.getenv('DEVICE_ID', '0'))
+context.set_context(mode=context.GRAPH_MODE, enable_auto_mixed_precision=True,
+                    device_target=args.device_target, save_graphs=False)
+
def copy_data_from_obs():
    """Stage training inputs from OBS into the local job cache.

    Copies the training set, the optional resume checkpoint, and the test set
    into ``cache/``, then repoints the corresponding ``args`` fields at the
    local copies so the rest of the script only deals with local paths.
    """
    args.logger.info("copying train data from obs to cache....")
    mox.file.copy_parallel(args.train_data, 'cache/dataset')
    args.logger.info("copying train data finished....")
    args.train_data = 'cache/dataset/'

    # resume checkpoint if needed
    if args.resume_path:
        args.logger.info("copying resume checkpoint from obs to cache....")
        mox.file.copy_parallel(args.resume_path, 'cache/resume_path')
        args.logger.info("copying resume checkpoint finished....")
        args.resume_path = 'cache/resume_path/'

    args.logger.info("copying test data from obs to cache....")
    mox.file.copy_parallel(args.test_dir, 'cache/test')
    args.logger.info("copying test data finished....")
    args.test_dir = 'cache/test/'
+
def copy_data_to_obs():
    """Upload everything written under ``save_dir`` back to OBS (``args.outer_path``)."""
    args.logger.info("copying files from cache to obs....")
    mox.file.copy_parallel(save_dir, args.outer_path)
    args.logger.info("copying finished....")
+
def check_best_model():
    """Evaluate every checkpoint saved by this rank and record the best one.

    Denoises the test images with each checkpoint under
    ``save_dir/ckpt_<rank>`` and stores, in ``args.best_ckpt``, the path of
    the checkpoint with the highest average PSNR (SSIM is logged but not used
    for ranking).
    """
    ckpt_list = glob.glob(os.path.join(save_dir, 'ckpt_' + str(args.rank) + '/*.ckpt'))
    model = BRDNet(args.channel)
    transpose = P.Transpose()
    expand_dims = P.ExpandDims()
    compare_psnr = nn.PSNR()
    compare_ssim = nn.SSIM()
    best_psnr = 0.
    args.best_ckpt = ""
    for ckpt in sorted(ckpt_list):
        args.logger.info("testing ckpt: " + str(ckpt))
        load_param_into_net(model, load_checkpoint(ckpt))
        psnr = []   #after denoise
        ssim = []   #after denoise
        file_list = glob.glob(os.path.join(args.test_dir, "*"))
        model.set_train(False)
        for file in file_list:
            # read image
            if args.channel == 3:
                img_clean = np.array(Image.open(file), dtype='float32') / 255.0
            else:
                img_clean = np.expand_dims(np.array(Image.open(file).convert('L'), \
                                           dtype='float32') / 255.0, axis=2)
            np.random.seed(0) #obtain the same random data when it is in the test phase
            # synthesize the noisy input with Gaussian noise of std sigma/255
            img_test = img_clean + np.random.normal(0, args.sigma/255.0, img_clean.shape)
            img_clean = Tensor(img_clean, mindspore.float32) #HWC
            img_test = Tensor(img_test, mindspore.float32)   #HWC
            # predict
            img_clean = expand_dims(transpose(img_clean, (2, 0, 1)), 0)#NCHW
            img_test = expand_dims(transpose(img_test, (2, 0, 1)), 0)#NCHW
            y_predict = model(img_test)    #NCHW
            # calculate numeric metrics
            img_out = C.clip_by_value(y_predict, 0, 1)
            psnr_denoised = compare_psnr(img_clean, img_out)
            ssim_denoised = compare_ssim(img_clean, img_out)
            psnr.append(psnr_denoised.asnumpy()[0])
            ssim.append(ssim_denoised.asnumpy()[0])
        psnr_avg = sum(psnr)/len(psnr)
        ssim_avg = sum(ssim)/len(ssim)
        if psnr_avg > best_psnr:
            best_psnr = psnr_avg
            args.best_ckpt = ckpt
            args.logger.info("new best ckpt: " + str(ckpt) + ", psnr: " +\
                             str(psnr_avg) + ", ssim: " + str(ssim_avg))
+def export_models():  # export the checkpoint selected by check_best_model() to args.file_format
+    args.logger.info("exporting best model....")
+    net = BRDNet(args.channel)
+    load_param_into_net(net, load_checkpoint(args.best_ckpt))
+    input_arr = Tensor(np.zeros([1, args.channel, \
+                                args.image_height, args.image_width]), mindspore.float32)
+    export(net, input_arr, file_name=os.path.join(save_dir, "best_ckpt"), \
+           file_format=args.file_format)
+    args.logger.info("export best model finished....")
+
+def train():  # build dataset, network, optimizer and run the sink-mode training loop
+
+    dataset, args.steps_per_epoch = create_BRDNetDataset(args.train_data, args.sigma, \
+                        args.channel, args.batch_size, args.group_size, args.rank, shuffle=True)
+    model = BRDNet(args.channel)
+
+    # resume checkpoint if needed
+    if args.resume_path:
+        args.resume_path = os.path.join(args.resume_path, args.resume_name)
+        args.logger.info('loading resume checkpoint {} into network'.format(args.resume_path))
+        load_param_into_net(model, load_checkpoint(args.resume_path))
+        args.logger.info('loaded resume checkpoint {} into network'.format(args.resume_path))
+
+    model = BRDWithLossCell(model)  # wraps the network with the sum-reduced MSE loss
+    model.set_train()
+
+    lr_list = get_lr(args.steps_per_epoch, args.epoch, args.lr)
+    optimizer = nn.Adam(params=model.trainable_params(), learning_rate=Tensor(lr_list, mindspore.float32))
+    model = TrainingWrapper(model, optimizer)
+
+    model = Model(model)
+
+    # define callbacks
+    if args.rank == 0:  # only the master rank reports timing and loss
+        time_cb = TimeMonitor(data_size=args.steps_per_epoch)
+        loss_cb = LossMonitor(per_print_times=10)
+        callbacks = [time_cb, loss_cb]
+    else:
+        callbacks = []
+    if args.rank_save_ckpt_flag:
+        ckpt_config = CheckpointConfig(save_checkpoint_steps=args.steps_per_epoch*args.save_every,
+                                       keep_checkpoint_max=args.ckpt_save_max)
+        save_ckpt_path = os.path.join(save_dir, 'ckpt_' + str(args.rank) + '/')
+        ckpt_cb = ModelCheckpoint(config=ckpt_config,
+                                  directory=save_ckpt_path,
+                                  prefix='channel_'+str(args.channel)+'_sigma_'+str(args.sigma)+'_rank_'+str(args.rank))
+        callbacks.append(ckpt_cb)
+
+    model.train(args.epoch, dataset, callbacks=callbacks, dataset_sink_mode=True)
+
+    args.logger.info("training finished....")
+
+if __name__ == '__main__':
+    if args.is_distributed:
+        assert args.device_target == "Ascend"  # distributed training is only supported on Ascend here
+        init()
+        context.set_context(device_id=device_id)
+        args.rank = get_rank()
+        args.group_size = get_group_size()
+        device_num = args.group_size
+        context.reset_auto_parallel_context()
+        context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL)
+    else:
+        if args.device_target == "Ascend":
+            context.set_context(device_id=device_id)
+
+    # decide whether only the master rank or every rank saves ckpts (compatible with model parallel)
+    args.rank_save_ckpt_flag = 0
+    if args.is_save_on_master:
+        if args.rank == 0:
+            args.rank_save_ckpt_flag = 1
+    else:
+        args.rank_save_ckpt_flag = 1
+
+    args.logger = get_logger(save_dir, "BRDNet", args.rank)
+    args.logger.save_args(args)
+    print('Starting training, Total Epochs: %d' % (args.epoch))
+    copy_data_from_obs()
+    train()
+    if args.rank_save_ckpt_flag:  # only ranks that saved checkpoints select and export the best one
+        check_best_model()
+        export_models()
+    copy_data_to_obs()
+    args.logger.info('All task finished!')
diff --git a/official/cv/brdnet/preprocess.py b/official/cv/brdnet/preprocess.py
index 7915fd45d388d9080bf6fd673ac044d6b9c6c8ca..c1ef7e6e6872bc16549962d5709a45a5722fe9cb 100644
--- a/official/cv/brdnet/preprocess.py
+++ b/official/cv/brdnet/preprocess.py
@@ -23,31 +23,34 @@ parser.add_argument('--out_dir', type=str, required=True,
                     help='directory to store the image with noise')
 parser.add_argument('--image_path', type=str, required=True,
                     help='directory of image to add noise')
+parser.add_argument("--image_height", type=int, default=500, help="resized image height.")
+parser.add_argument("--image_width", type=int, default=500, help="resized image width.")
 parser.add_argument('--channel', type=int, default=3
                     , help='image channel, 3 for color, 1 for gray')
 parser.add_argument('--sigma', type=int, default=15, help='level of noise')
 args = parser.parse_args()
 
-def add_noise(out_dir, image_path, channel, sigma):
-    file_list = glob.glob(image_path+'*') # image_path must end by '/'
-    if not os.path.exists(out_dir):
-        os.makedirs(out_dir)
+def add_noise():  # resize each image, add Gaussian noise, dump NCHW float32 .bin files for 310 inference
+    image_list = glob.glob(os.path.join(args.image_path, '*'))
+    if not os.path.exists(args.out_dir):
+        os.makedirs(args.out_dir)
 
-    for file in file_list:
-        print("Adding noise to: ", file)
+    for image in sorted(image_list):
+        print("Adding noise to: ", image)
         # read image
-        if channel == 3:
-            img_clean = np.array(Image.open(file), dtype='float32') / 255.0
+        if args.channel == 3:  # NOTE(review): Image.ANTIALIAS is removed in Pillow>=10 -- confirm pinned Pillow or use Image.LANCZOS
+            img_clean = np.array(Image.open(image).resize((args.image_width, \
+                                 args.image_height), Image.ANTIALIAS), dtype='float32') / 255.0
         else:
-            img_clean = np.expand_dims(np.array(Image.open(file).convert('L'), dtype='float32') / 255.0, axis=2)
+            assert args.channel == 1
+            img_clean = np.expand_dims(np.array(Image.open(image).resize((args.image_width, \
+                        args.image_height), Image.ANTIALIAS).convert('L'), dtype='float32') / 255.0, axis=2)
 
         np.random.seed(0) #obtain the same random data when it is in the test phase
-        img_test = img_clean + np.random.normal(0, sigma/255.0, img_clean.shape).astype(np.float32)#HWC
+        img_test = img_clean + np.random.normal(0, args.sigma/255.0, img_clean.shape).astype(np.float32)#HWC
         img_test = np.expand_dims(img_test.transpose((2, 0, 1)), 0)#NCHW
-        #img_test = np.clip(img_test, 0, 1)
-
-        filename = file.split('/')[-1].split('.')[0]    # get the name of image file
-        img_test.tofile(os.path.join(out_dir, filename+'_noise.bin'))
+        filename = image.split('/')[-1].split('.')[0]    # base name without directory or extension
+        img_test.tofile(os.path.join(args.out_dir, filename+'_noise.bin'))
 
 if __name__ == "__main__":
-    add_noise(args.out_dir, args.image_path, args.channel, args.sigma)
+    add_noise()
diff --git a/official/cv/brdnet/src/dataset.py b/official/cv/brdnet/src/dataset.py
index 1abd533a64e36b77b1f399948276851e0ad36e44..00ebf9d081e351fec0d688754e822fd403a05c88 100644
--- a/official/cv/brdnet/src/dataset.py
+++ b/official/cv/brdnet/src/dataset.py
@@ -12,23 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
+import os
 import glob
 import numpy as np
 import PIL.Image as Image
 import mindspore.dataset as ds
 import mindspore.dataset.vision.c_transforms as CV
-from src.distributed_sampler import DistributedSampler
 
 class BRDNetDataset:
     """ BRDNetDataset.
     Args:
-        data_path: path of images, must end by '/'
+        data_path: path of images
         sigma: noise level
         channel: 3 for color, 1 for gray
     """
     def __init__(self, data_path, sigma, channel):
         images = []
-        file_dictory = glob.glob(data_path+'*.bmp') #notice the data format
+        file_dictory = glob.glob(os.path.join(data_path, '*.bmp')) # only .bmp images are collected
         for file in file_dictory:
             images.append(file)
         self.images = images
@@ -45,10 +45,11 @@ class BRDNetDataset:
         if self.channel == 3:
             get_batch_y = np.array(Image.open(self.images[index]), dtype='uint8')
         else:
-            get_batch_y = np.expand_dims(np.array(Image.open(self.images[index]).convert('L'), dtype='uint8'), axis=2)
+            get_batch_y = np.expand_dims(np.array(\
+                          Image.open(self.images[index]).convert('L'), dtype='uint8'), axis=2)
 
         get_batch_y = get_batch_y.astype('float32')/255.0
-        noise = np.random.normal(0, self.sigma/255.0, get_batch_y.shape).astype('float32')    # noise
+        noise = np.random.normal(0, self.sigma/255.0, get_batch_y.shape).astype('float32')  # noise
         get_batch_x = get_batch_y + noise  # input image = clean image + noise
         return get_batch_x, get_batch_y
 
@@ -58,12 +59,10 @@ class BRDNetDataset:
 def create_BRDNetDataset(data_path, sigma, channel, batch_size, device_num, rank, shuffle):
 
     dataset = BRDNetDataset(data_path, sigma, channel)
-    dataset_len = len(dataset)
-    distributed_sampler = DistributedSampler(dataset_len, device_num, rank, shuffle=shuffle)
     hwc_to_chw = CV.HWC2CHW()
     data_set = ds.GeneratorDataset(dataset, column_names=["image", "label"], \
-               shuffle=shuffle, sampler=distributed_sampler)
+               num_parallel_workers=8, shuffle=shuffle, num_shards=device_num, shard_id=rank)  # built-in sharding replaces the custom DistributedSampler
     data_set = data_set.map(input_columns=["image"], operations=hwc_to_chw, num_parallel_workers=8)
     data_set = data_set.map(input_columns=["label"], operations=hwc_to_chw, num_parallel_workers=8)
     data_set = data_set.batch(batch_size, drop_remainder=True)
-    return data_set, dataset_len
+    return data_set, data_set.get_dataset_size()  # batches per epoch; callers use it as steps_per_epoch
diff --git a/official/cv/brdnet/src/distributed_sampler.py b/official/cv/brdnet/src/distributed_sampler.py
deleted file mode 100644
index 255d8831fd1b62a584d56b90add5bdf875f312da..0000000000000000000000000000000000000000
--- a/official/cv/brdnet/src/distributed_sampler.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""BRDNetDataset distributed sampler."""
-from __future__ import division
-import math
-import numpy as np
-
-
-class DistributedSampler:
-    """Distributed sampler."""
-    def __init__(self, dataset_size, num_replicas=None, rank=None, shuffle=True):
-        if num_replicas is None:
-            print("***********Setting world_size to 1 since it is not passed in ******************")
-            num_replicas = 1
-        if rank is None:
-            print("***********Setting rank to 0 since it is not passed in ******************")
-            rank = 0
-        self.dataset_size = dataset_size
-        self.num_replicas = num_replicas
-        self.rank = rank
-        self.epoch = 0
-        self.num_samples = int(math.ceil(dataset_size * 1.0 / self.num_replicas))
-        self.total_size = self.num_samples * self.num_replicas
-        self.shuffle = shuffle
-
-    def __iter__(self):
-        # deterministically shuffle based on epoch
-        if self.shuffle:
-            indices = np.random.RandomState(seed=self.epoch).permutation(self.dataset_size)
-            # np.array type. number from 0 to len(dataset_size)-1, used as index of dataset
-            indices = indices.tolist()
-            self.epoch += 1
-            # change to list type
-        else:
-            indices = list(range(self.dataset_size))
-
-        # add extra samples to make it evenly divisible
-        indices += indices[:(self.total_size - len(indices))]
-        assert len(indices) == self.total_size
-
-        # subsample
-        indices = indices[self.rank:self.total_size:self.num_replicas]
-        assert len(indices) == self.num_samples
-
-        return iter(indices)
-
-    def __len__(self):
-        return self.num_samples
diff --git a/official/cv/brdnet/src/models.py b/official/cv/brdnet/src/models.py
index 8b0cd4f226a75aa10c5d708ee4269abd6caa47b2..c41f38c385c571599501bcbdefe567afb9293c73 100644
--- a/official/cv/brdnet/src/models.py
+++ b/official/cv/brdnet/src/models.py
@@ -32,21 +32,27 @@ class BRDNet(nn.Cell):
     def __init__(self, channel):
         super(BRDNet, self).__init__()
 
-        self.Conv2d_1 = nn.Conv2d(channel, 64, kernel_size=(3, 3), stride=(1, 1), pad_mode='same', has_bias=True)
+        self.Conv2d_1 = nn.Conv2d(channel, 64, kernel_size=(3, 3), stride=(1, 1), \
+                                  pad_mode='same', has_bias=True)
         self.BRN_1 = nn.BatchNorm2d(64, eps=1e-3)
         self.layer1 = self.make_layer1(15)
-        self.Conv2d_2 = nn.Conv2d(64, channel, kernel_size=(3, 3), stride=(1, 1), pad_mode='same', has_bias=True)
-        self.Conv2d_3 = nn.Conv2d(channel, 64, kernel_size=(3, 3), stride=(1, 1), pad_mode='same', has_bias=True)
+        self.Conv2d_2 = nn.Conv2d(64, channel, kernel_size=(3, 3), stride=(1, 1), \
+                                  pad_mode='same', has_bias=True)
+        self.Conv2d_3 = nn.Conv2d(channel, 64, kernel_size=(3, 3), stride=(1, 1), \
+                                  pad_mode='same', has_bias=True)
         self.BRN_2 = nn.BatchNorm2d(64, eps=1e-3)
         self.layer2 = self.make_layer2(7)
-        self.Conv2d_4 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), pad_mode='same', has_bias=True)
+        self.Conv2d_4 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), \
+                                  pad_mode='same', has_bias=True)
         self.BRN_3 = nn.BatchNorm2d(64, eps=1e-3)
         self.layer3 = self.make_layer2(6)
-        self.Conv2d_5 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), pad_mode='same', has_bias=True)
+        self.Conv2d_5 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), \
+                                  pad_mode='same', has_bias=True)
         self.BRN_4 = nn.BatchNorm2d(64, eps=1e-3)
-        self.Conv2d_6 = nn.Conv2d(64, channel, kernel_size=(3, 3), stride=(1, 1), pad_mode='same', has_bias=True)
-        self.Conv2d_7 = nn.Conv2d(channel*2, channel, kernel_size=(3, 3), stride=(1, 1), pad_mode='same', has_bias=True)
-
+        self.Conv2d_6 = nn.Conv2d(64, channel, kernel_size=(3, 3), stride=(1, 1), \
+                                  pad_mode='same', has_bias=True)
+        self.Conv2d_7 = nn.Conv2d(channel*2, channel, kernel_size=(3, 3), stride=(1, 1), \
+                                  pad_mode='same', has_bias=True)
         self.relu = nn.ReLU()
         self.sub = ops.Sub()
         self.concat = ops.Concat(axis=1)#NCHW
@@ -55,7 +61,8 @@ class BRDNet(nn.Cell):
         layers = []
         assert nums > 0
         for _ in range(nums):
-            layers.append(nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), pad_mode='same', has_bias=True))
+            layers.append(nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), \
+                                    pad_mode='same', has_bias=True))
             layers.append(nn.BatchNorm2d(64, eps=1e-3))
             layers.append(nn.ReLU())
         return nn.SequentialCell(layers)
@@ -112,7 +119,8 @@ class BRDWithLossCell(nn.Cell):
     def __init__(self, network):
         super(BRDWithLossCell, self).__init__()
         self.network = network
-        self.loss = nn.MSELoss(reduction='sum') #we use 'sum' instead of 'mean' to avoid the loss becoming too small
+        self.loss = nn.MSELoss(reduction='sum') #we use 'sum' instead of 'mean' to avoid
+                                                #the loss becoming too small
     def construct(self, images, targets):
         output = self.network(images)
         return self.loss(output, targets)
diff --git a/official/cv/brdnet/train.py b/official/cv/brdnet/train.py
index 9e8d1f97cca2ef7c0c3238911af71fa2d1d876f3..01a44b5c5d68ea1e59794e1804236f73ea5813ed 100644
--- a/official/cv/brdnet/train.py
+++ b/official/cv/brdnet/train.py
@@ -45,7 +45,10 @@ parser.add_argument('--channel', default=3, type=int
 parser.add_argument('--epoch', default=50, type=int, help='number of train epoches')
 parser.add_argument('--lr', default=1e-3, type=float, help='initial learning rate for Adam')
 parser.add_argument('--save_every', default=1, type=int, help='save model at every x epoches')
-parser.add_argument('--pretrain', default=None, type=str, help='path of pre-trained model')
+parser.add_argument('--resume_path', type=str, default=None,
+                    help='put the path to resuming file if needed')
+parser.add_argument('--resume_name', type=str, default=None,
+                    help='resuming file name')
 parser.add_argument('--use_modelarts', type=int, default=0
                     , help='1 for True, 0 for False; when set True, we should load dataset from obs with moxing')
 parser.add_argument('--train_url', type=str, default='train_url/'
@@ -84,7 +87,6 @@ def get_lr(steps_per_epoch, max_epoch, init_lr):
         init_lr /= 10
     return lr_each_step
 
-
 device_id = int(os.getenv('DEVICE_ID', '0'))
 context.set_context(mode=context.GRAPH_MODE,
                     device_target=args.device_target, save_graphs=False)
@@ -112,35 +114,33 @@ def train():
     else:
         args.rank_save_ckpt_flag = 1
 
-
     args.logger = get_logger(save_dir, "BRDNet", args.rank)
     args.logger.save_args(args)
 
     if args.use_modelarts:
         import moxing as mox
         args.logger.info("copying train data from obs to cache....")
-        mox.file.copy_parallel(args.train_data, 'cache/dataset') # the args.train_data must end by '/'
+        mox.file.copy_parallel(args.train_data, 'cache/dataset')
         args.logger.info("copying traindata finished....")
-        args.train_data = 'cache/dataset/'  # the args.train_data must end by '/'
+        args.train_data = 'cache/dataset/'
 
-    dataset, batch_num = create_BRDNetDataset(args.train_data, args.sigma, \
+    dataset, args.steps_per_epoch = create_BRDNetDataset(args.train_data, args.sigma, \
                         args.channel, args.batch_size, args.group_size, args.rank, shuffle=True)
-
-    args.steps_per_epoch = int(batch_num / args.batch_size / args.group_size)
-
     model = BRDNet(args.channel)
 
-    if args.pretrain:
+    # resume checkpoint if needed
+    if args.resume_path:
         if args.use_modelarts:
             import moxing as mox
-            args.logger.info("copying pretrain model from obs to cache....")
-            mox.file.copy_parallel(args.pretrain, 'cache/pretrain')
-            args.logger.info("copying pretrain model finished....")
-            args.pretrain = 'cache/pretrain/'+args.pretrain.split('/')[-1]
+            args.logger.info("copying resume checkpoint from obs to cache....")
+            mox.file.copy_parallel(args.resume_path, 'cache/resume_path')
+            args.logger.info("copying resume checkpoint finished....")
+            args.resume_path = 'cache/resume_path/'
 
-        args.logger.info('loading pre-trained model {} into network'.format(args.pretrain))
-        load_param_into_net(model, load_checkpoint(args.pretrain))
-        args.logger.info('loaded pre-trained model {} into network'.format(args.pretrain))
+        args.resume_path = os.path.join(args.resume_path, args.resume_name)
+        args.logger.info('loading resume checkpoint {} into network'.format(args.resume_path))
+        load_param_into_net(model, load_checkpoint(args.resume_path))
+        args.logger.info('loaded resume checkpoint {} into network'.format(args.resume_path))
 
 
     model = BRDWithLossCell(model)
@@ -154,11 +154,11 @@ def train():
 
     # define callbacks
     if args.rank == 0:
-        time_cb = TimeMonitor(data_size=batch_num)
+        time_cb = TimeMonitor(data_size=args.steps_per_epoch)
         loss_cb = LossMonitor(per_print_times=10)
         callbacks = [time_cb, loss_cb]
     else:
-        callbacks = None
+        callbacks = []
     if args.rank_save_ckpt_flag:
         ckpt_config = CheckpointConfig(save_checkpoint_steps=args.steps_per_epoch*args.save_every,
                                        keep_checkpoint_max=args.ckpt_save_max)