diff --git a/official/cv/maskrcnn_mobilenetv1/infer/convert/convert_om.sh b/official/cv/maskrcnn_mobilenetv1/infer/convert/convert_om.sh
index 1668982990a32df44b1eea569bfd875c018a951d..61ee09c0a55d102967eeddbcda7a3af9e267ac48 100644
--- a/official/cv/maskrcnn_mobilenetv1/infer/convert/convert_om.sh
+++ b/official/cv/maskrcnn_mobilenetv1/infer/convert/convert_om.sh
@@ -29,11 +29,6 @@ input_air_path=$1
 aipp_cfg_file=$2
 output_om_path=$3
 
-export install_path=/usr/local/Ascend/
-export ASCEND_ATC_PATH=${install_path}/atc
-
-export ASCEND_SLOG_PRINT_TO_STDOUT=1
-
 echo "Input AIR file path: ${input_air_path}"
 echo "Output OM file path: ${output_om_path}"
 
diff --git a/official/cv/maskrcnn_mobilenetv1/infer/mxbase/src/PostProcess/MaskRcnnMindsporePost.cpp b/official/cv/maskrcnn_mobilenetv1/infer/mxbase/src/PostProcess/MaskRcnnMindsporePost.cpp
index b10d3741558c0d35f5876506d8cf1aad9922fe2b..ec3658eabba30f2c09fe0d5617a5af9702dba599 100644
--- a/official/cv/maskrcnn_mobilenetv1/infer/mxbase/src/PostProcess/MaskRcnnMindsporePost.cpp
+++ b/official/cv/maskrcnn_mobilenetv1/infer/mxbase/src/PostProcess/MaskRcnnMindsporePost.cpp
@@ -24,6 +24,14 @@
 #include "acl/acl.h"
 
 
+namespace localParameter {
+    const uint32_t VECTOR_FIRST_INDEX = 0;
+    const uint32_t VECTOR_SECOND_INDEX = 1;
+    const uint32_t VECTOR_THIRD_INDEX = 2;
+    const uint32_t VECTOR_FOURTH_INDEX = 3;
+    const uint32_t VECTOR_FIFTH_INDEX = 4;
+}
+
 namespace {
 // Output Tensor
 const int OUTPUT_TENSOR_SIZE = 4;
@@ -133,39 +141,45 @@ bool MaskRcnnMindsporePost::IsValidTensors(const std::vector<TensorBase> &tensor
     }
 
     uint32_t total_num = classNum_ * rpnMaxNum_;
-    if (bboxShape[VECTOR_SECOND_INDEX] != total_num) {
-        LogError << "The output tensor is mismatched: " << total_num << "/" << bboxShape[VECTOR_SECOND_INDEX] << ").";
+    if (bboxShape[localParameter::VECTOR_SECOND_INDEX] != total_num) {
+        LogError << "The output tensor is mismatched: " << total_num << "/"
+                 << bboxShape[localParameter::VECTOR_SECOND_INDEX] << ").";
         return false;
     }
 
-    if (bboxShape[VECTOR_THIRD_INDEX] != OUTPUT_BBOX_TWO_INDEX_SHAPE) {
-        LogError << "The number of bbox[" << VECTOR_THIRD_INDEX << "] dimensions (" << bboxShape[VECTOR_THIRD_INDEX]
+    if (bboxShape[localParameter::VECTOR_THIRD_INDEX] != OUTPUT_BBOX_TWO_INDEX_SHAPE) {
+        LogError << "The number of bbox[" << localParameter::VECTOR_THIRD_INDEX << "] dimensions ("
+                 << bboxShape[localParameter::VECTOR_THIRD_INDEX]
                  << ") is not equal to (" << OUTPUT_BBOX_TWO_INDEX_SHAPE << ")";
         return false;
     }
 
     auto classShape = tensors[OUTPUT_CLASS_INDEX].GetShape();
-    if (classShape[VECTOR_SECOND_INDEX] != total_num) {
-        LogError << "The output tensor is mismatched: (" << total_num << "/" << classShape[VECTOR_SECOND_INDEX]
+    if (classShape[localParameter::VECTOR_SECOND_INDEX] != total_num) {
+        LogError << "The output tensor is mismatched: (" << total_num << "/"
+                 << classShape[localParameter::VECTOR_SECOND_INDEX]
                  << "). ";
         return false;
     }
 
     auto maskShape = tensors[OUTPUT_MASK_INDEX].GetShape();
-    if (maskShape[VECTOR_SECOND_INDEX] != total_num) {
-        LogError << "The output tensor is mismatched: (" << total_num << "/" << maskShape[VECTOR_SECOND_INDEX] << ").";
+    if (maskShape[localParameter::VECTOR_SECOND_INDEX] != total_num) {
+        LogError << "The output tensor is mismatched: (" << total_num << "/"
+                 << maskShape[localParameter::VECTOR_SECOND_INDEX] << ").";
         return false;
     }
 
     auto maskAreaShape = tensors[OUTPUT_MASK_AREA_INDEX].GetShape();
-    if (maskAreaShape[VECTOR_SECOND_INDEX] != total_num) {
-        LogError << "The output tensor is mismatched: (" << total_num << "/" << maskAreaShape[VECTOR_SECOND_INDEX]
+    if (maskAreaShape[localParameter::VECTOR_SECOND_INDEX] != total_num) {
+        LogError << "The output tensor is mismatched: (" << total_num << "/"
+                 << maskAreaShape[localParameter::VECTOR_SECOND_INDEX]
                  << ").";
         return false;
     }
 
-    if (maskAreaShape[VECTOR_THIRD_INDEX] != maskSize_) {
-        LogError << "The output tensor of mask is mismatched: (" << maskAreaShape[VECTOR_THIRD_INDEX] << "/"
+    if (maskAreaShape[localParameter::VECTOR_THIRD_INDEX] != maskSize_) {
+        LogError << "The output tensor of mask is mismatched: ("
+                 << maskAreaShape[localParameter::VECTOR_THIRD_INDEX] << "/"
                  << maskSize_ << ").";
         return false;
     }
@@ -186,8 +200,8 @@ static void GetDetectBoxesTopK(std::vector<MxBase::DetectBox> &detBoxes, size_t
     detBoxes.erase(detBoxes.begin() + kVal, detBoxes.end());
 }
 
-void MaskRcnnMindsporePost::GetValidDetBoxes(const std::vector<TensorBase> &tensors, std::vector<DetectBox> &detBoxes,
-                                             const uint32_t batchNum) {
+void MaskRcnnMindsporePost::GetValidDetBoxes(const std::vector<TensorBase> &tensors,
+                                             std::vector<DetectBox> &detBoxes, const uint32_t batchNum) {
     LogInfo << "Begin to GetValidDetBoxes Mask GetValidDetBoxes.";
     auto *bboxPtr = reinterpret_cast<aclFloat16 *>(GetBuffer(tensors[OUTPUT_BBOX_INDEX], batchNum));
     auto *labelPtr = reinterpret_cast<int32_t *>(GetBuffer(tensors[OUTPUT_CLASS_INDEX], batchNum));
@@ -228,8 +242,8 @@ APP_ERROR MaskRcnnMindsporePost::GetMaskSize(const ObjectInfo &objInfo, const Re
     int width = static_cast<int>(objInfo.x1 - objInfo.x0 + 1);
     int height = static_cast<int>(objInfo.y1 - objInfo.y0 + 1);
     if (width < 1 || height < 1) {
-        LogError << "The mask bbox is invalid, will be ignored, bboxWidth: " << width << ", bboxHeight: " << height
-                 << ".";
+        LogError << "The mask bbox is invalid, will be ignored, bboxWidth: " <<
+                 width << ", bboxHeight: " << height << ".";
         return APP_ERR_COMM_FAILURE;
     }
 
@@ -238,7 +252,8 @@ APP_ERROR MaskRcnnMindsporePost::GetMaskSize(const ObjectInfo &objInfo, const Re
     return APP_ERR_OK;
 }
 
-APP_ERROR MaskRcnnMindsporePost::MaskPostProcess(ObjectInfo &objInfo, void *maskPtr, const ResizedImageInfo &imgInfo) {
+APP_ERROR MaskRcnnMindsporePost::MaskPostProcess(ObjectInfo &objInfo, void *maskPtr,
+                                                 const ResizedImageInfo &imgInfo) {
     // resize
     cv::Mat maskMat(maskSize_, maskSize_, CV_32FC1);
     auto *maskAclPtr = reinterpret_cast<aclFloat16 *>(maskPtr);
diff --git a/official/cv/yolov4/infer/README.md b/official/cv/yolov4/infer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..673cfedfee18ae6fe70619657c97a84d14a7a0c9
--- /dev/null
+++ b/official/cv/yolov4/infer/README.md
@@ -0,0 +1,831 @@
+# Basic Deliverable Information
+
+**Publisher**: Huawei
+
+**Application Domain**: Object Detection
+
+**Version**: 1.1
+
+**Modified**: 2022.3.29
+
+**Size**: 251.52 MB (air) / 126.24 MB (om) / 503.62 MB (ckpt)
+
+**Framework**: MindSpore\_1.3.0
+
+**Model Format**: ckpt/air/om
+
+**Precision**: Mixed/FP16
+
+**Processor**: Ascend 910 / Ascend 310
+
+**Categories**: Released
+
+**Description**: A YOLOv4 network model trained and saved with the MindSpore framework. After conversion with the ATC tool, it runs on Ascend AI devices and supports inference with both the MindX SDK and MxBase.
+
+# Overview
+
+## Brief Description
+
+As a state-of-the-art detector, YOLOv4 is faster (FPS) and more accurate (MS COCO AP50...95 and AP50) than all available alternative detectors.
+
+The paper validates a large number of features and selects those that improve the accuracy of both classification and detection.
+
+These features can serve as best practices for future research and development.
+
+* [Reference paper](https://arxiv.org/pdf/2004.10934.pdf): Bochkovskiy A, Wang C Y, Liao H Y M. YOLOv4: Optimal Speed and Accuracy of Object Detection[J]. arXiv preprint arXiv:2004.10934, 2020.
+
+To obtain the code at a specific commit_id via Git:
+
+```shell
+git clone {repository_url}     # clone the repository
+cd {repository_name}           # enter the model's code directory
+git checkout {branch}          # switch to the corresponding branch
+git reset --hard {commit_id}   # reset the code to the corresponding commit_id
+cd {code_path}                 # enter the model code path; skip this if the repository contains only this model
+```
+
+## Default Configuration
+
+1. Network architecture
+
+YOLOv4 uses the CSPDarknet53 backbone, the SPP additional module, the PANet path-aggregation neck, and the anchor-based YOLOv4 head.
+
+2. Pre-trained model
+
+YOLOv4 requires the CSPDarknet53 backbone to extract image features for detection.
+
+A model pre-trained on ImageNet2012 can be obtained [here](https://gitee.com/link?target=https%3A%2F%2Fdownload.mindspore.cn%2Fmodel_zoo%2Fr1.2%2Fcspdarknet53_ascend_v120_imagenet2012_official_cv_bs64_top1acc7854_top5acc9428%2Fcspdarknet53_ascend_v120_imagenet2012_official_cv_bs64_top1acc7854_top5acc9428.ckpt).
+
+3. Training parameters
+
+```SHELL
+lr_scheduler: cosine_annealing
+lr: 0.1
+training_shape: 416
+max_epochs: 320
+warmup_epochs: 4
+```
+
+## Supported Features
+
+### Feature Overview
+
+Supported features include: 1. distributed parallel training; 2. mixed-precision training.
+
+### Distributed Parallel Training
+
+MindSpore supports data parallelism and auto parallelism. Auto parallelism is a distributed parallel mode in which MindSpore combines data parallelism, model parallelism, and hybrid parallelism; it automatically builds a cost model and chooses a parallel mode for the user. Related code example:
+
+```python
+context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, device_num=device_num)
+```
+
+### Mixed-Precision Training
+
+Mixed-precision training accelerates deep neural network training by mixing single-precision and half-precision data formats while preserving the accuracy reachable with pure single-precision training. It speeds up computation, reduces memory usage and memory traffic, and makes it possible to train larger models or batch sizes on given hardware.
+
+For FP16 operators, if the given data type is FP32, the MindSpore backend reduces the precision. Users can enable INFO logging and search for the keyword "Reduce precision" to see which operators were affected.
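+
+As a minimal illustrative sketch (not code from this repository), mixed precision can be enabled in MindSpore through the `amp_level` argument of `Model`; the network, loss, and optimizer below are placeholder assumptions:
+
+```python
+import mindspore.nn as nn
+from mindspore import Model
+
+# hypothetical network/loss/optimizer; amp_level="O3" runs the network in FP16
+net = YoloV4Network()  # placeholder for the actual network definition
+loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
+opt = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
+model = Model(net, loss_fn=loss, optimizer=opt, amp_level="O3")
+```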
+
+# Preparation
+
+## Preparing the Training Environment
+
+1. For hardware environment preparation, see the ["Driver and Firmware Installation and Upgrade Guide"](https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909) for your hardware product. The firmware and driver matching the CANN version must be installed on the hardware device.
+
+2. Python3 and Docker must be installed on the host, and you need to log in to the [Ascend Hub](https://ascend.huawei.com/ascendhub/#/home) to obtain the image.
+
+   The images supported by this model are listed in the table below.  
+   **Table 1** Image list  
+
+    | Image name | Image version | Compatible CANN version |  
+    | ------- | ------------ | --------------------- |  
+    | ARM/x86: [mindspore-modelzoo](https://ascendhub.huawei.com/#/detail/mindspore-modelzoo) | 21.0.4   | [5.0.2](https://www.hiascend.com/software/cann/commercial)  |  
+
+## Preparing the Inference Environment
+
+1. For hardware, development, and runtime environment preparation, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-upgrade).
+
+2. Docker must be installed on the host, and you need to log in to the [Ascend Hub](https://ascendhub.huawei.com/#/home) to obtain the image.
+
+   The images supported by this model are listed in the table below.
+
+   **Table 1** Image list
+
+   | Image name | Image version | Compatible CANN version |  
+   | ------- | ------------ | --------------------- |  
+   | ARM/x86: [infer-modelzoo](https://ascendhub.huawei.com/#/detail/infer-modelzoo) | 21.0.4   | [5.0.2](https://www.hiascend.com/software/cann/commercial) |
+
+## Source Code Overview
+
+1. The script directory structure is as follows:
+
+ ```shell
+ infer
+ ├──README.md              # offline inference documentation
+ ├──convert
+ │    ├──aipp.config       # AIPP configuration file
+ │    └──air2om.sh         # OM model conversion script
+ ├──data
+ │    ├──models            # model files
+ │    │   ├──yolov4_coco2017_acc_test.cfg  # inference hyper-parameter settings
+ │    │   ├──yolov4.om     # generated OM model
+ │    │   ├──yolov4.air    # AIR model produced by ModelArts training
+ │    │   ├──trainval.txt  # validation data prepared for inference
+ │    │   ├──object_task_metric.py  # converts inference results to COCO format
+ │    │   └──coco2017.names  # labels of the COCO dataset samples
+ │    └──images            # model input dataset; copy the contents of val2017 here
+ ├──mxbase                 # MxBase-based inference scripts
+ │    ├──src
+ │    │   ├──PostProcess   # post-processing
+ │    │   │   ├──Yolov4MindsporePost.cpp
+ │    │   │   └──Yolov4MindsporePost.h
+ │    │   ├──Yolov4Detection.h
+ │    │   ├──Yolov4Detection.cpp
+ │    │   └──main.cpp
+ │    ├──CMakeLists.txt
+ │    ├──build.sh          # build script
+ │    └──infermxbase.sh    # checks inference accuracy
+ ├──sdk                    # SDK-based inference scripts
+ │    ├──mxpi
+ │    │   ├──CMakeLists.txt
+ │    │   └──build.sh
+ │    ├──config
+ │    │   └──yolov4.pipeline
+ │    ├──run.sh
+ │    └──infersdk.sh      # checks inference accuracy
+ └── docker_start_infer.sh # container startup script
+ ```
+
+# Training
+
+## Dataset Preparation
+
+1. Prepare the dataset yourself. Dataset used: [COCO2017](https://gitee.com/link?target=https%3A%2F%2Fcocodataset.org%2F%23download)
+
+* Supported dataset: [COCO2017](https://gitee.com/link?target=https%3A%2F%2Fcocodataset.org%2F%23download) or a dataset in the same format as MS COCO
+
+* Supported annotations: [COCO2017](https://gitee.com/link?target=https%3A%2F%2Fcocodataset.org%2F%23download) or annotations in the same format as MS COCO
+
+2. Data preparation
+
+* Place the dataset in any path; the folder should contain the following files
+
+      ```SHELL
+      .
+      └── datasets
+          ├── annotations
+          │   ├─ instances_train2017.json
+          │   └─ instances_val2017.json
+          ├─ train2017
+          │   ├─ picture1.jpg
+          │   ├─ ...
+          │   └─ picturen.jpg
+          └─ val2017
+              ├─ picture1.jpg
+              ├─ ...
+              └─ picturen.jpg
+      ```
+
+* Generate a TXT-format inference file for the dataset.
+
+      ```shell
+      # export txt inference data
+      python coco_trainval_anns.py --data_url=./datasets/ --train_url=./infer/data/models/ --val_url=./infer/data/images/
+      # data_url is the storage path of the datasets folder, train_url is where the txt file is written, val_url is where the inference dataset is stored
+      ```
+
+      Each line looks like this:
+
+      ```SHELL
+      0 ../infer/data/images/000000289343.jpg 529 640 16 473 395 511 423 0 204 235 264 412 13 0 499 339 605 1 204 304 256 456
+      ```
+
+      Each line is a space-separated image annotation: the first column is a sequence number, the second is the absolute path of the image used for inference, the next two are the image width and height, and the remaining columns are box and class information in [class, xmin, ymin, xmax, ymax] groups, parsed as in the sketch below.
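+
+      As a minimal sketch (the helper name is illustrative, not part of this repository), one such line can be parsed as:
+
+      ```python
+      def parse_anno_line(line):
+          """Parse 'index path width height [class xmin ymin xmax ymax]...'."""
+          fields = line.split()
+          index, path = int(fields[0]), fields[1]
+          width, height = int(fields[2]), int(fields[3])
+          boxes = []
+          # the remaining fields come in groups of five: class id, then box corners
+          for i in range(4, len(fields), 5):
+              cls, x0, y0, x1, y1 = (int(v) for v in fields[i:i + 5])
+              boxes.append((cls, x0, y0, x1, y1))
+          return index, path, width, height, boxes
+      ```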
+
+## Advanced Reference
+
+### Script Parameters
+
+1. Important training and testing parameters:
+
+   ```SHELL
+   usage: modelarts.py  [--data_url DATA_URL] [--train_url TRAIN_URL] [--checkpoint_url CHECKPOINT_URL]  
+   options:
+      --train_url        The path where the trained model is saved
+      --data_url         Dataset directory
+      --checkpoint_url   The path where the pre-trained model is saved
+   ```
+
+2. The parameters have the following meanings:
+
+   ```SHELL
+    # Train options
+    data_dir: "Train dataset directory."
+    per_batch_size: "Batch size for Training."
+    pretrained_backbone: "The ckpt file of CspDarkNet53."
+    resume_yolov4: "The ckpt file of YOLOv4, which is used for fine-tuning."
+    pretrained_checkpoint: "The ckpt file of YoloV4CspDarkNet53."
+    filter_weight: "Filter the last weight parameters"
+    lr_scheduler: "Learning rate scheduler, options: exponential, cosine_annealing."
+    lr: "Learning rate."
+    lr_epochs: "Epochs at which the lr changes, separated by ','."
+    lr_gamma: "Decrease lr by a factor of exponential lr_scheduler."
+    eta_min: "Eta_min in cosine_annealing scheduler."
+    t_max: "T-max in cosine_annealing scheduler."
+    max_epoch: "Max epoch num to train the model."
+    warmup_epochs: "Warmup epochs."
+    weight_decay: "Weight decay factor."
+    momentum: "Momentum."
+    loss_scale: "Static loss scale."
+    label_smooth: "Whether to use label smooth in CE."
+    label_smooth_factor: "Smooth strength of original one-hot."
+    log_interval: "Logging interval steps."
+    ckpt_path: "Checkpoint save location."
+    ckpt_interval: "Save checkpoint interval."
+    is_save_on_master: "Save ckpt on master or all rank, 1 for master, 0 for all ranks."
+    is_distributed: "Distribute train or not, 1 for yes, 0 for no."
+    rank: "Local rank of distributed."
+    group_size: "World size of device."
+    need_profiler: "Whether use profiler. 0 for no, 1 for yes."
+    training_shape: "Fix training shape."
+    resize_rate: "Resize rate for multi-scale training."
+    run_eval: "Run evaluation when training."
+    save_best_ckpt: "Save best checkpoint when run_eval is True."
+    eval_start_epoch: "Evaluation start epoch when run_eval is True."
+    eval_interval: "Evaluation interval when run_eval is True"
+    ann_file: "path to annotation"
+    each_multiscale: "Apply multi-scale for each scale"
+    detect_head_loss_coff: "the loss coefficient of detect head.
+                           The order of coefficients is large head, medium head and small head"
+    bbox_class_loss_coff: "bbox and class loss coefficient.
+                           The order of coefficients is ciou loss, confidence loss and class loss"
+    labels: "the label of train data"
+    mosaic: "use mosaic data augment"
+    multi_label: "use multi label to nms"
+    multi_label_thresh: "multi label thresh"
+
+    # Eval options
+    pretrained: "model_path, local pretrained model to load"
+    log_path: "checkpoint save location"
+    ann_val_file: "path to annotation"
+
+    # Export options
+    device_id: "Device id for export"
+    batch_size: "batch size for export"
+    testing_shape: "shape for test"
+    ckpt_file: "Checkpoint file path for export"
+    file_name: "output file name for export"
+    file_format: "file format for export"
+    keep_detect: "keep the detect module or not, default: True"
+    img_id_file_path: 'path of image dataset'
+    result_files: 'path to the 310 infer result folder'
+   ```
+
+# Inference
+
+## Preparing Inference Data
+
+1. Download the source package.
+
+   Click "Download Model Script" and "Download Model", and download the required MindX SDK development kit (mxManufacture).
+
+2. Upload the source code to any directory on the inference server and unpack it (e.g., "/home/data/wwq").
+
+3. Build the image.
+
+   **docker build -t** *infer_image* **--build-arg FROM_IMAGE_NAME=** *base_image:tag* **--build-arg SDK_PKG=** *sdk_pkg* **.**
+
+   **Table 1**  Parameter description
+
+   | Parameter | Description |
+   | ------------- | ----------- |
+   | *infer_image* | Name of the inference image; set according to the actual situation. |
+   | *base_image*  | Base image, available from Ascend Hub. |
+   | *tag*         | Image tag; configure according to the actual situation, e.g., 21.0.1. |
+   | sdk_pkg       | Name of the downloaded mxManufacture package, e.g., Ascend-mindxsdk-mxmanufacture_*{version}*_linux-*{arch}*.run. |
+
+   > ![Note](https://images.gitee.com/uploads/images/2021/0719/172222_3c2963f4_923381.gif "icon-note.gif") **Note:**  
+   > Do not omit the "." at the end of the command.
+
+4. Prepare the data.
+
+   Run the coco_trainval_anns.py script located in /infer/data/models to export the data used for inference.
+
+      ```shell
+      # export txt inference data
+      python coco_trainval_anns.py --data_url=./datasets/ --train_url=./infer/data/models/ --val_url=./infer/data/images/
+      # data_url is the storage path of the datasets folder, train_url is where the txt file is written, val_url is where the inference dataset is stored
+      ```
+
+   The AIR model is generated by conversion after "model training".
+
+   Copy the generated inference data to the infer/data/models, infer/mxbase, and infer/sdk directories.
+
+5. Start the container.
+
+   Go to the "infer" directory and run the following command to start the container.  
+
+   ```shell
+   bash docker_start_infer.sh docker_image:tag model_dir
+   ```
+
+   > ![Note](https://images.gitee.com/uploads/images/2021/0926/181445_0077d606_8725359.gif) **Note:**
+   > The MindX SDK development kit (mxManufacture) is already installed in the base image; installation path: "/usr/local/sdk_home".
+
+   **Table 2** Parameter description
+
+   | Parameter      | Description                           |
+   | -------------- | ------------------------------------- |
+   | *docker_image* | Name of the inference image; set according to the actual situation. |
+   | tag            | Image tag; configure according to the actual situation, e.g., 21.0.2. |
+   | model_dir      | Code path.                            |
+
+   Starting the container mounts the inference chip and the data path into it. You can specify which inference chip to mount by modifying "device" in **docker_start_infer.sh**.
+
+## Model Conversion
+
+   1. Prepare the model file.
+
+* Place the **.air model file exported after ModelArts training into the infer/data/models directory.
+
+   2. Convert the model.
+
+* Run infer/convert/air2om.sh; the conversion command is as follows.
+
+      ```SHELL
+      cd ./infer/convert
+      # bash air2om.sh air_path(path of the AIR file) om_path(name of the generated OM file; the script appends the .om suffix)
+      bash air2om.sh ../data/models/yolov4.air ../data/models/yolov4
+      ```
+
+      After execution, an **.om model file is generated in the infer/data/models directory. Note that the om file name must match the one used in the pipeline.
+
+## MxBase Inference
+
+   1. Configure environment variables.
+
+      ```SHELL
+      export ASCEND_HOME=/usr/local/Ascend
+      export ASCEND_VERSION=ascend-toolkit/latest
+      export ARCH_PATTERN=.
+      export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib/modelpostprocessors:$LD_LIBRARY_PATH
+      ```
+
+   2. (Optional) Modify the configuration.
+      Adjust as needed; the configuration lives in "mxbase/src/main.cpp". The modifiable parameters are:
+
+      ```cpp
+      initParam.deviceId = 0;
+      initParam.labelPath = "../data/models/coco2017.names";  // label file actually used
+      initParam.checkTensor = true;
+      initParam.modelPath = "../data/models/yolov4.om";  // inference model file actually used
+      initParam.classNum = 80;  // number of classes in the actual dataset
+      initParam.biasesNum = 18;
+      initParam.biases = "12,16,19,36,40,28,36,75,76,55,72,146,142,110,192,243,459,401";
+      initParam.objectnessThresh = "0.001";
+      initParam.iouThresh = "0.6";  // IOU threshold used by NMS; adjustable
+      initParam.scoreThresh = "0.001";
+      initParam.yoloType = 3;
+      initParam.modelType = 0;
+      initParam.inputType = 0;
+      initParam.anchorDim = 3;
+      ```
+
+      Modify the image resize dimensions in "mxbase/src/Yolov4Detection.cpp" according to the actual situation:
+
+      ```cpp
+      APP_ERROR Yolov4TinyDetectionOpencv::Resize(cv::Mat &srcImageMat, cv::Mat &dstImageMat) {
+         static constexpr uint32_t resizeHeight = 608;  // model input height
+         static constexpr uint32_t resizeWidth = 608;   // model input width
+         cv::resize(srcImageMat, dstImageMat, cv::Size(resizeWidth, resizeHeight));
+         return APP_ERR_OK;
+      }
+      ```
+
+   3. Build the project.
+
+      ```
+      cd ./infer/mxbase
+      bash build.sh
+      ```
+
+   4. Run the inference service.
+
+      a. Make sure the validation-set images have permission 640.
+
+      ```shell
+      # the following command sets the validation images to permission 640
+      chmod -R 640 ../data/images/  # path of the validation images
+      ```
+
+      b. Make sure the result folder is empty or does not exist.
+
+      ```shell
+      # the following commands make sure the result folder is empty or absent
+      rm -rf ./result/result.txt  # remove the result file
+      rm -rf ./result             # remove the result folder
+      rm -rf ./result.json        # remove the converted result file
+      ```
+
+      c. Run the inference executable; make sure the file recording the inference image paths is in the /infer/mxbase folder. The command is as follows.
+
+      ```shell
+      # ./build/Yolov4_mindspore image_path_txt (txt file recording inference image paths, e.g., trainval.txt)
+      ./build/Yolov4_mindspore ./trainval.txt
+      ```
+
+      The inference results are saved in "./result/result.txt".
+
+   5. Check the results.
+
+      Copy infer/data/models/object_task_metric.py and the coco2017 validation-set annotation file instances_val2017.json to the "mxbase" directory.  
+      Modify object_task_metric.py according to the actual situation:
+
+      ```python
+      if __name__ == "__main__":
+        ban_path = './trainval.txt'  # change to the file recording the actual inference dataset paths
+        input_file = './result/result.txt'
+        if not os.path.exists(ban_path):
+            print('The infer text file does not exist.')
+        if not os.path.exists(input_file):
+            print('The result text file does not exist.')
+
+        image_id_list = get_image_id(ban_path)
+        result_dict = get_dict_from_file(input_file, image_id_list)
+        json_file_name = './result.json'
+        with open(json_file_name, 'w') as f:
+            json.dump(result_dict, f)
+
+        # set iouType to 'segm', 'bbox' or 'keypoints'
+        ann_type = ('segm', 'bbox', 'keypoints')
+        # specify type here
+        ann_type = ann_type[1]
+        coco_gt_file = './instances_val2017.json'  # change to the real annotation file
+      ```
+
+   6. Check the accuracy.
+
+      Run the following command to compute the accuracy.
+
+      ```shell
+      bash infermxbase.sh
+      ```
+
+      The inference results are saved in JSON format at "./result.json".  
+      Example accuracy output:
+
+      ```shell
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.455
+       Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.646
+       Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.495
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.278
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.481
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.565
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.358
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.575
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.605
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.424
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.632
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.710
+      ```
+
+## MindX SDK Inference
+
+   1. Build the post-processing code.
+
+      For MindX SDK inference, the post-processing code under "mxbase/src/PostProcess" is compiled directly.
+
+      ```shell
+      cd infer/sdk/mxpi
+      bash build.sh
+      ```
+
+   2. Modify the configuration files.
+
+      a. Modify the pipeline file under config according to the actual situation.
+
+      ```shell
+      {
+       "im_yolov4": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_imagedecoder0"
+        },
+        "mxpi_imagedecoder0": {
+            "props": {
+                "handleMethod": "opencv"
+            },
+            "factory": "mxpi_imagedecoder",
+            "next": "mxpi_imageresize0"
+        },
+        "mxpi_imageresize0": {
+            "props": {
+                "parentName": "mxpi_imagedecoder0",
+                "handleMethod": "opencv",
+                "resizeHeight": "608",#模型输入高度
+                "resizeWidth": "608",#模型输入宽度
+                "resizeType": "Resizer_Stretch"
+            },
+            "factory": "mxpi_imageresize",
+            "next": "mxpi_tensorinfer0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "mxpi_imageresize0",
+                "modelPath": "../data/models/yolov4.om",#推理模型路径
+                "waitingTime": "3000",
+                "outputDeviceId": "-1"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "mxpi_objectpostprocessor0"
+        },
+        "mxpi_objectpostprocessor0": {
+            "props": {
+                "dataSource": "mxpi_tensorinfer0",
+                "postProcessConfigPath": "../data/models/yolov4_coco2017_acc_test.cfg",#推理后处理相关参数配置文件路径
+                "labelPath": "../data/models/coco2017.names",#推理数据集类别标签文件,需自行添加到对应目录
+                "postProcessLibPath": "./mxpi/build/libyolov4_mindspore_post.so"#编译后处理so文件
+            },
+            "factory": "mxpi_objectpostprocessor",
+            "next": "mxpi_dataserialize0"
+        },
+        "mxpi_dataserialize0": {
+            "props": {
+                "outputDataKeys": "mxpi_objectpostprocessor0"
+            },
+            "factory": "mxpi_dataserialize",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "props": {
+                "blocksize": "4096000"
+            },
+            "factory": "appsink"
+        }
+        }
+      }
+      ```
+
+      b. Modify the post-processing configuration file as needed.  
+
+      The configuration file yolov4_coco2017_acc_test.cfg is in the "../data/models/" directory.
+
+      ```shell
+      # hyper-parameters
+      CLASS_NUM=80 # number of classes in the inference dataset
+      BIASES_NUM=18
+      BIASES=12,16,19,36,40,28,36,75,76,55,72,146,142,110,192,243,459,401
+      SCORE_THRESH=0.001
+      OBJECTNESS_THRESH=0.001
+      IOU_THRESH=0.6 # IOU threshold used by NMS; adjustable
+      YOLO_TYPE=3
+      ANCHOR_DIM=3
+      MODEL_TYPE=0
+      RESIZE_FLAG=0
+      ```
+
+   3. Run the inference service.
+
+      a. Make sure the trainval.txt file is in the sdk directory.
+
+      b. Modify the path of the file recording inference image paths in main.py.  
+
+      ```python
+      infer_file = './trainval.txt'  # modify according to the actual situation
+      ```
+
+      c. Make sure the validation images and the compiled post-processing library /sdk/mxpi/build/libyolov4_mindspore_post.so have permission 640.
+
+      ```shell
+      # the following commands set the validation images and the post-processing library to permission 640
+      chmod -R 640 ../data/images/  # path of the validation images
+      chmod 640 ./mxpi/build/libyolov4_mindspore_post.so  # path of the post-processing library
+      ```
+
+      d. Make sure the result folder is empty or does not exist.
+
+      ```shell
+      # the following commands make sure the result folder is empty or absent
+      rm -rf ./result/result.txt  # remove the result file
+      rm -rf ./result             # remove the result folder
+      rm -rf ./result.json        # remove the converted result file
+      ```
+
+      e. Run the inference.
+
+      ```shell
+      cd infer/sdk
+      bash run.sh
+      ```
+
+   4. Check the results.
+
+      Copy infer/data/models/object_task_metric.py and the coco2017 validation-set annotation file instances_val2017.json to the "sdk" directory.  
+      Modify object_task_metric.py according to the actual situation.  
+
+      ```python
+      ...
+      if __name__ == "__main__":
+        ban_path = './trainval.txt'  # change to the file recording the actual inference dataset paths
+        input_file = './result/result.txt'
+        if not os.path.exists(ban_path):
+            print('The infer text file does not exist.')
+        if not os.path.exists(input_file):
+            print('The result text file does not exist.')
+
+        image_id_list = get_image_id(ban_path)
+        result_dict = get_dict_from_file(input_file, image_id_list)
+        json_file_name = './result.json'
+        with open(json_file_name, 'w') as f:
+            json.dump(result_dict, f)
+
+        # set iouType to 'segm', 'bbox' or 'keypoints'
+        ann_type = ('segm', 'bbox', 'keypoints')
+        # specify type here
+        ann_type = ann_type[1]
+        coco_gt_file = './instances_val2017.json'  # change to the real annotation file
+      ...
+      ```
+
+   5. Check the accuracy.
+
+      Run the following command to compute the accuracy.
+
+      ```shell
+      bash infersdk.sh
+      ```
+
+      The inference results are saved in JSON format at "./result.json".  
+      Example accuracy output:
+
+      ```shell
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.455
+       Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.646
+       Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.495
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.278
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.481
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.565
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.358
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.575
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.605
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.424
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.632
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.710
+      ```
+
+   6. Performance testing.
+
+         Enable performance statistics: in the sdk.conf configuration file, set enable_ps=true.
+
+         Adjust the statistics interval: set ps_interval_time=2 to collect performance statistics every 2 seconds.
+
+         Go to the infer/sdk directory and run the inference script to start the SDK inference service.
+
+   7. View the performance results.  
+
+         The performance statistics are written to the log directory "~/MX_SDK_HOME/logs".
+
+         ```shell
+         performance-statistics.log.e2e.xxx
+         performance-statistics.log.plugin.xxx
+         performance-statistics.log.tpr.xxx
+         ```
+
+         The e2e log records end-to-end time; the plugin log records per-plugin time.
+
+# Application on ModelArts
+
+## Create an OBS Bucket
+
+1. Create a bucket.
+
+* Log in to the [OBS console](https://storage.huaweicloud.com/obs) and create an OBS bucket; see the ["Creating a Bucket"](https://support.huaweicloud.com/usermanual-obs/obs_03_0306.html) section for details.
+* Set "Region" to "CN North-Beijing4"
+* Set "Storage Class" to "Standard"
+* Set "Bucket ACL" to "Private"
+* Disable "Multi-AZ"
+* Enter a globally unique bucket name, e.g., "S3"
+* Click "OK"
+
+2. Create folders for the data.
+
+   Create the following folders in the bucket:
+
+* code: training scripts
+* datasets: dataset
+* preckpt: pre-trained model
+* output: ckpt models generated by training
+* logs: training log directory
+
+3. Upload the code.
+
+* Go to the root directory of the yolov4 code
+* Upload all files under the yolov4 directory to the obs://S3/yolov4 folder
+
+## Create an Algorithm
+
+1. Log in to the [ModelArts console](https://console.huaweicloud.com/modelarts) with your Huawei Cloud account and choose "Algorithm Management" in the left navigation pane.
+2. On the "My Algorithms" page, click "Create" in the upper-left corner to open the "Create Algorithm" page.
+3. On the "Create Algorithm" page, fill in the parameters and click "Submit".
+4. Set the basic algorithm information as follows.
+
+```text
+   # ================================ Create the algorithm ================================
+   # (1) Upload your code and dataset to the S3 bucket
+   # (2) Creation method: custom script
+         AI engine: Ascend-Powered-Engine mindspore_1.3.0-cann_5.0.2-py_3.7-euler_2.8.3-aarch64
+         Code directory: /S3/yolov4/
+         Boot file: /S3/yolov4/modelarts.py
+   # (3) Hyperparameters:
+         Name              Type            Required
+         data_url          String          yes
+         train_url         String          yes
+         checkpoint_url    String          yes
+   # (4) Custom hyperparameters: supported
+   # (5) Input data configuration: "mapping name = 'data source 2'", "code path parameter = 'data_url'"; "mapping name = 'data source 3'", "code path parameter = 'checkpoint_url'"
+   # (6) Output data configuration: "mapping name = 'output 1'", "code path parameter = 'train_url'"
+   # (7) Add training constraints: no
+```
+
+## Create a Training Job
+
+1. Log in to ModelArts.
+
+2. Create a training job.
+
+    The training-job parameters are configured as follows.
+
+   ```text
+   # ================================ Create the training job ================================
+   # (1) Algorithm: select the algorithm created above from "My Algorithms"
+   # (2) Training input: '/S3/yolov4/datasets/'
+   # Create an output folder under the OBS bucket directory /S3/yolov4/
+   # (3) Training output: '/S3/yolov4/output/'
+   # (4) Hyperparameters:
+            "data_dir = 'obs://S3/yolov4/datasets/'"
+            "train_dir = 'obs://S3/yolov4/output/'"
+            "checkpoint_url = 'obs://S3/yolov4/preckpt/'"
+   # (5) Set the job log path
+            "log = 'obs://S3/yolov4/log/'"
+   ```
+
+3. Click "Submit" to create the training job.
+
+   A training job usually runs for some time; depending on the amount of data and the resources you select, training takes on the order of minutes or more. The resulting model is saved in the obs://S3/yolov4/output/ folder.
+
+## View Training Job Logs
+
+1. After training completes, open the logs folder and click the log file of the corresponding training job.
+
+2. Log files are generated in the logs folder; you can find results like the following in the log files under /logs:
+
+      ```text
+      2022-03-29 13:36:59,826:INFO:epoch[0], iter[117199], loss:495.129946, per step time: 45.80 ms, fps: 21.83, lr:0.011993246152997017
+      ...
+      2022-03-29 13:53:04,842:INFO:Calculating mAP...
+      2022-03-29 14:24:23,597:INFO:result file path: /home/ma-user/modelarts/outputs/train_url_0/2022-03-29_time_11_31_12/predict_2022_03_29_14_22_47.json
+     ...
+     Accumulating evaluation results...
+     DONE (t=14.87s).
+     2022-03-29 14:27:32,440:INFO:epoch: 1, mAP:
+     Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.000
+     Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.000
+     Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.000
+     Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000
+     Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.000
+     Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.000
+     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.000
+     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.000
+     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.001
+     Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000
+     Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.001
+     Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.002
+      ```
diff --git a/official/cv/yolov4/infer/convert/aipp.config b/official/cv/yolov4/infer/convert/aipp.config
new file mode 100644
index 0000000000000000000000000000000000000000..c92867780fbc3b905755e40b690bd7e34186dcf4
--- /dev/null
+++ b/official/cv/yolov4/infer/convert/aipp.config
@@ -0,0 +1,26 @@
+aipp_op {
+    aipp_mode : static
+    input_format : RGB888_U8
+    related_input_rank : 0
+    csc_switch : false
+    rbuv_swap_switch : true
+    matrix_r0c0 : 256
+    matrix_r0c1 : 0
+    matrix_r0c2 : 359
+    matrix_r1c0 : 256
+    matrix_r1c1 : -88
+    matrix_r1c2 : -183
+    matrix_r2c0 : 256
+    matrix_r2c1 : 454
+    matrix_r2c2 : 0
+    input_bias_0 : 0
+    input_bias_1 : 128
+    input_bias_2 : 128
+    
+    mean_chn_0 : 124
+    mean_chn_1 : 117
+    mean_chn_2 : 104
+    var_reci_chn_0 : 0.0171247538316637
+    var_reci_chn_1 : 0.0175070028011204
+    var_reci_chn_2 : 0.0174291938997821
+}
\ No newline at end of file
diff --git a/official/cv/yolov4/infer/convert/air2om.sh b/official/cv/yolov4/infer/convert/air2om.sh
new file mode 100644
index 0000000000000000000000000000000000000000..509102a0ae7bc2844c31534c464602d799860e41
--- /dev/null
+++ b/official/cv/yolov4/infer/convert/air2om.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+model_path=$1
+output_model_name=$2
+
+atc \
+    --model=$model_path \
+    --framework=1 \
+    --output=$output_model_name \
+    --input_format=NCHW --input_shape="actual_input_1:1,3,416,416" \
+    --enable_small_channel=1 \
+    --log=error \
+    --soc_version=Ascend310 \
+    --insert_op_conf=./aipp.config
\ No newline at end of file
diff --git a/official/cv/yolov4/infer/data/models/coco_trainval_anns.py b/official/cv/yolov4/infer/data/models/coco_trainval_anns.py
new file mode 100644
index 0000000000000000000000000000000000000000..6df56a50f280cdcbf7fc0554b69b7cb4107bed5d
--- /dev/null
+++ b/official/cv/yolov4/infer/data/models/coco_trainval_anns.py
@@ -0,0 +1,88 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import argparse
+import json
+import os
+from collections import defaultdict
+import cv2
+
+parser = argparse.ArgumentParser(description='YOLOV4')
+parser.add_argument('--data_url', type=str, default='./datasets', help='coco2017 datasets')
+parser.add_argument('--train_url', type=str, default='./infer/data/models/', help='save txt file')
+parser.add_argument('--val_url', type=str, default='./infer/data/images/', help='coco2017 val infer datasets')
+args_opt, _ = parser.parse_known_args()
+
+def name_box_parse(json_path):
+    with open(json_path, encoding='utf-8') as f:
+        data = json.load(f)
+        annotations = data['annotations']
+        for ant in annotations:
+            image_id = ant['image_id']
+            name = str("%012d.jpg" % image_id)
+            cat = ant['category_id']
+
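+            # Map the sparse COCO category ids (1-90 with gaps) onto the
+            # contiguous 0-79 class indices expected by the model.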
+            if 1 <= cat <= 11:
+                cat = cat - 1
+            elif 13 <= cat <= 25:
+                cat = cat - 2
+            elif 27 <= cat <= 28:
+                cat = cat - 3
+            elif 31 <= cat <= 44:
+                cat = cat - 5
+            elif 46 <= cat <= 65:
+                cat = cat - 6
+            elif cat == 67:
+                cat = cat - 7
+            elif cat == 70:
+                cat = cat - 9
+            elif 72 <= cat <= 82:
+                cat = cat - 10
+            elif 84 <= cat <= 90:
+                cat = cat - 11
+            name_box_id[name].append([ant['bbox'], cat])
+
+
+name_box_id = defaultdict(list)
+id_name = dict()
+name_box_parse(os.path.join(args_opt.data_url, 'annotations', 'instances_val2017.json'))
+
+with open(os.path.join(args_opt.train_url, 'trainval.txt'), 'w') as g:
+    ii = 0
+    for idx, key in enumerate(name_box_id.keys()):
+        print('trainval', key.split('/')[-1])
+
+        g.write('%d ' % ii)
+        ii += 1
+        g.write(os.path.join(args_opt.val_url, key))
+
+        print(os.path.join(args_opt.data_url, 'val2017', key))
+
+        img = cv2.imread(os.path.join(args_opt.data_url, 'val2017', key))
+        h, w, c = img.shape
+
+        g.write(' %d %d' % (w, h))
+
+        box_infos = name_box_id[key]
+        for info in box_infos:
+            x_min = int(info[0][0])
+            y_min = int(info[0][1])
+            x_max = x_min + int(info[0][2])
+            y_max = y_min + int(info[0][3])
+
+            box_info = " %d %d %d %d %d" % (
+                int(info[1]), x_min, y_min, x_max, y_max
+            )
+            g.write(box_info)
+        g.write('\n')
diff --git a/official/cv/yolov4/infer/data/models/object_task_metric.py b/official/cv/yolov4/infer/data/models/object_task_metric.py
new file mode 100644
index 0000000000000000000000000000000000000000..74fca693814e4c558169aaa3726530b93be1ff32
--- /dev/null
+++ b/official/cv/yolov4/infer/data/models/object_task_metric.py
@@ -0,0 +1,151 @@
+#Copyright 2022 Huawei Technologies Co., Ltd
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+#http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
+
+import os
+import json
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+
+
+def get_image_id(label_file):
+    """
+    :param: label file path, default is coco2017_minival.txt
+    :return: image id
+    """
+    image_list = []
+    with open(label_file, 'r')as f_read:
+        ban_list = f_read.read().split('\n')[:-1]
+        for item in ban_list:
+            image_path = item.split(' ')[1]
+            image_name = image_path.split('/')[-1]
+            image_id = image_name.split('.')[0].split('_')[-1]
+            image_list.append(int(image_id))
+    return image_list
+
+
+def get_category_id(class_id):
+    """
+    :param: class id which corresponding coco.names
+    :return: category id is used in instances_val2017.json
+    """
+    if 0 <= class_id <= 10:
+        class_id = class_id + 1
+    elif 11 <= class_id <= 23:
+        class_id = class_id + 2
+    elif 24 <= class_id <= 25:
+        class_id = class_id + 3
+    elif 26 <= class_id <= 39:
+        class_id = class_id + 5
+    elif 40 <= class_id <= 59:
+        class_id = class_id + 6
+    elif class_id == 60:
+        class_id = class_id + 7
+    elif class_id == 61:
+        class_id = class_id + 9
+    elif 62 <= class_id <= 72:
+        class_id = class_id + 10
+    elif 73 <= class_id <= 79:
+        class_id = class_id + 11
+    return class_id
+
+def get_img_set(anno_json_path):
+    """Get image path and annotation from COCO."""
+    need_img_ids = []
+    coco = COCO(anno_json_path)
+    image_ids = coco.getImgIds()
+    print("first dataset is {}".format(len(image_ids)))
+    for img_id in image_ids:
+        iscrowd = False
+        anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)
+        anno = coco.loadAnns(anno_ids)
+        for label in anno:
+            iscrowd = iscrowd or label["iscrowd"]
+
+        if iscrowd:
+            continue
+        need_img_ids.append(img_id)
+
+    return need_img_ids
+
+def get_dict_from_file(file_path, id_list):
+    """
+    :param: file_path contain all infer result
+    :param: id_list contain all images id which is corresponding instances_val2017.json
+    :return: dict_list contain infer result of every images
+    """
+    print(len(id_list))
+    ls = []
+    image_dict = {}
+    count = -1
+    with open(file_path, 'r')as fs:
+        ban_list = fs.read().split('\n')
+        for item in ban_list:
+            if item == '':
+                continue
+            if item[0] != '#':
+                count = count + 1
+                continue
+            image_list = item.split(',')
+            image_dict['image_id'] = id_list[count]
+            image_dict['category_id'] = get_category_id(int(image_list[-1].strip().split(' ')[-1]))
+            bbox_list = [float(i) for i in image_list[1].strip().split(' ')[1:]]
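+            # convert [x0, y0, x1, y1] corners to the COCO [x, y, width, height] format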
+            bbox_list[2] = bbox_list[2] - bbox_list[0]
+            bbox_list[3] = bbox_list[3] - bbox_list[1]
+            image_dict['bbox'] = bbox_list
+            image_dict['score'] = float(image_list[2].strip().split(' ')[-1])
+            ls.append(image_dict.copy())
+    return ls
+
+
+def get_img_id(file_name):
+    """
+    get image id list from result data
+    """
+    ls = []
+    myset = []
+    annos = json.load(open(file_name, 'r'))
+    for anno in annos:
+        ls.append(anno['image_id'])
+    myset = {}.fromkeys(ls).keys()
+    return myset
+
+
+if __name__ == "__main__":
+    ban_path = './trainval.txt'
+    input_file = './result/result.txt'
+    if not os.path.exists(ban_path):
+        print('The infer text file does not exist.')
+    if not os.path.exists(input_file):
+        print('The result text file does not exist.')
+
+    image_id_list = get_image_id(ban_path)
+    result_dict = get_dict_from_file(input_file, image_id_list)
+    json_file_name = './result.json'
+    with open(json_file_name, 'w') as f:
+        json.dump(result_dict, f)
+
+    # set iouType to 'segm', 'bbox' or 'keypoints'
+    ann_type = ('segm', 'bbox', 'keypoints')
+    # specify type here
+    ann_type = ann_type[1]
+    coco_gt_file = './instances_val2017.json'
+    coco_gt = COCO(coco_gt_file)
+    coco_dt_file = './result.json'
+
+    coco_dt = coco_gt.loadRes(coco_dt_file)
+    coco_eval = COCOeval(coco_gt, coco_dt, ann_type)
+    coco_eval.params.imgIds = get_img_set(coco_gt_file)
+    coco_eval.evaluate()
+    coco_eval.accumulate()
+    coco_eval.summarize()
diff --git a/official/cv/yolov4/infer/data/models/yolov4_coco2017_acc_test.cfg b/official/cv/yolov4/infer/data/models/yolov4_coco2017_acc_test.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..d15c07fcabc40945798ffef4fb7ce701ee8b7f12
--- /dev/null
+++ b/official/cv/yolov4/infer/data/models/yolov4_coco2017_acc_test.cfg
@@ -0,0 +1,11 @@
+# hyper-parameter
+CLASS_NUM=80
+BIASES_NUM=18
+BIASES=12,16,19,36,40,28,36,75,76,55,72,146,142,110,192,243,459,401
+SCORE_THRESH=0.001
+OBJECTNESS_THRESH=0.001
+IOU_THRESH=0.6
+YOLO_TYPE=3
+ANCHOR_DIM=3
+MODEL_TYPE=0
+RESIZE_FLAG=0
\ No newline at end of file
diff --git a/official/cv/yolov4/infer/docker_start_infer.sh b/official/cv/yolov4/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f8686b739ce0a2ca0f9d27d9cf2ff20b5034f26d
--- /dev/null
+++ b/official/cv/yolov4/infer/docker_start_infer.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+share_dir=$2
+data_dir=$3
+echo "$1"
+echo "$2"
+if [ -z "${docker_image}" ]; then
+    echo "please input docker_image"
+    exit 1
+fi
+
+if [ ! -d "${share_dir}" ]; then
+    echo "please input share directory that contains dataset, models and codes"
+    exit 1
+fi
+
+
+docker run -it \
+    --device=/dev/davinci0 \
+    --device=/dev/davinci_manager \
+    --device=/dev/devmm_svm \
+    --device=/dev/hisi_hdc \
+    --privileged \
+    -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
+    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+    -v ${data_dir}:${data_dir}  \
+    -v ${share_dir}:${share_dir} \
+    -u root \
+    ${docker_image} \
+    /bin/bash
diff --git a/official/cv/yolov4/infer/mxbase/CMakeLists.txt b/official/cv/yolov4/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a2ba5770d37047e70013e003ca9fed9f550566d1
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,43 @@
+cmake_minimum_required(VERSION 3.10.0)
+project(Yolov4post)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+
+#set(PLUGIN_NAME "Yolov4_mindspore_post")
+set(TARGET_LIBRARY Yolov4_mindspore_post)
+set(TARGET_MAIN Yolov4_mindspore)
+
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/ascend-toolkit/latest/acllib)
+
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+include_directories($ENV{MX_SDK_HOME}/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/opencv4)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/gstreamer-1.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/glib-2.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/lib/glib-2.0/include)
+
+link_directories($ENV{MX_SDK_HOME}/lib)
+link_directories($ENV{MX_SDK_HOME}/opensource/lib/)
+
+add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations)
+add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}")
+add_compile_options("-Dgoogle=mindxsdk_private")
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+
+#message("ACL_LIB_PATH:${ACL_LIB_PATH}/lib64/.")
+#include_directories(${CMAKE_CURRENT_BINARY_DIR})
+include_directories(${ACL_LIB_PATH}/include)
+link_directories(${ACL_LIB_PATH}/lib64/)
+
+add_library(${TARGET_LIBRARY} SHARED src/PostProcess/Yolov4MindsporePost.cpp)
+
+target_link_libraries(${TARGET_LIBRARY} glib-2.0 gstreamer-1.0 gobject-2.0 gstbase-1.0 gmodule-2.0)
+target_link_libraries(${TARGET_LIBRARY} plugintoolkit mxpidatatype mxbase)
+target_link_libraries(${TARGET_LIBRARY} -Wl,-z,relro,-z,now,-z,noexecstack -s)
+
+message("TARGET_LIBRARY:${TARGET_LIBRARY}.")
+
+add_executable(${TARGET_MAIN} src/main.cpp src/Yolov4Detection.cpp)
+target_link_libraries(${TARGET_MAIN} ${TARGET_LIBRARY} glog  cpprest mxbase libascendcl.so  opencv_world)
diff --git a/official/cv/yolov4/infer/mxbase/build.sh b/official/cv/yolov4/infer/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..39064b27800a247e75f196c382a82ec63f8813bc
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/build.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+
+# env
+rm -rf build
+mkdir -p build
+cd build || exit
+
+function make_plugin() {
+    if ! cmake ..;
+    then
+      echo "cmake failed."
+      return 1
+    fi
+
+    if ! (make);
+    then
+      echo "make failed."
+      return 1
+    fi
+
+    return 0
+}
+
+if make_plugin;
+then
+  echo "INFO: Build successfully."
+else
+  echo "ERROR: Build failed."
+fi
+
+cd - || exit
diff --git a/official/cv/yolov4/infer/mxbase/infermxbase.sh b/official/cv/yolov4/infer/mxbase/infermxbase.sh
new file mode 100644
index 0000000000000000000000000000000000000000..85e7e1b5912aaae917cf347b2ead13053d563880
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/infermxbase.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Simple log helper functions
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/${ASCEND_VERSION}/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
+export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
+
+# set PYTHONPATH so that StreamManagerApi.py can be imported
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+python3 object_task_metric.py
+exit 0
diff --git a/official/cv/yolov4/infer/mxbase/src/PostProcess/Yolov4MindsporePost.cpp b/official/cv/yolov4/infer/mxbase/src/PostProcess/Yolov4MindsporePost.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..63713a34725a4605f75af9c0d6671889b83903cd
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/src/PostProcess/Yolov4MindsporePost.cpp
@@ -0,0 +1,291 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Yolov4MindsporePost.h"
+#include <algorithm>
+#include <string>
+#include <memory>
+#include "MxBase/Log/Log.h"
+#include "MxBase/CV/ObjectDetection/Nms/Nms.h"
+
+namespace {
+const int SCALE = 32;
+const int BIASESDIM = 2;
+const int OFFSETWIDTH = 2;
+const int OFFSETHEIGHT = 3;
+const int OFFSETBIASES = 1;
+const int OFFSETOBJECTNESS = 1;
+
+const int NHWC_HEIGHTINDEX = 1;
+const int NHWC_WIDTHINDEX = 2;
+const int NCHW_HEIGHTINDEX = 2;
+const int NCHW_WIDTHINDEX = 3;
+const int YOLO_INFO_DIM = 5;
+
+auto uint8Deleter = [] (uint8_t* p) { };
+}  // namespace
+
+namespace localParameter {
+    const uint32_t VECTOR_FIRST_INDEX = 0;
+    const uint32_t VECTOR_SECOND_INDEX = 1;
+    const uint32_t VECTOR_THIRD_INDEX = 2;
+    const uint32_t VECTOR_FOURTH_INDEX = 3;
+    const uint32_t VECTOR_FIFTH_INDEX = 4;
+}
+
+namespace MxBase {
+Yolov4PostProcess& Yolov4PostProcess::operator=(const Yolov4PostProcess &other) {
+    if (this == &other) {
+        return *this;
+    }
+    ObjectPostProcessBase::operator=(other);
+    objectnessThresh_ = other.objectnessThresh_;  // Threshold of objectness value
+    iouThresh_ = other.iouThresh_;
+    anchorDim_ = other.anchorDim_;
+    biasesNum_ = other.biasesNum_;
+    yoloType_ = other.yoloType_;
+    modelType_ = other.modelType_;
+    inputType_ = other.inputType_;
+    biases_ = other.biases_;
+    return *this;
+}
+
+APP_ERROR Yolov4PostProcess::Init(const std::map<std::string, std::shared_ptr<void>>& postConfig) {
+    LogDebug << "Start to Init Yolov4PostProcess.";
+    APP_ERROR ret = ObjectPostProcessBase::Init(postConfig);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Fail to superInit in ObjectPostProcessBase.";
+        return ret;
+    }
+
+    configData_.GetFileValue<int>("BIASES_NUM", biasesNum_);
+    std::string str;
+    configData_.GetFileValue<std::string>("BIASES", str);
+    configData_.GetFileValue<float>("OBJECTNESS_THRESH", objectnessThresh_);
+    configData_.GetFileValue<float>("IOU_THRESH", iouThresh_);
+    configData_.GetFileValue<int>("YOLO_TYPE", yoloType_);
+    configData_.GetFileValue<int>("MODEL_TYPE", modelType_);
+    configData_.GetFileValue<int>("YOLO_VERSION", yoloVersion_);
+    configData_.GetFileValue<int>("INPUT_TYPE", inputType_);
+    configData_.GetFileValue<int>("ANCHOR_DIM", anchorDim_);
+    ret = GetBiases(str);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Failed to get biases.";
+        return ret;
+    }
+    LogDebug << "End to Init Yolov4PostProcess.";
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4PostProcess::DeInit() {
+    return APP_ERR_OK;
+}
+
+bool Yolov4PostProcess::IsValidTensors(const std::vector<TensorBase> &tensors) {
+    if (tensors.size() != (size_t)yoloType_) {
+        LogError << "number of tensors (" << tensors.size() << ") is unequal to yoloType_ ("
+                 << yoloType_ << ").";
+        return false;
+    }
+    if (yoloVersion_ == YOLOV4_VERSION) {
+        for (size_t i = 0; i < tensors.size(); i++) {
+            auto shape = tensors[i].GetShape();
+            if (shape.size() < localParameter::VECTOR_FIFTH_INDEX) {
+                LogError << "dimension count of tensor [" << i << "] is less than "
+                         << localParameter::VECTOR_FIFTH_INDEX << ".";
+                return false;
+            }
+            uint32_t channelNumber = 1;
+            int startIndex = modelType_ ? localParameter::VECTOR_SECOND_INDEX : localParameter::VECTOR_FOURTH_INDEX;
+            int endIndex = modelType_ ? (shape.size() - localParameter::VECTOR_THIRD_INDEX) : shape.size();
+            for (int j = startIndex; j < endIndex; j++) {
+                channelNumber *= shape[j];
+            }
+            if (channelNumber != anchorDim_ * (classNum_ + YOLO_INFO_DIM)) {
+                LogError << "channelNumber(" << channelNumber << ") != anchorDim_ * (classNum_ + 5).";
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+void Yolov4PostProcess::ObjectDetectionOutput(const std::vector<TensorBase>& tensors,
+                                              std::vector<std::vector<ObjectInfo>>& objectInfos,
+                                              const std::vector<ResizedImageInfo>& resizedImageInfos) {
+    LogDebug << "Yolov4PostProcess start to write results.";
+    if (tensors.size() == 0) {
+        return;
+    }
+    auto shape = tensors[0].GetShape();
+    if (shape.size() == 0) {
+        return;
+    }
+    uint32_t batchSize = shape[0];
+    for (uint32_t i = 0; i < batchSize; i++) {
+        std::vector<std::shared_ptr<void>> featLayerData = {};
+        std::vector<std::vector<size_t>> featLayerShapes = {};
+        for (uint32_t j = 0; j < tensors.size(); j++) {
+            auto dataPtr = reinterpret_cast<uint8_t *>(tensors[j].GetBuffer()) +
+                           i * tensors[j].GetByteSize() / batchSize;
+            std::shared_ptr<void> tmpPointer;
+            tmpPointer.reset(dataPtr, uint8Deleter);
+            featLayerData.push_back(tmpPointer);
+            shape = tensors[j].GetShape();
+            std::vector<size_t> featLayerShape(shape.size());
+            std::transform(shape.begin(), shape.end(), featLayerShape.begin(),
+                           [](uint32_t s) { return static_cast<size_t>(s); });
+            featLayerShapes.push_back(featLayerShape);
+        }
+        std::vector<ObjectInfo> objectInfo;
+        GenerateBbox(featLayerData, objectInfo, featLayerShapes, resizedImageInfos[i].widthResize,
+            resizedImageInfos[i].heightResize);
+        MxBase::NmsSort(objectInfo, iouThresh_);
+        objectInfos.push_back(objectInfo);
+    }
+    LogDebug << "Yolov4PostProcess write results success.";
+}
+
+APP_ERROR Yolov4PostProcess::Process(const std::vector<TensorBase> &tensors,
+                                     std::vector<std::vector<ObjectInfo>> &objectInfos,
+                                     const std::vector<ResizedImageInfo> &resizedImageInfos,
+                                     const std::map<std::string, std::shared_ptr<void>> &configParamMap) {
+    LogDebug << "Start to Process Yolov4PostProcess.";
+    APP_ERROR ret = APP_ERR_OK;
+    auto inputs = tensors;
+    ret = CheckAndMoveTensors(inputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "CheckAndMoveTensors failed. ret=" << ret;
+        return ret;
+    }
+
+    ObjectDetectionOutput(inputs, objectInfos, resizedImageInfos);
+
+    for (uint32_t i = 0; i < resizedImageInfos.size(); i++) {
+        CoordinatesReduction(i, resizedImageInfos[i], objectInfos[i]);
+    }
+    LogObjectInfos(objectInfos);
+    LogDebug << "End to Process Yolov4PostProcess.";
+    return APP_ERR_OK;
+}
+
+void Yolov4PostProcess::CompareProb(int& classID, float& maxProb, float classProb, int classIdx) {
+    if (classProb > maxProb) {
+        maxProb = classProb;
+        classID = classIdx;
+    }
+}
+
+void Yolov4PostProcess::SelectClassNHWC(std::shared_ptr<void> netout, NetInfo info,
+                                          std::vector<MxBase::ObjectInfo>& detBoxes, int stride) {
+    const int offsetY = 1;
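+    // NHWC layout: each of the `stride` grid cells holds anchorDim consecutive
+    // predictions of (x, y, w, h, objectness, classNum class scores).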
+    for (int j = 0; j < stride; ++j) {
+        for (int k = 0; k < info.anchorDim; ++k) {
+            int bIdx = (info.bboxDim + 1 + info.classNum) * info.anchorDim * j +
+                       k * (info.bboxDim + 1 + info.classNum);
+            int oIdx = bIdx + info.bboxDim;  // objectness index
+            float objectness = static_cast<float *>(netout.get())[oIdx];
+            if (objectness <= objectnessThresh_) {
+                continue;
+            }
+            int classID = -1;
+            float maxProb = scoreThresh_;
+            for (int c = 0; c < info.classNum; ++c) {
+                float clsProb = static_cast<float *>(netout.get())[bIdx +
+                    (info.bboxDim + OFFSETOBJECTNESS + c)] * objectness;
+                CompareProb(classID, maxProb, clsProb, c);
+            }
+            if (classID < 0) continue;
+            MxBase::ObjectInfo det;
+            float x = static_cast<float *>(netout.get())[bIdx];
+            float y = static_cast<float *>(netout.get())[bIdx + offsetY];
+            float width = static_cast<float *>(netout.get())[bIdx + OFFSETWIDTH];
+            float height = static_cast<float *>(netout.get())[bIdx + OFFSETHEIGHT];
+            det.x0 = std::max(0.0f, x - width / COORDINATE_PARAM);
+            det.x1 = std::min(1.0f, x + width / COORDINATE_PARAM);
+            det.y0 = std::max(0.0f, y - height / COORDINATE_PARAM);
+            det.y1 = std::min(1.0f, y + height / COORDINATE_PARAM);
+            det.classId = classID;
+            det.className = configData_.GetClassName(classID);
+            det.confidence = maxProb;
+            if (det.confidence < separateScoreThresh_[classID]) continue;
+            detBoxes.emplace_back(det);
+        }
+    }
+}
+
+void Yolov4PostProcess::GenerateBbox(std::vector<std::shared_ptr<void>> featLayerData,
+                                     std::vector<MxBase::ObjectInfo> &detBoxes,
+                                     const std::vector<std::vector<size_t>>& featLayerShapes, const int netWidth,
+                                     const int netHeight) {
+    NetInfo netInfo;
+    netInfo.anchorDim = anchorDim_;
+    netInfo.bboxDim = BOX_DIM;
+    netInfo.classNum = classNum_;
+    netInfo.netWidth = netWidth;
+    netInfo.netHeight = netHeight;
+    for (int i = 0; i < yoloType_; ++i) {
+        int widthIndex_ = modelType_ ? NCHW_WIDTHINDEX : NHWC_WIDTHINDEX;
+        int heightIndex_ = modelType_ ? NCHW_HEIGHTINDEX : NHWC_HEIGHTINDEX;
+        OutputLayer layer = {featLayerShapes[i][widthIndex_], featLayerShapes[i][heightIndex_]};
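+        // Map this feature map to its anchor group via its downsampling ratio:
+        // width * 32 / netWidth is 1, 2 or 4, so logOrder is 0, 1 or 2.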
+        int logOrder = log(featLayerShapes[i][widthIndex_] * SCALE / netWidth) / log(BIASESDIM);
+        int startIdx = (yoloType_ - 1 - logOrder) * netInfo.anchorDim * BIASESDIM;
+        int endIdx = startIdx + netInfo.anchorDim * BIASESDIM;
+        int idx = 0;
+        for (int j = startIdx; j < endIdx; ++j) {
+            layer.anchors[idx++] = biases_[j];
+        }
+        int stride = layer.width * layer.height;
+        std::shared_ptr<void> netout = featLayerData[i];
+        SelectClassNHWC(netout, netInfo, detBoxes, stride);
+    }
+}
+
+APP_ERROR Yolov4PostProcess::GetBiases(std::string& strBiases) {
+    if (biasesNum_ <= 0) {
+        LogError << GetError(APP_ERR_COMM_INVALID_PARAM) << "Failed to get biasesNum (" << biasesNum_ << ").";
+        return APP_ERR_COMM_INVALID_PARAM;
+    }
+    biases_.clear();
+    int i = 0;
+    // Parse the comma-separated bias list, e.g. "12,16,19,36,...".
+    size_t pos = strBiases.find(',');
+    while (pos != std::string::npos && i < biasesNum_) {
+        biases_.push_back(stof(strBiases.substr(0, pos)));
+        strBiases = strBiases.substr(pos + 1);
+        i++;
+        pos = strBiases.find(',');
+    }
+    if (i != biasesNum_ - 1 || strBiases.size() == 0) {
+        LogError << GetError(APP_ERR_COMM_INVALID_PARAM) << "biasesNum (" << biasesNum_
+                 << ") does not match the number of biases parsed from the BIASES string.";
+        return APP_ERR_COMM_INVALID_PARAM;
+    }
+    biases_.push_back(stof(strBiases));
+    return APP_ERR_OK;
+}
+
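+// Exported factory symbol used when this library is loaded as a post-processing
+// plugin (see postProcessLibPath in the SDK pipeline).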
+#ifndef ENABLE_POST_PROCESS_INSTANCE
+extern "C" {
+std::shared_ptr<MxBase::Yolov4PostProcess> GetObjectInstance() {
+    LogInfo << "Begin to get Yolov4PostProcess instance.";
+    auto instance = std::make_shared<Yolov4PostProcess>();
+    LogInfo << "End to get Yolov4PostProcess instance.";
+    return instance;
+}
+}
+#endif
+}  // namespace MxBase
diff --git a/official/cv/yolov4/infer/mxbase/src/PostProcess/Yolov4MindsporePost.h b/official/cv/yolov4/infer/mxbase/src/PostProcess/Yolov4MindsporePost.h
new file mode 100644
index 0000000000000000000000000000000000000000..b9afe9ab326f72b5d689d9d27cd2ec817b34a343
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/src/PostProcess/Yolov4MindsporePost.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef YOLOV4_POST_PROCESS_H
+#define YOLOV4_POST_PROCESS_H
+#include <algorithm>
+#include <vector>
+#include <map>
+#include <string>
+#include <memory>
+#include <opencv4/opencv2/opencv.hpp>
+#include "MxBase/ErrorCode/ErrorCode.h"
+#include "MxBase/CV/Core/DataType.h"
+#include "MxBase/PostProcessBases/ObjectPostProcessBase.h"
+
+const float DEFAULT_OBJECTNESS_THRESH = 0.3;
+const float DEFAULT_IOU_THRESH = 0.45;
+const int DEFAULT_ANCHOR_DIM = 3;
+const int DEFAULT_BIASES_NUM = 18;
+const int DEFAULT_YOLO_TYPE = 3;
+const int DEFAULT_YOLO_VERSION = 4;
+const int YOLOV3_VERSION = 3;
+const int YOLOV4_VERSION = 4;
+const int YOLOV5_VERSION = 5;
+const int ANCHOR_NUM = 6;
+struct OutputLayer {
+    size_t width;
+    size_t height;
+    float anchors[ANCHOR_NUM];
+};
+
+struct NetInfo {
+    int anchorDim;
+    int classNum;
+    int bboxDim;
+    int netWidth;
+    int netHeight;
+};
+
+namespace MxBase {
+class Yolov4PostProcess : public ObjectPostProcessBase {
+ public:
+     Yolov4PostProcess() = default;
+
+     ~Yolov4PostProcess() = default;
+
+     Yolov4PostProcess(const Yolov4PostProcess &other) = default;
+
+     Yolov4PostProcess &operator=(const Yolov4PostProcess &other);
+
+     APP_ERROR Init(const std::map<std::string, std::shared_ptr<void>> &postConfig) override;
+
+     APP_ERROR DeInit() override;
+
+     APP_ERROR Process(const std::vector<TensorBase> &tensors, std::vector<std::vector<ObjectInfo>> &objectInfos,
+                      const std::vector<ResizedImageInfo> &resizedImageInfos = {},
+                      const std::map<std::string, std::shared_ptr<void>> &configParamMap = {}) override;
+
+ protected:
+     bool IsValidTensors(const std::vector<TensorBase> &tensors);
+
+     void ObjectDetectionOutput(const std::vector<TensorBase> &tensors,
+                               std::vector<std::vector<ObjectInfo>> &objectInfos,
+                               const std::vector<ResizedImageInfo> &resizedImageInfos = {});
+
+     void CompareProb(int& classID, float& maxProb, float classProb, int classIdx);
+     void SelectClassNHWC(std::shared_ptr<void> netout, NetInfo info, std::vector<MxBase::ObjectInfo>& detBoxes,
+                         int stride);
+     void GenerateBbox(std::vector<std::shared_ptr<void>> featLayerData,
+                      std::vector<MxBase::ObjectInfo> &detBoxes,
+                      const std::vector<std::vector<size_t>>& featLayerShapes,
+                      const int netWidth, const int netHeight);
+     APP_ERROR GetBiases(std::string& strBiases);
+
+ protected:
+     float objectnessThresh_ = DEFAULT_OBJECTNESS_THRESH;  // Threshold of objectness value
+     float iouThresh_ = DEFAULT_IOU_THRESH;  // Non-Maximum Suppression threshold
+     int anchorDim_ = DEFAULT_ANCHOR_DIM;
+     int biasesNum_ = DEFAULT_BIASES_NUM;  // anchors, generate from train data, coco dataset
+     int yoloType_ = DEFAULT_YOLO_TYPE;
+     int modelType_ = 0;
+     int yoloVersion_ = DEFAULT_YOLO_VERSION;
+     int inputType_ = 0;
+     std::vector<float> biases_ = {};
+};
+#ifndef ENABLE_POST_PROCESS_INSTANCE
+extern "C" {
+std::shared_ptr<MxBase::Yolov4PostProcess> GetObjectInstance();
+}
+#endif
+}  // namespace MxBase
+#endif
diff --git a/official/cv/yolov4/infer/mxbase/src/Yolov4Detection.cpp b/official/cv/yolov4/infer/mxbase/src/Yolov4Detection.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c520c13d5094dab3fce5917183294eb0336d3cf8
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/src/Yolov4Detection.cpp
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Yolov4Detection.h"
+#include <unistd.h>
+#include <sys/stat.h>
+#include <utility>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+
+namespace {
+const uint32_t YUV_BYTE_NU = 3;
+const uint32_t YUV_BYTE_DE = 2;
+const uint32_t VPC_H_ALIGN = 2;
+}  // namespace
+
+APP_ERROR Yolov4DetectionOpencv::LoadLabels(const std::string &labelPath, std::map<int, std::string> &labelMap) {
+    std::ifstream infile;
+    // open label file
+    infile.open(labelPath, std::ios_base::in);
+    std::string s;
+    // check label file validity
+    if (infile.fail()) {
+        LogError << "Failed to open label file: " << labelPath << ".";
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    labelMap.clear();
+    // construct label map
+    int count = 0;
+    while (std::getline(infile, s)) {
+        if (s[0] == '#') {
+            continue;
+        }
+        size_t eraseIndex = s.find_last_not_of("\r\n\t");
+        if (eraseIndex != std::string::npos) {
+            s.erase(eraseIndex + 1, s.size() - eraseIndex);
+        }
+        labelMap.insert(std::pair<int, std::string>(count, s));
+        count++;
+    }
+    infile.close();
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+    dvppWrapper_ = std::make_shared<MxBase::DvppWrapper>();
+    ret = dvppWrapper_->Init();
+    if (ret != APP_ERR_OK) {
+        LogError << "DvppWrapper init failed, ret=" << ret << ".";
+        return ret;
+    }
+    model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+        return ret;
+    }
+    MxBase::ConfigData configData;
+    const std::string checkTensor = initParam.checkTensor ? "true" : "false";
+    configData.SetJsonValue("CLASS_NUM", std::to_string(initParam.classNum));
+    configData.SetJsonValue("BIASES_NUM", std::to_string(initParam.biasesNum));
+    configData.SetJsonValue("BIASES", initParam.biases);
+    configData.SetJsonValue("OBJECTNESS_THRESH", initParam.objectnessThresh);
+    configData.SetJsonValue("IOU_THRESH", initParam.iouThresh);
+    configData.SetJsonValue("SCORE_THRESH", initParam.scoreThresh);
+    configData.SetJsonValue("YOLO_TYPE", std::to_string(initParam.yoloType));
+    configData.SetJsonValue("MODEL_TYPE", std::to_string(initParam.modelType));
+    configData.SetJsonValue("INPUT_TYPE", std::to_string(initParam.inputType));
+    configData.SetJsonValue("ANCHOR_DIM", std::to_string(initParam.anchorDim));
+    configData.SetJsonValue("CHECK_MODEL", checkTensor);
+
+    auto jsonStr = configData.GetCfgJson().serialize();
+    std::map<std::string, std::shared_ptr<void>> config;
+    config["postProcessConfigContent"] = std::make_shared<std::string>(jsonStr);
+    config["labelPath"] = std::make_shared<std::string>(initParam.labelPath);
+
+    post_ = std::make_shared<MxBase::Yolov4PostProcess>();
+    ret = post_->Init(config);
+    if (ret != APP_ERR_OK) {
+        LogError << "Yolov4PostProcess init failed, ret=" << ret << ".";
+        return ret;
+    }
+    // load labels from file
+    ret = LoadLabels(initParam.labelPath, labelMap_);
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to load labels, ret=" << ret << ".";
+        return ret;
+    }
+    LogInfo << "End to Init Yolov4DetectionOpencv.";
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::DeInit() {
+    dvppWrapper_->DeInit();
+    model_->DeInit();
+    post_->DeInit();
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::ReadImage(const std::string &imgPath, cv::Mat &imageMat) {
+    imageMat = cv::imread(imgPath, cv::IMREAD_COLOR);
+    imageWidth_ = imageMat.cols;
+    imageHeight_ = imageMat.rows;
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::Resize(cv::Mat &srcImageMat, cv::Mat &dstImageMat) {
+    static constexpr uint32_t resizeHeight = 608;
+    static constexpr uint32_t resizeWidth = 608;
+    cv::resize(srcImageMat, dstImageMat, cv::Size(resizeWidth, resizeHeight));
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase) {
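+    // Copy the packed HWC uint8 image from host to device memory and wrap it as a tensor.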
+    const uint32_t dataSize = imageMat.cols * imageMat.rows * MxBase::YUV444_RGB_WIDTH_NU;
+    MxBase::MemoryData memoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE, deviceId_);
+    MxBase::MemoryData memoryDataSrc(imageMat.data, dataSize, MxBase::MemoryData::MEMORY_HOST_MALLOC);
+
+    APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc failed.";
+        return ret;
+    }
+    std::vector<uint32_t> shape = {imageMat.rows * MxBase::YUV444_RGB_WIDTH_NU, static_cast<uint32_t>(imageMat.cols)};
+    tensorBase = MxBase::TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_UINT8);
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::Inference(const std::vector<MxBase::TensorBase> &inputs,
+    std::vector<MxBase::TensorBase> &outputs) {
+    auto dtypes = model_->GetOutputDataType();
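+    // Pre-allocate a device tensor for each model output described in modelDesc_.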
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector<uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        MxBase::TensorBase tensor(shape, dtypes[i], MxBase::MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
+        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs.push_back(tensor);
+    }
+
+    MxBase::DynamicInfo dynamicInfo = {};
+    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+    auto startTime = std::chrono::high_resolution_clock::now();
+    APP_ERROR ret = model_->ModelInference(inputs, outputs, dynamicInfo);
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    g_inferCost.push_back(costMs);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::PostProcess(const std::vector<MxBase::TensorBase> &outputs,
+                                             std::vector<std::vector<MxBase::ObjectInfo>> &objInfos) {
+    MxBase::ResizedImageInfo imgInfo;
+    imgInfo.widthOriginal = imageWidth_;
+    imgInfo.heightOriginal = imageHeight_;
+    imgInfo.widthResize = 608;
+    imgInfo.heightResize = 608;
+    imgInfo.resizeType = MxBase::RESIZER_STRETCHING;
+    std::vector<MxBase::ResizedImageInfo> imageInfoVec = {};
+    imageInfoVec.push_back(imgInfo);
+    APP_ERROR ret = post_->Process(outputs, objInfos, imageInfoVec);
+    if (ret != APP_ERR_OK) {
+        LogError << "Process failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::WriteResult(const std::vector<std::vector<MxBase::ObjectInfo>> &objInfos) {
+    std::string resultPathName = "result";
+    uint32_t batchSize = objInfos.size();
+    // create result directory when it does not exist
+    if (access(resultPathName.c_str(), 0) != 0) {
+        int ret = mkdir(resultPathName.c_str(), S_IRUSR | S_IWUSR | S_IXUSR);
+        if (ret != 0) {
+            LogError << "Failed to create result directory: " << resultPathName << ", ret = " << ret;
+            return APP_ERR_COMM_OPEN_FAIL;
+        }
+    }
+    // create result file under result directory
+    resultPathName = resultPathName + "/result.txt";
+    std::ofstream tfile(resultPathName, std::ofstream::app);
+    if (tfile.fail()) {
+        LogError << "Failed to open result file: " << resultPathName;
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    // write inference result into file
+    for (uint32_t i = 0; i < batchSize; i++) {
+        tfile << "Object detected num is " << objInfos[i].size() << std::endl;
+        for (uint32_t j = 0; j < objInfos[i].size(); j++) {
+            tfile << "#Obj: " << j << ", box: " << objInfos[i][j].x0 << " " << objInfos[i][j].y0 << " "
+                << objInfos[i][j].x1 << " " << objInfos[i][j].y1
+                << ", confidence: " << objInfos[i][j].confidence << ", label: " << labelMap_[objInfos[i][j].classId]
+                << ", id: " << objInfos[i][j].classId << std::endl;
+        }
+    }
+
+    tfile.close();
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::Process(const std::string &imgPath) {
+    // process image
+    cv::Mat imageMat;
+    APP_ERROR ret = ReadImage(imgPath, imageMat);
+    if (ret != APP_ERR_OK) {
+        LogError << "ReadImage failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    ret = Resize(imageMat, imageMat);
+    if (ret != APP_ERR_OK) {
+        LogError << "Resize failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    std::vector<MxBase::TensorBase> inputs = {};
+    std::vector<MxBase::TensorBase> outputs = {};
+    MxBase::TensorBase tensorBase;
+    ret = CVMatToTensorBase(imageMat, tensorBase);
+    if (ret != APP_ERR_OK) {
+        LogError << "CVMatToTensorBase failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    inputs.push_back(tensorBase);
+    ret = Inference(inputs, outputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    std::vector<std::vector<MxBase::ObjectInfo>> objInfos;
+    ret = PostProcess(outputs, objInfos);
+    if (ret != APP_ERR_OK) {
+        LogError << "PostProcess failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    ret = WriteResult(objInfos);
+    if (ret != APP_ERR_OK) {
+        LogError << "Save result failed, ret=" << ret << ".";
+        return ret;
+    }
+    imageMat.release();
+    return APP_ERR_OK;
+}
diff --git a/official/cv/yolov4/infer/mxbase/src/Yolov4Detection.h b/official/cv/yolov4/infer/mxbase/src/Yolov4Detection.h
new file mode 100644
index 0000000000000000000000000000000000000000..eec33855d38f892895a284b9292ce6b2a14f3c50
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/src/Yolov4Detection.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MXBASE_YOLOV4DETECTIONOPENCV_H
+#define MXBASE_YOLOV4DETECTIONOPENCV_H
+
+#include <vector>
+#include <memory>
+#include <map>
+#include <string>
+#include <opencv2/opencv.hpp>
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "PostProcess/Yolov4MindsporePost.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+
+extern std::vector<double> g_inferCost;
+
+struct InitParam {
+    uint32_t deviceId;
+    std::string labelPath;
+    bool checkTensor;
+    std::string modelPath;
+    uint32_t classNum;
+    uint32_t biasesNum;
+    std::string biases;
+    std::string objectnessThresh;
+    std::string iouThresh;
+    std::string scoreThresh;
+    uint32_t yoloType;
+    uint32_t modelType;
+    uint32_t inputType;
+    uint32_t anchorDim;
+};
+
+class Yolov4DetectionOpencv {
+ public:
+     APP_ERROR Init(const InitParam &initParam);
+     APP_ERROR DeInit();
+     APP_ERROR ReadImage(const std::string &imgPath, cv::Mat &imageMat);
+     APP_ERROR Resize(cv::Mat &srcImageMat, cv::Mat &dstImageMat);
+     APP_ERROR CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase);
+     APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> &outputs);
+     APP_ERROR PostProcess(const std::vector<MxBase::TensorBase> &outputs,
+                          std::vector<std::vector<MxBase::ObjectInfo>> &objInfos);
+     APP_ERROR Process(const std::string &imgPath);
+     APP_ERROR LoadLabels(const std::string &labelPath, std::map<int, std::string> &labelMap);
+     APP_ERROR WriteResult(const std::vector<std::vector<MxBase::ObjectInfo>> &objInfos);
+ private:
+     std::shared_ptr<MxBase::DvppWrapper> dvppWrapper_;
+     std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
+     std::shared_ptr<MxBase::Yolov4PostProcess> post_;
+     MxBase::ModelDesc modelDesc_;
+     std::map<int, std::string> labelMap_;
+     uint32_t deviceId_ = 0;
+     uint32_t imageWidth_ = 0;
+     uint32_t imageHeight_ = 0;
+};
+#endif
diff --git a/official/cv/yolov4/infer/mxbase/src/main.cpp b/official/cv/yolov4/infer/mxbase/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8c5d2298f25717db90ae474fdac5ecc511c5a321
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/src/main.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <vector>
+#include "Yolov4Detection.h"
+#include "MxBase/Log/Log.h"
+
+std::vector<double> g_inferCost;
+
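+// Split string s into *v on every occurrence of delimiter c.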
+void SplitString(const std::string &s, std::vector<std::string> *v, const std::string &c) {
+    std::string::size_type pos1, pos2;
+    pos2 = s.find(c);
+    pos1 = 0;
+    while (std::string::npos != pos2) {
+        v->push_back(s.substr(pos1, pos2 - pos1));
+
+        pos1 = pos2 + c.size();
+        pos2 = s.find(c, pos1);
+    }
+
+    if (pos1 != s.length()) {
+        v->push_back(s.substr(pos1));
+    }
+}
+
+void InitYolov4Param(InitParam *initParam) {
+    initParam->deviceId = 0;
+    initParam->labelPath = "../data/models/coco2017.names";
+    initParam->checkTensor = true;
+    initParam->modelPath = "../data/models/yolov4.om";
+    initParam->classNum = 80;
+    initParam->biasesNum = 18;
+    initParam->biases = "12,16,19,36,40,28,36,75,76,55,72,146,142,110,192,243,459,401";
+    initParam->objectnessThresh = "0.001";
+    initParam->iouThresh = "0.6";
+    initParam->scoreThresh = "0.001";
+    initParam->yoloType = 3;
+    initParam->modelType = 0;
+    initParam->inputType = 0;
+    initParam->anchorDim = 3;
+}
+
+APP_ERROR ReadImagesPath(const std::string &path, std::vector<std::string> *imagesPath) {
+    std::ifstream inFile;
+    inFile.open(path, std::ios_base::in);
+    std::string line;
+    // Check images path file validity
+    if (inFile.fail()) {
+        LogError << "Failed to open image list file: " << path;
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    std::vector<std::string> vectorStr;
+    std::string splitStr = " ";
+    // parse image paths: each line is "<id> <image path> <width> <height> ..."
+    while (std::getline(inFile, line)) {
+        if (line[0] == '#') {
+            continue;
+        }
+        vectorStr.clear();
+        SplitString(line, &vectorStr, splitStr);
+        if (vectorStr.size() > 1) {
+            imagesPath->push_back(vectorStr[1]);
+        }
+    }
+
+    inFile.close();
+    return APP_ERR_OK;
+}
+
+int main(int argc, char* argv[]) {
+    if (argc <= 1) {
+        LogWarn << "Please input the image list file, such as './yolov4 infer.txt'.";
+        return APP_ERR_OK;
+    }
+    InitParam initParam;
+    InitYolov4Param(&initParam);
+    auto yolov4 = std::make_shared<Yolov4DetectionOpencv>();
+    APP_ERROR ret = yolov4->Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "Yolov4DetectionOpencv init failed, ret=" << ret << ".";
+        return ret;
+    }
+    LogInfo << "End to Init yolov4.";
+    std::string inferText = argv[1];
+    std::vector<std::string> imagesPath;
+    ret = ReadImagesPath(inferText, &imagesPath);
+    if (ret != APP_ERR_OK) {
+        LogError << "ReadImagesPath failed, ret=" << ret << ".";
+        return ret;
+    }
+    for (uint32_t i = 0; i < imagesPath.size(); i++) {
+        LogInfo << "read image path " << imagesPath[i];
+        ret = yolov4->Process(imagesPath[i]);
+        if (ret != APP_ERR_OK) {
+            LogError << "Yolov4DetectionOpencv process failed, ret=" << ret << ".";
+            yolov4->DeInit();
+            return ret;
+        }
+    }
+    yolov4->DeInit();
+    double costSum = 0;
+    for (uint32_t i = 0; i < g_inferCost.size(); i++) {
+        costSum += g_inferCost[i];
+    }
+    LogInfo << "Infer images sum " << g_inferCost.size() << ", cost total time: " << costSum << " ms.";
+    LogInfo << "The throughput: " << g_inferCost.size() * 1000 / costSum << " images/sec.";
+    return APP_ERR_OK;
+}
diff --git a/official/cv/yolov4/infer/sdk/config/yolov4.pipeline b/official/cv/yolov4/infer/sdk/config/yolov4.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..9bdccf1e2464708dcbf8685e58af80aa54693bc8
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/config/yolov4.pipeline
@@ -0,0 +1,65 @@
+{
+    "im_yolov4": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_imagedecoder0"
+        },
+        "mxpi_imagedecoder0": {
+            "props": {
+                "handleMethod": "opencv"
+            },
+            "factory": "mxpi_imagedecoder",
+            "next": "mxpi_imageresize0"
+        },
+        "mxpi_imageresize0": {
+            "props": {
+                "parentName": "mxpi_imagedecoder0",
+                "handleMethod": "opencv",
+                "resizeHeight": "608",
+                "resizeWidth": "608",
+                "resizeType": "Resizer_Stretch"
+            },
+            "factory": "mxpi_imageresize",
+            "next": "mxpi_tensorinfer0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "mxpi_imageresize0",
+                "modelPath": "../data/models/yolov4.om",
+                "waitingTime": "3000",
+                "outputDeviceId": "-1"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "mxpi_objectpostprocessor0"
+        },
+        "mxpi_objectpostprocessor0": {
+            "props": {
+                "dataSource": "mxpi_tensorinfer0",
+                "postProcessConfigPath": "../data/models/yolov4_coco2017_acc_test.cfg",
+                "labelPath": "../data/models/coco2017.names",
+                "postProcessLibPath": "./mxpi/build/libyolov4_mindspore_post.so"
+            },
+            "factory": "mxpi_objectpostprocessor",
+            "next": "mxpi_dataserialize0"
+        },
+        "mxpi_dataserialize0": {
+            "props": {
+                "outputDataKeys": "mxpi_objectpostprocessor0"
+            },
+            "factory": "mxpi_dataserialize",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "props": {
+                "blocksize": "4096000"
+            },
+            "factory": "appsink"
+        }
+    }
+}
diff --git a/official/cv/yolov4/infer/sdk/infersdk.sh b/official/cv/yolov4/infer/sdk/infersdk.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e941bc2513d2747b58b37470ddccc907ddb6c290
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/infersdk.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Simple log helper functions
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/${ASCEND_VERSION}/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
+export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
+
+# Set PYTHONPATH so that StreamManagerApi.py can be imported
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+python3 object_task_metric.py
+exit 0
diff --git a/official/cv/yolov4/infer/sdk/main.py b/official/cv/yolov4/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..df8f9018c0e894a9b1fe5233c81a20fc2650cd7a
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/main.py
@@ -0,0 +1,121 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import json
+import os
+from StreamManagerApi import StreamManagerApi, MxDataInput
+
+
+def read_file_list(input_file):
+    """
+    Read image paths from an infer file whose lines look like:
+        0 xxx/xxx/a.jpg 1920 1080 0 453 369 473 391 1 588 245 608 268
+        1 xxx/xxx/b.jpg 1920 1080 1 466 403 485 422 2 793 300 809 320
+        ...
+    :param input_file: path of the infer file
+    :return: list of image paths (the second field of each line)
+    """
+    image_file_list = []
+    if not os.path.exists(input_file):
+        print('input file does not exist.')
+        return image_file_list
+    with open(input_file, "r") as fs:
+        for line in fs.readlines():
+            line = line.strip('\n').split(' ')[1]
+            image_file_list.append(line)
+    return image_file_list
+
+
+def save_infer_result(result_dir, result):
+    """
+    Save the infer result to a file. Write format:
+        Object detected num is 5
+        #Obj: 1, box: 453 369 473 391, confidence: 0.3, label: person, id: 0
+        ...
+    :param result_dir: directory in which result.txt is written
+    :param result: JSON string with the bbox and class_id of every detected object
+    """
+    load_dict = json.loads(result)
+    if load_dict.get('MxpiObject') is None:
+        with open(result_dir + '/result.txt', 'a+') as f_write:
+            f_write.write("")
+    else:
+        res_vec = load_dict.get('MxpiObject')
+        with open(result_dir + '/result.txt', 'a+') as f_write:
+            object_list = 'Object detected num is ' + str(len(res_vec)) + '\n'
+            f_write.writelines(object_list)
+            for index, object_item in enumerate(res_vec):
+                class_info = object_item.get('classVec')[0]
+                object_info = '#Obj: ' + str(index) + ', box: ' + \
+                              str(object_item.get('x0')) + ' ' + \
+                              str(object_item.get('y0')) + ' ' + \
+                              str(object_item.get('x1')) + ' ' + \
+                              str(object_item.get('y1')) + ', confidence: ' + \
+                              str(class_info.get('confidence')) + ', label: ' + \
+                              class_info.get('className') + ', id: ' + \
+                              str(class_info.get('classId')) + '\n'
+                f_write.writelines(object_info)
+
+
+if __name__ == '__main__':
+    # init stream manager
+    stream_manager = StreamManagerApi()
+    ret = stream_manager.InitManager()
+    if ret != 0:
+        print("Failed to init Stream manager, ret=%s" % str(ret))
+        exit()
+
+    # create streams by pipeline config file
+    with open("./config/yolov4.pipeline", 'rb') as f:
+        pipeline = f.read()
+    ret = stream_manager.CreateMultipleStreams(pipeline)
+    if ret != 0:
+        print("Failed to create Stream, ret=%s" % str(ret))
+        exit()
+
+    # Construct the input of the stream
+    data_input = MxDataInput()
+
+    infer_file = './trainval.txt'
+    file_list = read_file_list(infer_file)
+    res_dir_name = 'result'
+    if not os.path.exists(res_dir_name):
+        os.makedirs(res_dir_name)
+
+    for file_path in file_list:
+        print(file_path)
+        file_name = file_path.split('/')[-1]
+        if not (file_name.lower().endswith(".jpg") or file_name.lower().endswith(".jpeg")):
+            continue
+
+        with open(file_path, 'rb') as f:
+            data_input.data = f.read()
+
+        # Inputs data to a specified stream based on streamName.
+        stream_name = b'im_yolov4'
+        inplugin_id = 0
+        unique_id = stream_manager.SendData(stream_name, inplugin_id, data_input)
+        if unique_id < 0:
+            print("Failed to send data to stream.")
+            exit()
+        # Obtain the inference result by specifying streamName and uniqueId.
+        mstimeout = 5000
+        infer_result = stream_manager.GetResult(stream_name, unique_id, mstimeout)
+        if infer_result.errorCode != 0:
+            print("GetResult error. errorCode=%d, errorMsg=%s" % (
+                infer_result.errorCode, infer_result.data.decode()))
+            exit()
+        save_infer_result(res_dir_name, infer_result.data.decode())
+
+    # destroy streams
+    stream_manager.DestroyAllStreams()
diff --git a/official/cv/yolov4/infer/sdk/mxpi/CMakeLists.txt b/official/cv/yolov4/infer/sdk/mxpi/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c23e1fe9049e14f8b834e3a8758a1e5a0bbcc7d6
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/mxpi/CMakeLists.txt
@@ -0,0 +1,38 @@
+cmake_minimum_required(VERSION 3.10.0)
+project(yolov4post)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+
+set(PLUGIN_NAME "yolov4_mindspore_post")
+set(TARGET_LIBRARY ${PLUGIN_NAME})
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/ascend-toolkit/latest/acllib)
+
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+include_directories($ENV{MX_SDK_HOME}/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/opencv4)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/gstreamer-1.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/glib-2.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/lib/glib-2.0/include)
+
+link_directories($ENV{MX_SDK_HOME}/lib)
+link_directories($ENV{MX_SDK_HOME}/opensource/lib/)
+
+add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations)
+add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}")
+add_compile_options("-Dgoogle=mindxsdk_private")
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+
+include_directories(${ACL_LIB_PATH}/include)
+
+add_library(${TARGET_LIBRARY} SHARED ../../mxbase/src/PostProcess/Yolov4MindsporePost.cpp
+../../mxbase/src/PostProcess/Yolov4MindsporePost.h)
+
+target_link_libraries(${TARGET_LIBRARY} glib-2.0 gstreamer-1.0 gobject-2.0 gstbase-1.0 gmodule-2.0)
+target_link_libraries(${TARGET_LIBRARY} plugintoolkit mxpidatatype mxbase)
+target_link_libraries(${TARGET_LIBRARY} -Wl,-z,relro,-z,now,-z,noexecstack -s)
+
+install(TARGETS ${TARGET_LIBRARY} LIBRARY DESTINATION $ENV{MX_SDK_HOME}/lib/modelpostprocessors/)
\ No newline at end of file
diff --git a/official/cv/yolov4/infer/sdk/mxpi/build.sh b/official/cv/yolov4/infer/sdk/mxpi/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..07728e78c907ed8c930f680f56dc5663c872f8bc
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/mxpi/build.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+
+# env
+
+mkdir -p build
+cd build || exit
+
+function make_plugin() {
+    if ! cmake ..;
+    then
+      echo "cmake failed."
+      return 1
+    fi
+
+    if ! (make && make install);
+    then
+      echo "make failed."
+      return 1
+    fi
+    return 0
+}
+
+if make_plugin;
+then
+  echo "INFO: The post processor plugin was installed successfully."
+else
+  echo "ERROR: Failed to install the post processor plugin."
+fi
+
+cd - || exit
\ No newline at end of file
diff --git a/official/cv/yolov4/infer/sdk/requirements.txt b/official/cv/yolov4/infer/sdk/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e46171c3419259ca73c0faf3737e0431dded717f
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/requirements.txt
@@ -0,0 +1,3 @@
+opencv-python
+tqdm
+pycocotools
\ No newline at end of file
diff --git a/official/cv/yolov4/infer/sdk/run.sh b/official/cv/yolov4/infer/sdk/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7dd7947e5786d0fd61142053f8318d028d1b9803
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/run.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Simple log helper functions
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/${ASCEND_VERSION}/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
+export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
+
+# Set PYTHONPATH so that StreamManagerApi.py can be imported
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+python3 main.py
+exit 0
diff --git a/official/cv/yolov4/modelarts/modelarts.py b/official/cv/yolov4/modelarts/modelarts.py
new file mode 100644
index 0000000000000000000000000000000000000000..b70331f23004ac13349a5a76cd7ba33f6a64edab
--- /dev/null
+++ b/official/cv/yolov4/modelarts/modelarts.py
@@ -0,0 +1,335 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""YoloV4 train."""
+import argparse
+import os
+import time
+import datetime
+import numpy as np
+
+import mindspore
+from mindspore.context import ParallelMode
+from mindspore.nn.optim.momentum import Momentum
+from mindspore import Tensor
+import mindspore.nn as nn
+from mindspore import context
+from mindspore.communication.management import init, get_rank, get_group_size
+from mindspore.train.serialization import export, load_checkpoint, load_param_into_net
+from mindspore.train.callback import ModelCheckpoint, RunContext
+from mindspore.train.callback import CheckpointConfig
+from mindspore.common import set_seed
+from mindspore.profiler.profiling import Profiler
+
+from src.yolo import YOLOV4CspDarkNet53, YoloWithLossCell, TrainingWrapper
+from src.logger import get_logger
+from src.util import AverageMeter, get_param_groups
+from src.lr_scheduler import get_lr
+from src.yolo_dataset import create_yolo_dataset
+from src.initializer import default_recurisive_init, load_yolov4_params
+from src.eval_utils import apply_eval, EvalCallBack
+
+from model_utils.config import config
+from model_utils.moxing_adapter import moxing_wrapper
+from model_utils.device_adapter import get_device_id, get_device_num
+
+set_seed(1)
+parser = argparse.ArgumentParser(description='YOLOV4')
+parser.add_argument('--enable_modelarts', type=bool, default=True, help='use modelarts')
+parser.add_argument('--data_url', type=str, default='', help='Dataset directory')
+parser.add_argument('--train_url', type=str, default='', help='The path model saved')
+parser.add_argument('--checkpoint_url', type=str, default='', help='The path pre-model saved')
+parser.add_argument('--is_distributed', type=int, default=0, help='distributed training flag (0: no, 1: yes)')
+parser.add_argument('--warmup_epochs', type=int, default=1, help='warmup epoch')
+parser.add_argument('--epoch', type=int, default=1, help='train epoch')
+parser.add_argument('--training_shape', type=int, default=416, help='training shape')
+args_opt, _ = parser.parse_known_args()
+
+def set_default():
+    os.makedirs(config.output_path, exist_ok=True)
+    os.makedirs(config.data_path, exist_ok=True)
+
+    config.run_eval = True
+    config.eval_start_epoch = 0
+    config.max_epoch = args_opt.epoch
+    config.warmup_epochs = args_opt.warmup_epochs
+    config.is_distributed = args_opt.is_distributed
+    config.enable_modelarts = args_opt.enable_modelarts
+    config.checkpoint_url = args_opt.checkpoint_url
+    config.pretrained_backbone = args_opt.checkpoint_url
+    config.training_shape = args_opt.training_shape
+    config.per_batch_size = 1
+    config.file_name = os.path.join(args_opt.train_url, "yolov4")
+    if config.lr_scheduler == 'cosine_annealing' and config.max_epoch > config.t_max:
+        config.t_max = config.max_epoch
+
+    config.lr_epochs = list(map(int, config.lr_epochs.split(',')))
+    config.data_root = os.path.join(args_opt.data_url, 'train2017')
+    config.annFile = os.path.join(args_opt.data_url, 'annotations/instances_train2017.json')
+
+    config.data_val_root = os.path.join(args_opt.data_url, 'val2017')
+    config.ann_val_file = os.path.join(args_opt.data_url, 'annotations/instances_val2017.json')
+
+    device_id = int(os.getenv('DEVICE_ID', '0'))
+    context.set_context(mode=context.GRAPH_MODE,
+                        device_target=config.device_target, save_graphs=False, device_id=device_id)
+
+    if config.need_profiler:
+        profiler = Profiler(output_path=config.checkpoint_url, is_detail=True, is_show_op_path=True)
+    else:
+        profiler = None
+
+    # init distributed
+    if config.is_distributed:
+        init()
+        config.rank = get_rank()
+        config.group_size = get_group_size()
+    else:
+        config.rank = 0
+        config.group_size = 1
+
+    # select for master rank save ckpt or all rank save, compatible for model parallel
+    config.rank_save_ckpt_flag = 0
+    if config.is_save_on_master:
+        if config.rank == 0:
+            config.rank_save_ckpt_flag = 1
+    else:
+        config.rank_save_ckpt_flag = 1
+
+    # logger
+    config.outputs_dir = os.path.join(args_opt.train_url,
+                                      datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
+    config.logger = get_logger(config.outputs_dir, config.rank)
+    config.logger.save_args(config)
+
+    return profiler
+
+
+class InternalCallbackParam(dict):
+    """Internal callback object's parameters."""
+
+    def __getattr__(self, key):
+        return self[key]
+
+    def __setattr__(self, key, value):
+        self[key] = value
+
+
+class BuildTrainNetwork(nn.Cell):
+    def __init__(self, network_, criterion):
+        super(BuildTrainNetwork, self).__init__()
+        self.network = network_
+        self.criterion = criterion
+
+    def construct(self, input_data, label):
+        output = self.network(input_data)
+        loss_ = self.criterion(output, label)
+        return loss_
+
+
+def modelarts_pre_process():
+    '''modelarts pre process function.'''
+    def unzip(zip_file, save_dir):
+        import zipfile
+        s_time = time.time()
+        if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)):
+            zip_isexist = zipfile.is_zipfile(zip_file)
+            if zip_isexist:
+                fz = zipfile.ZipFile(zip_file, 'r')
+                data_num = len(fz.namelist())
+                print("Extract Start...")
+                print("unzip file num: {}".format(data_num))
+                data_print = int(data_num / 100) if data_num > 100 else 1
+                i = 0
+                for file in fz.namelist():
+                    if i % data_print == 0:
+                        print("unzip percent: {}%".format(int(i * 100 / data_num)), flush=True)
+                    i += 1
+                    fz.extract(file, save_dir)
+                print("cost time: {}min:{}s.".format(int((time.time() - s_time) / 60),
+                                                     int(int(time.time() - s_time) % 60)))
+                print("Extract Done.")
+            else:
+                print("This is not a zip file.")
+        else:
+            print("Zip has been extracted.")
+
+    if config.need_modelarts_dataset_unzip:
+        zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + ".zip")
+        save_dir_1 = os.path.join(config.data_path)
+
+        sync_lock = "/tmp/unzip_sync.lock"
+
+        # Each server contains at most 8 devices.
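+        # Only the first device on each server extracts the archive; the other
+        # devices wait below until the lock file appears.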
+        if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
+            print("Zip file path: ", zip_file_1)
+            print("Unzip file save dir: ", save_dir_1)
+            unzip(zip_file_1, save_dir_1)
+            print("===Finish extract data synchronization===")
+            try:
+                os.mknod(sync_lock)
+            except IOError:
+                pass
+
+        while True:
+            if os.path.exists(sync_lock):
+                break
+            time.sleep(1)
+
+        print("Device: {}, Finish sync unzip data from {} to {}.".format(get_device_id(), zip_file_1, save_dir_1))
+
+    config.ckpt_path = os.path.join(config.output_path, config.ckpt_path)
+
+
+def get_network(net, cfg, learning_rate):
+    opt = Momentum(params=get_param_groups(net),
+                   learning_rate=Tensor(learning_rate),
+                   momentum=cfg.momentum,
+                   weight_decay=cfg.weight_decay,
+                   loss_scale=cfg.loss_scale)
+    net = TrainingWrapper(net, opt)
+    net.set_train()
+    return net
+
+
+@moxing_wrapper(pre_process=modelarts_pre_process)
+def run_train():
+    profiler = set_default()
+    loss_meter = AverageMeter('loss')
+    context.reset_auto_parallel_context()
+    parallel_mode = ParallelMode.STAND_ALONE
+    degree = 1
+    if config.is_distributed:
+        parallel_mode = ParallelMode.DATA_PARALLEL
+        degree = get_group_size()
+    context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=degree)
+
+    network = YOLOV4CspDarkNet53()
+    if config.run_eval:
+        network_eval = network
+    # default is kaiming-normal
+    default_recurisive_init(network)
+    load_yolov4_params(config, network)
+
+    network = YoloWithLossCell(network)
+    config.logger.info('finish get network')
+
+    ds, data_size = create_yolo_dataset(image_dir=config.data_root, anno_path=config.annFile, is_training=True,
+                                        batch_size=config.per_batch_size, max_epoch=config.max_epoch,
+                                        device_num=config.group_size, rank=config.rank, default_config=config)
+    config.logger.info('Finish loading dataset')
+
+    config.steps_per_epoch = int(data_size / config.per_batch_size / config.group_size)
+
+    if config.ckpt_interval <= 0:
+        config.ckpt_interval = config.steps_per_epoch
+
+    lr = get_lr(config)
+    network = get_network(network, config, lr)
+    network.set_train(True)
+
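+    # The loop below drives training manually, so checkpoint/eval callbacks are
+    # fed through RunContext by hand instead of via Model.train().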
+    if config.rank_save_ckpt_flag or config.run_eval:
+        cb_params = InternalCallbackParam()
+        cb_params.train_network = network
+        cb_params.epoch_num = config.max_epoch * config.steps_per_epoch // config.ckpt_interval
+        cb_params.cur_epoch_num = 1
+        run_context = RunContext(cb_params)
+
+    if config.rank_save_ckpt_flag:
+        # checkpoint save
+        ckpt_max_num = 10
+        ckpt_config = CheckpointConfig(save_checkpoint_steps=config.ckpt_interval,
+                                       keep_checkpoint_max=ckpt_max_num)
+        save_ckpt_path = os.path.join(config.outputs_dir, 'ckpt_' + str(config.rank) + '/')
+        ckpt_cb = ModelCheckpoint(config=ckpt_config, directory=save_ckpt_path, prefix='{}'.format(config.rank))
+        ckpt_cb.begin(run_context)
+
+    if config.run_eval:
+        data_val_root = config.data_val_root
+        ann_val_file = config.ann_val_file
+        save_ckpt_path = os.path.join(config.outputs_dir, 'ckpt_' + str(config.rank) + '/')
+        input_val_shape = Tensor(tuple(config.test_img_shape), mindspore.float32)
+        # init detection engine
+        eval_dataset, eval_data_size = create_yolo_dataset(data_val_root, ann_val_file, is_training=False,
+                                                           batch_size=1, max_epoch=1, device_num=1,
+                                                           rank=0, shuffle=False, default_config=config)
+        eval_param_dict = {"net": network_eval, "dataset": eval_dataset, "data_size": eval_data_size,
+                           "anno_json": ann_val_file, "input_shape": input_val_shape, "args": config}
+        eval_cb = EvalCallBack(apply_eval, eval_param_dict, interval=config.eval_interval,
+                               eval_start_epoch=config.eval_start_epoch, save_best_ckpt=True,
+                               ckpt_directory=save_ckpt_path, besk_ckpt_name="best_map.ckpt", metrics_name="mAP")
+
+    old_progress = -1
+    t_end = time.time()
+    data_loader = ds.create_dict_iterator(output_numpy=True, num_epochs=1)
+
+    for i, data in enumerate(data_loader):
+        images = data["image"]
+        input_shape = images.shape[2:4]
+        config.logger.info('iter[%d], shape: %d', i + 1, input_shape[0])
+
+        images = Tensor.from_numpy(images)
+        batch_y_true_0 = Tensor.from_numpy(data['bbox1'])
+        batch_y_true_1 = Tensor.from_numpy(data['bbox2'])
+        batch_y_true_2 = Tensor.from_numpy(data['bbox3'])
+        batch_gt_box0 = Tensor.from_numpy(data['gt_box1'])
+        batch_gt_box1 = Tensor.from_numpy(data['gt_box2'])
+        batch_gt_box2 = Tensor.from_numpy(data['gt_box3'])
+
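+        # the dataset yields (h, w); reverse to (w, h) for the shape tensor passed to the loss cell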
+        input_shape = Tensor(tuple(input_shape[::-1]), mindspore.float32)
+        loss = network(images, batch_y_true_0, batch_y_true_1, batch_y_true_2, batch_gt_box0, batch_gt_box1,
+                       batch_gt_box2, input_shape)
+        loss_meter.update(loss.asnumpy())
+
+        # ckpt progress
+        if config.rank_save_ckpt_flag:
+            cb_params.cur_step_num = i + 1  # current step number
+            cb_params.batch_num = i + 2
+            ckpt_cb.step_end(run_context)
+
+        if (i + 1) % config.log_interval == 0:
+            time_used = time.time() - t_end
+            epoch = int((i + 1) / config.steps_per_epoch)
+            fps = config.per_batch_size * (i - old_progress) * config.group_size / time_used
+            if config.rank == 0:
+                config.logger.info('epoch[{}], iter[{}], {}, per step time: {:.2f} ms, fps: {:.2f}, lr:{}'.format(
+                    epoch, i, loss_meter, 1000 * time_used / (i - old_progress), fps, lr[i]))
+            t_end = time.time()
+            loss_meter.reset()
+            old_progress = i
+
+        if (i + 1) % config.steps_per_epoch == 0 and (config.run_eval or config.rank_save_ckpt_flag):
+            if config.run_eval:
+                eval_cb.epoch_end(run_context)
+                network.set_train()
+            cb_params.cur_epoch_num += 1
+
+        if config.need_profiler and profiler is not None:
+            if i == 10:
+                profiler.analyse()
+                break
+
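+    # Export: reload the saved weights into a clean eval-mode network and convert to AIR.
+    # The checkpoint file name below is assumed to match what ModelCheckpoint produced above.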
+    ckpt_path = os.path.join(config.outputs_dir, 'ckpt_' + str(config.rank), '0-1_117266.ckpt')
+
+    network_export = YOLOV4CspDarkNet53()
+    network_export.set_train(False)
+
+    param_dict = load_checkpoint(ckpt_path)
+    load_param_into_net(network_export, param_dict)
+    input_data = Tensor(np.zeros([config.batch_size, 3, config.testing_shape, config.testing_shape]), mindspore.float32)
+
+    export(network_export, input_data, file_name=config.file_name, file_format="AIR")
+
+
+if __name__ == "__main__":
+    run_train()
diff --git a/research/cv/EDSR/infer/convert/aipp_edsr_opencv.cfg b/research/cv/EDSR/infer/convert/aipp_edsr_opencv.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..931dbfeda97c5be23496361265e067cd4277ff3b
--- /dev/null
+++ b/research/cv/EDSR/infer/convert/aipp_edsr_opencv.cfg
@@ -0,0 +1,5 @@
+aipp_op {
+aipp_mode:static
+input_format:RGB888_U8
+}
+
diff --git a/research/cv/EDSR/infer/convert/convert_om.sh b/research/cv/EDSR/infer/convert/convert_om.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a57ec0d1b6ffb40da082313c6aa59116f6d6186d
--- /dev/null
+++ b/research/cv/EDSR/infer/convert/convert_om.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+model_path=$1
+aipp_cfg_path=$2
+output_model_name=$3
+
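+# ATC converts the MindSpore AIR model (--framework=1) into an Ascend 310
+# offline model (.om), fusing in the AIPP preprocessing described by the config.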
+atc \
+--model=$model_path \
+--input_format=NCHW \
+--framework=1 \
+--output=$output_model_name \
+--log=error \
+--soc_version=Ascend310 \
+--insert_op_conf=$aipp_cfg_path
diff --git a/research/cv/EDSR/infer/data/config/edsr.pipeline b/research/cv/EDSR/infer/data/config/edsr.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..ea3c2a13d17509f040ca2b5abff7bec6a33a5536
--- /dev/null
+++ b/research/cv/EDSR/infer/data/config/edsr.pipeline
@@ -0,0 +1,28 @@
+{
+        "edsr_superResolution": {
+            "stream_config": {
+                "deviceId": "0"
+            },
+            "appsrc0": {
+                "props": {
+                    "blocksize": "409600"
+                },
+                "factory": "appsrc",
+                "next": "mxpi_tensorinfer0"
+            },
+            "mxpi_tensorinfer0": {
+                "props": {
+                    "dataSource": "appsrc0",
+                    "modelPath": "../model/edsr.om"
+                },
+                "factory": "mxpi_tensorinfer",
+                "next": "appsink0"
+            },
+            "appsink0": {
+                "props": {
+                    "blocksize": "409600"
+                },
+                "factory": "appsink"
+            }
+        }
+    }
\ No newline at end of file
diff --git a/research/cv/EDSR/infer/docker_start_infer.sh b/research/cv/EDSR/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d8cf649151b2fe5c8b6ae2c8aa9c0c296b2d9778
--- /dev/null
+++ b/research/cv/EDSR/infer/docker_start_infer.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+#coding = utf-8
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the BSD 3-Clause License  (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+data_dir=$2
+
+function show_help() {
+    echo "Usage: docker_start.sh docker_image data_dir"
+}
+
+function param_check() {
+    if [ -z "${docker_image}" ]; then
+        echo "please input docker_image"
+        show_help
+        exit 1
+    fi
+
+    if [ -z "${data_dir}" ]; then
+        echo "please input data_dir"
+        show_help
+        exit 1
+    fi
+}
+
+param_check
+
+docker run -it \
+  --device=/dev/davinci0 \
+  --device=/dev/davinci_manager \
+  --device=/dev/devmm_svm \
+  --device=/dev/hisi_hdc \
+  -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+  -v ${data_dir}:${data_dir} \
+  ${docker_image} \
+  /bin/bash
diff --git a/research/cv/EDSR/infer/mxbase/CMakeLists.txt b/research/cv/EDSR/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1538ccd9dbdfaef45688962469df562405447cb1
--- /dev/null
+++ b/research/cv/EDSR/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,55 @@
+cmake_minimum_required(VERSION 3.14.0)
+project(edsr)
+
+set(TARGET edsr)
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -s -pie)
+
+add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall
+-Dgoogle=mindxsdk_private -D_GLIBCXX_USE_CXX11_ABI=0)
+
+
+#Check environment variable
+if(NOT DEFINED ENV{ASCEND_HOME})
+    message(FATAL_ERROR "please define environment variable:ASCEND_HOME")
+endif()
+if(NOT DEFINED ENV{ASCEND_VERSION})
+    message(WARNING "please define environment variable:ASCEND_VERSION")
+endif()
+if(NOT DEFINED ENV{ARCH_PATTERN})
+    message(WARNING "please define environment variable:ARCH_PATTERN")
+endif()
+set(ACL_INC_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/include)
+set(ACL_LIB_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/lib64)
+
+set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
+set(MXBASE_INC ${MXBASE_ROOT_DIR}/include)
+set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib)
+set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors)
+set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/postprocess/include)
+
+
+if(DEFINED ENV{MXSDK_OPENSOURCE_DIR})
+    set(OPENSOURCE_DIR $ENV{MXSDK_OPENSOURCE_DIR})
+else()
+    set(OPENSOURCE_DIR ${MXBASE_ROOT_DIR}/opensource)
+endif()
+
+include_directories(${ACL_INC_DIR})
+include_directories(${OPENSOURCE_DIR}/include)
+include_directories(${OPENSOURCE_DIR}/include/opencv4)
+
+include_directories(${MXBASE_INC})
+include_directories(${MXBASE_POST_PROCESS_DIR})
+
+link_directories(${ACL_LIB_DIR})
+link_directories(${OPENSOURCE_DIR}/lib)
+link_directories(${MXBASE_LIB_DIR})
+link_directories(${MXBASE_POST_LIB_DIR})
+
+add_executable(${TARGET} main.cpp EdsrSuperresolution.cpp)
+target_link_libraries(${TARGET} glog cpprest mxbase opencv_world)
+
+install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/)
diff --git a/research/cv/EDSR/infer/mxbase/EdsrSuperresolution.cpp b/research/cv/EDSR/infer/mxbase/EdsrSuperresolution.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1016326d41bc03837732c47a46d27b548a53dc98
--- /dev/null
+++ b/research/cv/EDSR/infer/mxbase/EdsrSuperresolution.cpp
@@ -0,0 +1,200 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "EdsrSuperresolution.h"
+
+#include <memory>
+#include <vector>
+#include <string>
+
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/Log/Log.h"
+
+
+namespace localParameter {
+    const uint32_t VECTOR_FIRST_INDEX = 0;
+    const uint32_t VECTOR_SECOND_INDEX = 1;
+    const uint32_t VECTOR_THIRD_INDEX = 2;
+    const uint32_t VECTOR_FOURTH_INDEX = 3;
+    const uint32_t VECTOR_FIFTH_INDEX = 4;
+}
+
+APP_ERROR EdsrSuperresolution::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+        return ret;
+    }
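+    // Derive the upscale factor and padding edge from the model's own tensor shapes
+    // (input assumed NHWC as produced by AIPP, output assumed NCHW).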
+    uint32_t outputModelHeight = modelDesc_.outputTensors[0].tensorDims[localParameter::VECTOR_THIRD_INDEX];
+    uint32_t inputModelHeight = modelDesc_.inputTensors[0].tensorDims[localParameter::VECTOR_SECOND_INDEX];
+    uint32_t inputModelWidth = modelDesc_.inputTensors[0].tensorDims[localParameter::VECTOR_THIRD_INDEX];
+
+    scale_ = outputModelHeight / inputModelHeight;
+    maxEdge_ = inputModelWidth > inputModelHeight ? inputModelWidth : inputModelHeight;
+    return APP_ERR_OK;
+}
+
+APP_ERROR EdsrSuperresolution::DeInit() {
+    model_->DeInit();
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+APP_ERROR EdsrSuperresolution::ReadImage(const std::string &imgPath, cv::Mat *imageMat) {
+    *imageMat = cv::imread(imgPath, cv::IMREAD_COLOR);
+    imageWidth_ = imageMat->cols;
+    imageHeight_ = imageMat->rows;
+    return APP_ERR_OK;
+}
+
+APP_ERROR EdsrSuperresolution::PaddingImage(cv::Mat *imageSrc, cv::Mat *imageDst, const uint32_t &targetLength) {
+    uint32_t padding_h = targetLength - imageHeight_;
+    uint32_t padding_w = targetLength - imageWidth_;
+    cv::copyMakeBorder(*imageSrc, *imageDst, 0, padding_h, 0, padding_w, cv::BORDER_CONSTANT, 0);
+    return APP_ERR_OK;
+}
+
+
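+// Copy the packed HWC uint8 image (BGR, as read by cv::imread) into device memory
+// and wrap it as an MxBase tensor.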
+APP_ERROR EdsrSuperresolution::CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase *tensorBase) {
+    const uint32_t dataSize = imageMat.cols * imageMat.rows * MxBase::YUV444_RGB_WIDTH_NU;
+
+    MxBase::MemoryData memoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE, deviceId_);
+
+    MxBase::MemoryData memoryDataSrc(imageMat.data, dataSize, MxBase::MemoryData::MEMORY_HOST_MALLOC);
+
+    APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc failed.";
+        return ret;
+    }
+
+    std::vector<uint32_t> shape = {imageMat.rows * MxBase::YUV444_RGB_WIDTH_NU, static_cast<uint32_t>(imageMat.cols)};
+    *tensorBase = MxBase::TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_UINT8);
+    return APP_ERR_OK;
+}
+
+APP_ERROR EdsrSuperresolution::Inference(std::vector<MxBase::TensorBase> *inputs,
+                                      std::vector<MxBase::TensorBase> *outputs) {
+    auto dtypes = model_->GetOutputDataType();
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector<uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        MxBase::TensorBase tensor(shape, dtypes[i], MxBase::MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
+        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs->push_back(tensor);
+    }
+    MxBase::DynamicInfo dynamicInfo = {};
+    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+    APP_ERROR ret = model_->ModelInference(*inputs, *outputs, dynamicInfo);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+
+APP_ERROR EdsrSuperresolution::PostProcess(std::vector<MxBase::TensorBase> *inputs, cv::Mat *imageMat) {
+    MxBase::TensorBase tensor = *inputs->begin();
+    int ret = tensor.ToHost();
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Tensor deploy to host failed.";
+        return ret;
+    }
+    uint32_t outputModelChannel = tensor.GetShape()[localParameter::VECTOR_SECOND_INDEX];
+    uint32_t outputModelHeight = tensor.GetShape()[localParameter::VECTOR_THIRD_INDEX];
+    uint32_t outputModelWidth = tensor.GetShape()[localParameter::VECTOR_FOURTH_INDEX];
+    LogInfo << "Channel:" << outputModelChannel << " Height:" << outputModelHeight << " Width:" << outputModelWidth;
+
+    uint32_t finalHeight = imageHeight_ * scale_;
+    uint32_t finalWidth = imageWidth_ * scale_;
+    cv::Mat output(finalHeight, finalWidth, CV_32FC3);
+
+    // View the flat NCHW float buffer as a 4-D array (GCC variable-length array extension).
+    auto data = reinterpret_cast<float(*)[outputModelChannel]
+    [outputModelHeight][outputModelWidth]>(tensor.GetBuffer());
+
+    // Copy CHW -> HWC while cropping the padded border back to the true output size.
+    for (size_t c = 0; c < outputModelChannel; ++c) {
+        for (size_t x = 0; x < finalHeight; ++x) {
+            for (size_t y = 0; y < finalWidth; ++y) {
+                output.at<cv::Vec3f>(x, y)[c] = data[0][c][x][y];
+            }
+        }
+    }
+
+    *imageMat = output;
+    return APP_ERR_OK;
+}
+
+APP_ERROR EdsrSuperresolution::Process(const std::string &imgPath) {
+    cv::Mat imageMat;
+    APP_ERROR ret = ReadImage(imgPath, &imageMat);
+    if (ret != APP_ERR_OK) {
+        LogError << "ReadImage failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    PaddingImage(&imageMat, &imageMat, maxEdge_);
+    MxBase::TensorBase tensorBase;
+    ret = CVMatToTensorBase(imageMat, &tensorBase);
+    if (ret != APP_ERR_OK) {
+        LogError << "CVMatToTensorBase failed, ret=" << ret << ".";
+        return ret;
+    }
+
+
+    std::vector<MxBase::TensorBase> inputs = {};
+    std::vector<MxBase::TensorBase> outputs = {};
+    inputs.push_back(tensorBase);
+    ret = Inference(&inputs, &outputs);
+
+    if (ret != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    cv::Mat output;
+    ret = PostProcess(&outputs, &output);
+    if (ret != APP_ERR_OK) {
+        LogError << "PostProcess failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    // Write the result next to the input, replacing its extension with "_infer.png".
+    std::string resultPath = imgPath;
+    size_t pos = resultPath.find_last_of('.');
+    if (pos == std::string::npos) {
+        pos = resultPath.size();
+    }
+    resultPath.replace(resultPath.begin() + pos, resultPath.end(), "_infer.png");
+    cv::imwrite(resultPath, output);
+    return APP_ERR_OK;
+}
diff --git a/research/cv/EDSR/infer/mxbase/EdsrSuperresolution.h b/research/cv/EDSR/infer/mxbase/EdsrSuperresolution.h
new file mode 100644
index 0000000000000000000000000000000000000000..36b1ab9cf70a29997fd323597af23cf8695f8f9a
--- /dev/null
+++ b/research/cv/EDSR/infer/mxbase/EdsrSuperresolution.h
@@ -0,0 +1,54 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef EDSR_SUPERRESOLUTION_H
+#define EDSR_SUPERRESOLUTION_H
+
+#include <memory>
+#include <vector>
+#include <string>
+#include <opencv2/opencv.hpp>
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "MxBase/PostProcessBases/PostProcessDataType.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+
+struct InitParam {
+    uint32_t deviceId;
+    std::string modelPath;
+};
+
+class EdsrSuperresolution {
+ public:
+    APP_ERROR Init(const InitParam &initParam);
+    APP_ERROR DeInit();
+    APP_ERROR ReadImage(const std::string &imgPath, cv::Mat *imageMat);
+    APP_ERROR CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase *tensorBase);
+    APP_ERROR Inference(std::vector<MxBase::TensorBase> *inputs, std::vector<MxBase::TensorBase> *outputs);
+    APP_ERROR Process(const std::string &imgPath);
+    APP_ERROR PostProcess(std::vector<MxBase::TensorBase> *inputs, cv::Mat *imageMat);
+    APP_ERROR PaddingImage(cv::Mat *imageSrc, cv::Mat *imageDst, const uint32_t &targetLength);
+
+ private:
+    std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
+    MxBase::ModelDesc modelDesc_;
+    uint32_t deviceId_ = 0;
+    uint32_t scale_ = 0;
+    uint32_t imageWidth_ = 0;
+    uint32_t imageHeight_ = 0;
+    uint32_t maxEdge_ = 0;
+};
+
+#endif  // EDSR_SUPERRESOLUTION_H
diff --git a/research/cv/EDSR/infer/mxbase/build.sh b/research/cv/EDSR/infer/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..566c6461ee1262c032d8c133a882d119e98755ec
--- /dev/null
+++ b/research/cv/EDSR/infer/mxbase/build.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+
+# env
+
+mkdir -p build
+cd build || exit
+
+function make_plugin() {
+    if ! cmake ..;
+    then
+      echo "cmake failed."
+      return 1
+    fi
+
+    if ! (make);
+    then
+      echo "make failed."
+      return 1
+    fi
+
+    return 0
+}
+
+if make_plugin;
+then
+  echo "INFO: Build successfully."
+else
+  echo "ERROR: Build failed."
+fi
+
+cd - || exit
diff --git a/research/cv/EDSR/infer/mxbase/main.cpp b/research/cv/EDSR/infer/mxbase/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cb3401c0d17b08e7a707539d38a2f394535b1a4b
--- /dev/null
+++ b/research/cv/EDSR/infer/mxbase/main.cpp
@@ -0,0 +1,46 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "EdsrSuperresolution.h"
+#include "MxBase/Log/Log.h"
+
+
+// infer an image
+int main(int argc, char *argv[]) {
+    if (argc <= 1) {
+        LogWarn << "Please input image path, such as './test.png'";
+        return APP_ERR_OK;
+    }
+    InitParam initParam = {};
+    initParam.deviceId = 0;
+    initParam.modelPath = "../model/edsr.om";
+    EdsrSuperresolution edsrSR;
+    APP_ERROR ret = edsrSR.Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "EdsrSuperresolution init failed, ret=" << ret << ".";
+        return ret;
+    }
+    std::string imgPath = argv[1];
+    ret = edsrSR.Process(imgPath);
+    if (ret != APP_ERR_OK) {
+        LogError << "EdsrSuperresolution process failed, ret=" << ret << ".";
+        edsrSR.DeInit();
+        return ret;
+    }
+
+    edsrSR.DeInit();
+    return APP_ERR_OK;
+}
diff --git a/research/cv/EDSR/infer/sdk/eval.py b/research/cv/EDSR/infer/sdk/eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..57242b243db2b2e8fe214eccbc15724e68318278
--- /dev/null
+++ b/research/cv/EDSR/infer/sdk/eval.py
@@ -0,0 +1,70 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""eval for sdk infer"""
+import argparse
+import os
+import math
+import cv2
+import numpy as np
+
+def parser_args():
+    """parse arguments"""
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--label_dir", type=str, default="../data/DIV2K/label/",
+                        help="path of label images directory")
+    parser.add_argument("--infer_dir", type=str, default=" ../data/sdk_out",
+                        help="path of infer images directory")
+    parser.add_argument("--scale", type=int, default=2)
+    return parser.parse_args()
+
+
+def calc_psnr(sr, hr, scale, rgb_range):
+    """calculate psnr"""
+    hr = np.float32(hr)
+    sr = np.float32(sr)
+    diff = (sr - hr) / rgb_range
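+    # project the RGB difference onto the luma (Y) channel, as in the original EDSR evaluation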
+    gray_coeffs = np.array([65.738, 129.057, 25.064]).reshape((1, 3, 1, 1)) / 256
+    diff = np.multiply(diff, gray_coeffs).sum(1)
+    if hr.size == 1:
+        return 0
+    if scale == 1:
+        valid = diff
+    else:
+        # shave a border of `scale` pixels before computing the MSE
+        shave = scale
+        valid = diff[..., shave:-shave, shave:-shave]
+    mse = np.mean(pow(valid, 2))
+    return -10 * math.log10(mse)
+
+
+if __name__ == '__main__':
+    args = parser_args()
+    infer_path_list = os.listdir(args.infer_dir)
+    total_num = len(infer_path_list)
+    mean_psnr = 0.0
+    for infer_p in infer_path_list:
+        infer_path = os.path.join(args.infer_dir, infer_p)
+        label_path = os.path.join(args.label_dir, infer_p.replace('_infer', ''))
+        infer_img = cv2.imread(infer_path)
+        h, w = infer_img.shape[:2]
+        label_img = cv2.imread(label_path)[0:h, 0:w]
+        infer_img = np.expand_dims(infer_img, 0).transpose((0, 3, 1, 2))
+        label_img = np.expand_dims(label_img, 0).transpose((0, 3, 1, 2))
+        psnr = calc_psnr(infer_img, label_img, args.scale, 255.0)
+        mean_psnr += psnr/total_num
+        print("current psnr: ", psnr)
+    print('Mean psnr of %d images is %.4f' % (total_num, mean_psnr))
diff --git a/research/cv/EDSR/infer/sdk/main.py b/research/cv/EDSR/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..6932b57b5e0fd0c7fbd9641f31c0e1ee8a0ab0a1
--- /dev/null
+++ b/research/cv/EDSR/infer/sdk/main.py
@@ -0,0 +1,43 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""run sdk infer"""
+import argparse
+import os
+from sr_infer_wrapper import SRInferWrapper
+
+def parser_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--input_dir", type=str, default="../data/DIV2K/input/",
+                        help="path of input images directory")
+    parser.add_argument("--pipeline_path", type=str, default="../data/config/edsr.pipeline",
+                        help="path of pipeline file")
+    parser.add_argument("--output_dir", type=str, default="../data/sdk_out/",
+                        help="path of output images directory")
+    return parser.parse_args()
+
+
+if __name__ == '__main__':
+    args = parser_args()
+    sr_infer = SRInferWrapper()
+    sr_infer.load_pipeline(args.pipeline_path)
+    path_list = os.listdir(args.input_dir)
+    path_list.sort()
+    if not os.path.exists(args.output_dir):
+        os.makedirs(args.output_dir)
+
+    for img_path in path_list:
+        print(img_path)
+        res = sr_infer.do_infer(os.path.join(args.input_dir, img_path))
+        res.save(os.path.join(args.output_dir, img_path.replace('x2', '_infer')))
diff --git a/research/cv/EDSR/infer/sdk/run.sh b/research/cv/EDSR/infer/sdk/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f1b9ff91f97c47c2954940b10c47b9e896ff1622
--- /dev/null
+++ b/research/cv/EDSR/infer/sdk/run.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+# The number of parameters must be 3.
+if [ $# -ne 3 ]
+then
+  echo "Wrong parameter format."
+  echo "Usage:"
+  echo "         bash $0 [INPUT_PATH] [PIPELINE_PATH] [OUTPUT_PATH]"
+  echo "Example: "
+  echo "         bash run.sh ../data/DIV2K/input/ ../data/config/edsr.pipeline ../data/sdk_out/"
+
+  exit 1
+fi
+
+# The path of a folder containing eval images.
+input_dir=$1
+# The path of pipeline file.
+pipeline_path=$2
+# The path of a folder used to store all results.
+output_dir=$3
+
+
+if [ ! -d "$input_dir" ]
+then
+  echo "Please input the correct directory containing images."
+  exit 1
+fi
+
+if [ ! -d "$output_dir" ]
+then
+  mkdir -p "$output_dir"
+fi
+
+set -e
+
+# Simple log helper functions (defined before use below)
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+CUR_PATH=$(cd "$(dirname "$0")" || { warn "Failed to check path/to/run.sh" ; exit ; } ; pwd)
+echo "enter $CUR_PATH"
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/${ASCEND_VERSION}/latest/acllib/lib64:${LD_LIBRARY_PATH}
+
+#to set PYTHONPATH, import the StreamManagerApi.py
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+if [ ! "${MX_SDK_HOME}" ]
+then
+export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
+fi
+
+if [ ! "${MX_SDK_HOME}" ]
+then
+export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
+fi
+
+python3 main.py --input_dir=$input_dir \
+                --pipeline_path=$pipeline_path \
+                --output_dir=$output_dir
+
+exit 0
\ No newline at end of file
diff --git a/research/cv/EDSR/infer/sdk/sr_infer_wrapper.py b/research/cv/EDSR/infer/sdk/sr_infer_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..2631e2e8849fbbd8f14d72fa784f606952c99e89
--- /dev/null
+++ b/research/cv/EDSR/infer/sdk/sr_infer_wrapper.py
@@ -0,0 +1,125 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""super resolution infer wrapper"""
+import json
+import numpy as np
+from PIL import Image
+import cv2
+from StreamManagerApi import StreamManagerApi, StringVector, InProtobufVector, MxProtobufIn
+import MxpiDataType_pb2 as MxpiDataType
+
+DEFAULT_IMAGE_WIDTH = 1020
+DEFAULT_IMAGE_HEIGHT = 1020
+CHANNELS = 3
+SCALE = 2
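+# The .om model takes a fixed 1020x1020 input: smaller images are reflect-padded
+# up to that size and the 2x output is cropped back after inference.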
+
+def padding(img, target_shape):
+    h, w = target_shape[0], target_shape[1]
+    img_h, img_w, _ = img.shape
+    dh, dw = h - img_h, w - img_w
+    if dh < 0 or dw < 0:
+        raise RuntimeError(f"target_shape is bigger than img.shape, {target_shape} > {img.shape}")
+    if dh != 0 or dw != 0:
+        img = np.pad(img, ((0, int(dh)), (0, int(dw)), (0, 0)), "reflect")
+    return img
+
+
+def unpadding(img, target_shape):
+    h, w = target_shape[0], target_shape[1]
+    img_h, img_w, _ = img.shape
+    if img_h > h:
+        img = img[:h, :, :]
+    if img_w > w:
+        img = img[:, :w, :]
+    return img
+
+
+class SRInferWrapper:
+    """super resolution infer wrapper"""
+    def __init__(self):
+        self.stream_name = None
+        self.streamManagerApi = StreamManagerApi()
+        # init stream manager
+        if self.streamManagerApi.InitManager() != 0:
+            raise RuntimeError("Failed to init stream manager.")
+
+    def load_pipeline(self, pipeline_path):
+        # create streams by pipeline config file
+        with open(pipeline_path, 'r') as f:
+            pipeline = json.load(f)
+        self.stream_name = list(pipeline.keys())[0].encode()
+        pipelineStr = json.dumps(pipeline).encode()
+        if self.streamManagerApi.CreateMultipleStreams(pipelineStr) != 0:
+            raise RuntimeError("Failed to create stream.")
+
+    def do_infer(self, image_path):
+        """do infer process"""
+        # construct the input of the stream
+        image = cv2.imread(image_path)
+        ori_h, ori_w, _ = image.shape
+        image = padding(image, (DEFAULT_IMAGE_HEIGHT, DEFAULT_IMAGE_WIDTH))
+        tensor_pkg_list = MxpiDataType.MxpiTensorPackageList()
+        tensor_pkg = tensor_pkg_list.tensorPackageVec.add()
+        tensor_vec = tensor_pkg.tensorVec.add()
+        tensor_vec.deviceId = 0
+        tensor_vec.memType = 0
+
+        for dim in [1, *image.shape]:
+            tensor_vec.tensorShape.append(dim)
+
+        input_data = image.tobytes()
+        tensor_vec.dataStr = input_data
+        tensor_vec.tensorDataSize = len(input_data)
+
+        protobuf_vec = InProtobufVector()
+        protobuf = MxProtobufIn()
+        protobuf.key = b'appsrc0'
+        protobuf.type = b'MxTools.MxpiTensorPackageList'
+        protobuf.protobuf = tensor_pkg_list.SerializeToString()
+        protobuf_vec.push_back(protobuf)
+
+        unique_id = self.streamManagerApi.SendProtobuf(
+            self.stream_name, 0, protobuf_vec)
+        if unique_id < 0:
+            raise RuntimeError("Failed to send data to stream.")
+
+        # get plugin output data
+        key = b"mxpi_tensorinfer0"
+        keyVec = StringVector()
+        keyVec.push_back(key)
+        inferResult = self.streamManagerApi.GetProtobuf(self.stream_name, 0, keyVec)
+        if inferResult.size() == 0:
+            raise RuntimeError("inferResult is null")
+        if inferResult[0].errorCode != 0:
+            raise RuntimeError("GetProtobuf error. errorCode=%d, errorMsg=%s" % (
+                inferResult[0].errorCode, inferResult[0].messageName.decode()))
+
+        # get the infer result
+        inferList0 = MxpiDataType.MxpiTensorPackageList()
+        inferList0.ParseFromString(inferResult[0].messageBuf)
+        inferVisionData = inferList0.tensorPackageVec[0].tensorVec[0].dataStr
+
+        # converting the byte data into 32 bit float array
+        output_img_data = np.frombuffer(inferVisionData, dtype=np.float32)
+        output_img_data = np.clip(output_img_data, 0, 255)
+        output_img_data = np.round(output_img_data).astype(np.uint8)
+        output_img_data = np.reshape(output_img_data, (CHANNELS, SCALE*DEFAULT_IMAGE_HEIGHT, SCALE*DEFAULT_IMAGE_WIDTH))
+        output_img_data = output_img_data.transpose((1, 2, 0))
+        output_img_data = unpadding(output_img_data, (SCALE*ori_h, SCALE*ori_w))
+        result = Image.fromarray(output_img_data[..., ::-1])
+
+        return result
diff --git a/research/cv/EDSR/modelarts/train_start.py b/research/cv/EDSR/modelarts/train_start.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c3a1b019e0a13354000af8728b3cb95e1a41cff
--- /dev/null
+++ b/research/cv/EDSR/modelarts/train_start.py
@@ -0,0 +1,127 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""train edsr om modelarts"""
+import argparse
+import os
+import subprocess
+import moxing as mox
+
+
+_CACHE_DATA_URL = "/cache/data_url"
+_CACHE_TRAIN_URL = "/cache/train_url"
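+# ModelArts stages data through local cache paths: the dataset is copied in from
+# data_url before training and outputs are copied back to train_url afterwards.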
+
+def _parse_args():
+    """parse arguments"""
+    parser = argparse.ArgumentParser(description='train and export edsr on modelarts')
+    # train output path
+    parser.add_argument('--train_url', type=str, default='', help='where training log and ckpts saved')
+    # dataset dir
+    parser.add_argument('--data_url', type=str, default='', help='path of the dataset directory')
+    # train config
+    parser.add_argument('--data_train', type=str, default='DIV2K', help='train dataset name')
+    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train')
+    parser.add_argument('--batch_size', type=int, default=16, help='input batch size for training')
+    parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
+    parser.add_argument('--init_loss_scale', type=float, default=65536., help='scaling factor')
+    parser.add_argument('--loss_scale', type=float, default=1024.0, help='loss_scale')
+    parser.add_argument('--scale', type=str, default='2', help='super resolution scale')
+    parser.add_argument('--ckpt_save_path', type=str, default='ckpt', help='path to save ckpt')
+    parser.add_argument('--ckpt_save_interval', type=int, default=10, help='save ckpt frequency, unit is epoch')
+    parser.add_argument('--ckpt_save_max', type=int, default=5, help='max number of saved ckpt')
+    parser.add_argument('--task_id', type=int, default=0)
+    # export config
+    parser.add_argument("--export_batch_size", type=int, default=1, help="batch size")
+    parser.add_argument("--export_file_name", type=str, default="edsr", help="output file name.")
+    parser.add_argument("--export_file_format", type=str, default="AIR",
+                        choices=['MINDIR', 'AIR', 'ONNX'], help="file format")
+    args, _ = parser.parse_known_args()
+
+    return args
+
+
+def _train(args, data_url):
+    """use train.py"""
+    pwd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+    train_file = os.path.join(pwd, "train.py")
+
+    cmd = ["python", train_file,
+           f"--dir_data={os.path.abspath(data_url)}",
+           f"--data_train={args.data_train}",
+           f"--epochs={args.epochs}",
+           f"--batch_size={args.batch_size}",
+           f"--lr={args.lr}",
+           f"--init_loss_scale={args.init_loss_scale}",
+           f"--loss_scale={args.loss_scale}",
+           f"--scale={args.scale}",
+           f"--task_id={args.task_id}",
+           f"--ckpt_save_path={os.path.join(_CACHE_TRAIN_URL,args.ckpt_save_path)}",
+           f"--ckpt_save_interval={args.ckpt_save_interval}",
+           f"--ckpt_save_max={args.ckpt_save_max}"]
+
+    print(' '.join(cmd))
+    process = subprocess.Popen(cmd, shell=False)
+    return process.wait()
+
+def _get_last_ckpt(ckpt_dir):
+    """get the last ckpt path"""
+    file_dict = {}
+    lists = os.listdir(ckpt_dir)
+    if not lists:
+        print("No ckpt file found.")
+        return None
+    for i in lists:
+        ctime = os.stat(os.path.join(ckpt_dir, i)).st_ctime
+        file_dict[ctime] = i
+    max_ctime = max(file_dict.keys())
+    ckpt_file = os.path.join(ckpt_dir, file_dict[max_ctime])
+
+    return ckpt_file
+
+
+def _export_air(args, ckpt_dir):
+    """export"""
+    ckpt_file = _get_last_ckpt(ckpt_dir)
+    if not ckpt_file:
+        return
+    pwd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+    export_file = os.path.join(pwd, "export.py")
+    cmd = ["python", export_file,
+           f"--batch_size={args.export_batch_size}",
+           f"--ckpt_path={ckpt_file}",
+           f"--file_name={os.path.join(_CACHE_TRAIN_URL, args.export_file_name)}",
+           f"--file_format={args.export_file_format}",]
+    print(f"Start exporting, cmd = {' '.join(cmd)}.")
+    process = subprocess.Popen(cmd, shell=False)
+    process.wait()
+
+
+def main():
+    args = _parse_args()
+
+    os.makedirs(_CACHE_TRAIN_URL, exist_ok=True)
+    os.makedirs(_CACHE_DATA_URL, exist_ok=True)
+
+    mox.file.copy_parallel(args.data_url, _CACHE_DATA_URL)
+    data_url = _CACHE_DATA_URL
+
+    ret = _train(args, data_url)
+    if ret != 0:
+        raise RuntimeError(f"Training failed with exit code {ret}.")
+    _export_air(args, os.path.join(_CACHE_TRAIN_URL, args.ckpt_save_path))
+    mox.file.copy_parallel(_CACHE_TRAIN_URL, args.train_url)
+
+
+if __name__ == '__main__':
+    main()