diff --git a/official/cv/maskrcnn_mobilenetv1/infer/convert/convert_om.sh b/official/cv/maskrcnn_mobilenetv1/infer/convert/convert_om.sh
index 1668982990a32df44b1eea569bfd875c018a951d..61ee09c0a55d102967eeddbcda7a3af9e267ac48 100644
--- a/official/cv/maskrcnn_mobilenetv1/infer/convert/convert_om.sh
+++ b/official/cv/maskrcnn_mobilenetv1/infer/convert/convert_om.sh
@@ -29,11 +29,6 @@ input_air_path=$1
 aipp_cfg_file=$2
 output_om_path=$3
 
-export install_path=/usr/local/Ascend/
-export ASCEND_ATC_PATH=${install_path}/atc
-
-export ASCEND_SLOG_PRINT_TO_STDOUT=1
-
 echo "Input AIR file path: ${input_air_path}"
 echo "Output OM file path: ${output_om_path}"
 
diff --git a/official/cv/maskrcnn_mobilenetv1/infer/mxbase/src/PostProcess/MaskRcnnMindsporePost.cpp b/official/cv/maskrcnn_mobilenetv1/infer/mxbase/src/PostProcess/MaskRcnnMindsporePost.cpp
index b10d3741558c0d35f5876506d8cf1aad9922fe2b..ec3658eabba30f2c09fe0d5617a5af9702dba599 100644
--- a/official/cv/maskrcnn_mobilenetv1/infer/mxbase/src/PostProcess/MaskRcnnMindsporePost.cpp
+++ b/official/cv/maskrcnn_mobilenetv1/infer/mxbase/src/PostProcess/MaskRcnnMindsporePost.cpp
@@ -24,6 +24,14 @@
 #include "acl/acl.h"
 
 
+namespace localParameter {
+    const uint32_t VECTOR_FIRST_INDEX = 0;
+    const uint32_t VECTOR_SECOND_INDEX = 1;
+    const uint32_t VECTOR_THIRD_INDEX = 2;
+    const uint32_t VECTOR_FOURTH_INDEX = 3;
+    const uint32_t VECTOR_FIFTH_INDEX = 4;
+}
+
 namespace {
 // Output Tensor
 const int OUTPUT_TENSOR_SIZE = 4;
@@ -133,39 +141,45 @@ bool MaskRcnnMindsporePost::IsValidTensors(const std::vector<TensorBase> &tensor
     }
 
     uint32_t total_num = classNum_ * rpnMaxNum_;
-    if (bboxShape[VECTOR_SECOND_INDEX] != total_num) {
-        LogError << "The output tensor is mismatched: " << total_num << "/" << bboxShape[VECTOR_SECOND_INDEX] << ").";
+    if (bboxShape[localParameter::VECTOR_SECOND_INDEX] != total_num) {
+        LogError << "The output tensor is mismatched: " << total_num << "/"
+                 << bboxShape[localParameter::VECTOR_SECOND_INDEX] << ").";
         return false;
     }
 
-    if (bboxShape[VECTOR_THIRD_INDEX] != OUTPUT_BBOX_TWO_INDEX_SHAPE) {
-        LogError << "The number of bbox[" << VECTOR_THIRD_INDEX << "] dimensions (" << bboxShape[VECTOR_THIRD_INDEX]
+    if (bboxShape[localParameter::VECTOR_THIRD_INDEX] != OUTPUT_BBOX_TWO_INDEX_SHAPE) {
+        LogError << "The number of bbox[" << localParameter::VECTOR_THIRD_INDEX << "] dimensions ("
+                 << bboxShape[localParameter::VECTOR_THIRD_INDEX]
                  << ") is not equal to (" << OUTPUT_BBOX_TWO_INDEX_SHAPE << ")";
         return false;
     }
 
     auto classShape = tensors[OUTPUT_CLASS_INDEX].GetShape();
-    if (classShape[VECTOR_SECOND_INDEX] != total_num) {
-        LogError << "The output tensor is mismatched: (" << total_num << "/" << classShape[VECTOR_SECOND_INDEX]
+    if (classShape[localParameter::VECTOR_SECOND_INDEX] != total_num) {
+        LogError << "The output tensor is mismatched: (" << total_num << "/"
+                 << classShape[localParameter::VECTOR_SECOND_INDEX]
                  << "). ";
         return false;
     }
 
     auto maskShape = tensors[OUTPUT_MASK_INDEX].GetShape();
-    if (maskShape[VECTOR_SECOND_INDEX] != total_num) {
-        LogError << "The output tensor is mismatched: (" << total_num << "/" << maskShape[VECTOR_SECOND_INDEX] << ").";
+    if (maskShape[localParameter::VECTOR_SECOND_INDEX] != total_num) {
+        LogError << "The output tensor is mismatched: (" << total_num << "/"
+                 << maskShape[localParameter::VECTOR_SECOND_INDEX] << ").";
         return false;
     }
 
     auto maskAreaShape = tensors[OUTPUT_MASK_AREA_INDEX].GetShape();
-    if (maskAreaShape[VECTOR_SECOND_INDEX] != total_num) {
-        LogError << "The output tensor is mismatched: (" << total_num << "/" << maskAreaShape[VECTOR_SECOND_INDEX]
+    if (maskAreaShape[localParameter::VECTOR_SECOND_INDEX] != total_num) {
+        LogError << "The output tensor is mismatched: (" << total_num << "/"
+                 << maskAreaShape[localParameter::VECTOR_SECOND_INDEX]
                  << ").";
         return false;
     }
 
-    if (maskAreaShape[VECTOR_THIRD_INDEX] != maskSize_) {
-        LogError << "The output tensor of mask is mismatched: (" << maskAreaShape[VECTOR_THIRD_INDEX] << "/"
+    if (maskAreaShape[localParameter::VECTOR_THIRD_INDEX] != maskSize_) {
+        LogError << "The output tensor of mask is mismatched: ("
+                 << maskAreaShape[localParameter::VECTOR_THIRD_INDEX] << "/"
                  << maskSize_ << ").";
         return false;
     }
@@ -186,8 +200,8 @@ static void GetDetectBoxesTopK(std::vector<MxBase::DetectBox> &detBoxes, size_t
     detBoxes.erase(detBoxes.begin() + kVal, detBoxes.end());
 }
 
-void MaskRcnnMindsporePost::GetValidDetBoxes(const std::vector<TensorBase> &tensors, std::vector<DetectBox> &detBoxes,
-                                             const uint32_t batchNum) {
+void MaskRcnnMindsporePost::GetValidDetBoxes(const std::vector<TensorBase> &tensors,
+                                             std::vector<DetectBox> &detBoxes, const uint32_t batchNum) {
     LogInfo << "Begin to GetValidDetBoxes Mask GetValidDetBoxes.";
     auto *bboxPtr = reinterpret_cast<aclFloat16 *>(GetBuffer(tensors[OUTPUT_BBOX_INDEX], batchNum));
     auto *labelPtr = reinterpret_cast<int32_t *>(GetBuffer(tensors[OUTPUT_CLASS_INDEX], batchNum));
@@ -228,8 +242,8 @@ APP_ERROR MaskRcnnMindsporePost::GetMaskSize(const ObjectInfo &objInfo, const Re
     int width = static_cast<int>(objInfo.x1 - objInfo.x0 + 1);
     int height = static_cast<int>(objInfo.y1 - objInfo.y0 + 1);
     if (width < 1 || height < 1) {
-        LogError << "The mask bbox is invalid, will be ignored, bboxWidth: " << width << ", bboxHeight: " << height
-                 << ".";
+        LogError << "The mask bbox is invalid, will be ignored, bboxWidth: " <<
+                 width << ", bboxHeight: " << height << ".";
         return APP_ERR_COMM_FAILURE;
     }
 
@@ -238,7 +252,8 @@ APP_ERROR MaskRcnnMindsporePost::GetMaskSize(const ObjectInfo &objInfo, const Re
     return APP_ERR_OK;
 }
 
-APP_ERROR MaskRcnnMindsporePost::MaskPostProcess(ObjectInfo &objInfo, void *maskPtr, const ResizedImageInfo &imgInfo) {
+APP_ERROR MaskRcnnMindsporePost::MaskPostProcess(ObjectInfo &objInfo, void *maskPtr,
+                                                 const ResizedImageInfo &imgInfo) {
     // resize
     cv::Mat maskMat(maskSize_, maskSize_, CV_32FC1);
     auto *maskAclPtr = reinterpret_cast<aclFloat16 *>(maskPtr);
diff --git a/official/cv/yolov4/infer/README.md b/official/cv/yolov4/infer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..673cfedfee18ae6fe70619657c97a84d14a7a0c9
--- /dev/null
+++ b/official/cv/yolov4/infer/README.md
@@ -0,0 +1,831 @@
+# Basic Delivery Information
+
+**Publisher**: Huawei
+
+**Application Domain**: Object Detection
+
+**Version**: 1.1
+
+**Modified**: 2022.3.29
+
+**Size**: 251.52 MB (air) / 126.24 MB (om) / 503.62 MB (ckpt)
+
+**Framework**: MindSpore\_1.3.0
+
+**Model Format**: ckpt/air/om
+
+**Precision**: Mixed/FP16
+
+**Processor**: Ascend 910 / Ascend 310
+
+**Categories**: Released
+
+**Description**: A YOLOv4 network model trained and saved with the MindSpore framework. Converted with the ATC tool, it runs on Ascend AI devices and supports inference with both the MindX SDK and MxBase.
+
+# Overview
+
+## Introduction
+
+YOLOv4 is a state-of-the-art detector that is faster (FPS) and more accurate (MS COCO AP50...95 and AP50) than all available alternative detectors.
+
+The paper validated a large number of features and selected those that improve the accuracy of both classification and detection.
+
+These features can serve as best practices for future research and development.
+
+* [Reference paper](https://arxiv.org/pdf/2004.10934.pdf): Bochkovskiy A, Wang C Y, Liao H Y M. YOLOv4: Optimal Speed and Accuracy of Object Detection[J]. arXiv preprint arXiv:2004.10934, 2020.
+
+To fetch the code at a specific commit_id with Git:
+
+```shell
+git clone {repository_url}     # clone the repository
+cd {repository_name}           # enter the model's code directory
+git checkout {branch}          # check out the corresponding branch
+git reset --hard {commit_id}   # reset the code to the given commit_id
+cd {code_path}                 # enter the model code path; skip if the repo contains only this model
+```
+
+## Default Configuration
+
+1. Network structure
+
+The CSPDarknet53 backbone, the SPP add-on module, the PANet path-aggregation network, and the (anchor-based) YOLOv4 head are chosen as the YOLOv4 architecture.
+
+2. Pre-trained model
+
+YOLOv4 needs the CSPDarknet53 backbone to extract image features for detection.
+
+The model pre-trained on ImageNet2012 can be obtained from [here](https://gitee.com/link?target=https%3A%2F%2Fdownload.mindspore.cn%2Fmodel_zoo%2Fr1.2%2Fcspdarknet53_ascend_v120_imagenet2012_official_cv_bs64_top1acc7854_top5acc9428%2Fcspdarknet53_ascend_v120_imagenet2012_official_cv_bs64_top1acc7854_top5acc9428.ckpt).
+
+3. Training parameters
+
+```SHELL
+lr_scheduler: cosine_annealing
+lr: 0.1
+training_shape: 416
+max_epochs: 320
+warmup_epochs: 4
+```
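+
+As a hedged illustration (the exact schedule lives in the training code; `eta_min` and the per-epoch granularity here are assumptions), cosine annealing with linear warmup behaves roughly like this:
+
+```python
+import math
+
+def lr_at_epoch(epoch, lr=0.1, max_epochs=320, warmup_epochs=4, eta_min=0.0):
+    """Linear warmup followed by cosine annealing, using the parameters above."""
+    if epoch < warmup_epochs:
+        return lr * (epoch + 1) / warmup_epochs
+    progress = (epoch - warmup_epochs) / (max_epochs - warmup_epochs)
+    return eta_min + (lr - eta_min) * 0.5 * (1.0 + math.cos(math.pi * progress))
+
+print([round(lr_at_epoch(e), 4) for e in (0, 3, 4, 160, 319)])
+```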
+
+## 鏀寔鐗规€�
+
+### 鏀寔鐗规€�
+
+鏀寔鐨勭壒鎬у寘鎷細1銆佸垎甯冨紡骞惰璁粌銆�2銆佹贩鍚堢簿搴﹁缁冦€�
+
+### 鍒嗗竷寮忓苟琛岃缁�
+
+MindSpore鏀寔鏁版嵁骞惰鍙婅嚜鍔ㄥ苟琛屻€傝嚜鍔ㄥ苟琛屾槸MindSpore铻嶅悎浜嗘暟鎹苟琛屻€佹ā鍨嬪苟琛屽強娣峰悎骞惰鐨勪竴绉嶅垎甯冨紡骞惰妯″紡锛屽彲浠ヨ嚜鍔ㄥ缓绔嬩唬浠锋ā鍨嬶紝涓虹敤鎴烽€夋嫨涓€绉嶅苟琛屾ā寮忋€傜浉鍏充唬鐮佺ず渚嬨€�
+
+```shell
+context.set_auto_parallel_context(parallel_mode = ParallelMode.DATA_PARALLEL, device_num = device_num)
+```
+
+### 娣峰悎绮惧害璁粌
+
+娣峰悎绮惧害璁粌鏂规硶鏄€氳繃娣峰悎浣跨敤鍗曠簿搴﹀拰鍗婄簿搴︽暟鎹牸寮忔潵鍔犻€熸繁搴︾缁忕綉缁滆缁冪殑杩囩▼锛屽悓鏃朵繚鎸佷簡鍗曠簿搴﹁缁冩墍鑳借揪鍒扮殑缃戠粶绮惧害銆傛贩鍚堢簿搴﹁缁冭兘澶熷姞閫熻绠楄繃绋嬶紝鍚屾椂鍑忓皯鍐呭瓨浣跨敤鍜屽瓨鍙栵紝骞朵娇寰楀湪鐗瑰畾鐨勭‖浠朵笂鍙互璁粌鏇村ぇ鐨勬ā鍨嬫垨batch size銆�
+
+瀵逛簬FP16鐨勭畻瀛愶紝鑻ョ粰瀹氱殑鏁版嵁绫诲瀷鏄疐P32锛孧indSpore妗嗘灦鐨勫悗绔細杩涜闄嶇簿搴﹀鐞嗐€傜敤鎴峰彲浠ュ紑鍚疘NFO鏃ュ織锛屽苟閫氳繃鎼滅储鍏抽敭瀛椻€淩educe precision鈥濇煡鐪嬮檷绮惧害澶勭悊鐨勭畻瀛愩€�
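+
+The sketch below is a hypothetical, minimal MindSpore example (not taken from this repository); the tiny `nn.Dense` network stands in for YOLOv4, and mixed precision is enabled through the `amp_level` argument of `Model`:
+
+```python
+import mindspore.nn as nn
+from mindspore import Model
+
+net = nn.Dense(16, 10)  # stand-in for the real network
+loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
+opt = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
+
+# amp_level="O3" casts the network to FP16; operators that cannot run in FP16
+# are raised back to FP32 by the backend ("Reduce precision" in INFO logs).
+model = Model(net, loss_fn=loss, optimizer=opt, amp_level="O3")
+```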
+
+# Preparation
+
+## Training Environment
+
+1. For hardware setup, see the ["Driver and Firmware Installation and Upgrade Guide"](https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909) for your hardware product. Firmware and drivers matching the CANN version must be installed on the device.
+
+2. Python3 and Docker must be installed on the host; log in to [Ascend Hub](https://ascend.huawei.com/ascendhub/#/home) to obtain the image.
+
+   The images supported by this model are listed in the following table.
+   **Table 1** Image list
+
+    | Image Name | Image Version | Matching CANN Version |
+    | ------- | ------------ | --------------------- |
+    | ARM/x86: [mindspore-modelzoo](https://ascendhub.huawei.com/#/detail/mindspore-modelzoo) | 21.0.4 | [5.0.2](https://www.hiascend.com/software/cann/commercial) |
+
+## Inference Environment
+
+1. For hardware, development, and runtime environment setup, see the [*CANN Software Installation Guide*](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-upgrade).
+
+2. Docker must be installed on the host; log in to [Ascend Hub](https://ascendhub.huawei.com/#/home) to obtain the image.
+
+   The images supported by this model are listed in the following table.
+
+   **Table 1** Image list
+
+   | Image Name | Image Version | Matching CANN Version |
+   | ------- | ------------ | --------------------- |
+   | ARM/x86: [infer-modelzoo](https://ascendhub.huawei.com/#/detail/infer-modelzoo) | 21.0.4 | [5.0.2](https://www.hiascend.com/software/cann/commercial) |
+
+## Source Code Layout
+
+1. The script directory structure is as follows:
+
+ ```shell
+ infer
+ ├── README.md               # offline inference documentation
+ ├── convert
+ │   ├── aipp.config         # AIPP configuration file
+ │   └── air2om.sh           # OM model conversion script
+ ├── data
+ │   ├── models              # model files
+ │   │   ├── yolov4_coco2017_acc_test.cfg  # hyper-parameter settings for inference
+ │   │   ├── yolov4.om       # generated OM model
+ │   │   ├── yolov4.air      # AIR model produced by ModelArts training
+ │   │   ├── trainval.txt    # validation data prepared for inference
+ │   │   ├── object_task_metric.py  # converts inference results to COCO format
+ │   │   └── coco2017.names  # labels of the COCO dataset
+ │   └── images              # model input dataset; copy the contents of val2017 here
+ ├── mxbase                  # inference scripts based on MxBase
+ │   ├── src
+ │   │   ├── PostProcess     # post-processing
+ │   │   │   ├── Yolov4MindsporePost.cpp
+ │   │   │   └── Yolov4MindsporePost.h
+ │   │   ├── Yolov4Detection.h
+ │   │   ├── Yolov4Detection.cpp
+ │   │   └── main.cpp
+ │   ├── CMakeLists.txt
+ │   ├── build.sh            # build script
+ │   └── infermxbase.sh      # verify inference accuracy
+ ├── sdk                     # inference scripts based on the SDK package
+ │   ├── mxpi
+ │   │   ├── CMakeLists.txt
+ │   │   └── build.sh
+ │   ├── config
+ │   │   └── yolov4.pipeline
+ │   ├── run.sh
+ │   └── infersdk.sh         # verify inference accuracy
+ └── docker_start_infer.sh   # container startup script
+ ```
+
+# 璁粌
+
+## 鏁版嵁闆嗗噯澶�
+
+1. 璇风敤鎴疯嚜琛屽噯澶囧ソ鏁版嵁闆嗭紝浣跨敤鐨勬暟鎹泦锛歔COCO2017](https://gitee.com/link?target=https%3A%2F%2Fcocodataset.org%2F%23download)  
+
+* 鏀寔鐨勬暟鎹泦锛歔COCO2017](https://gitee.com/link?target=https%3A%2F%2Fcocodataset.org%2F%23download) 鎴栦笌MS COCO鏍煎紡鐩稿悓鐨勬暟鎹泦  
+
+* 鏀寔鐨勬爣娉細[COCO2017](https://gitee.com/link?target=https%3A%2F%2Fcocodataset.org%2F%23download) 鎴栦笌MS COCO鐩稿悓鏍煎紡鐨勬爣娉�
+
+2. 鏁版嵁鍑嗗
+
+* 灏嗘暟鎹泦鏀惧埌浠绘剰璺緞锛屾枃浠跺す搴旇鍖呭惈濡備笅鏂囦欢
+
+      ```SHELL
+      .
+      └── datasets
+          ├── annotations
+          │   ├── instances_train2017.json
+          │   └── instances_val2017.json
+          ├── train2017
+          │   ├── picture1.jpg
+          │   ├── ...
+          │   └── picturen.jpg
+          └── val2017
+              ├── picture1.jpg
+              ├── ...
+              └── picturen.jpg
+      ```
+
+* Generate a TXT-format inference file for the dataset.
+
+      ```shell
+      # export the txt inference data
+      python coco_trainval_anns.py --data_url=./datasets/ --train_url=./infer/data/models/ --val_url=./infer/data/images/
+      # data_url is the path of the datasets folder, train_url is where the txt is written, val_url is where the inference images are stored
+      ```
+
+      Each line looks like this:
+
+      ```SHELL
+      0 ../infer/data/images/000000289343.jpg 529 640 16 473 395 511 423 0 204 235 264 412 13 0 499 339 605 1 204 304 256 456
+      ```
+
+      Each line is a space-separated image annotation: the first column is a sequence number, the second is the absolute path of the image used for inference, the next two are the image width and height, and the remaining fields are boxes in [class, xmin, ymin, xmax, ymax] format (see the parsing sketch below).
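+
+      A minimal Python sketch of parsing one such line (using the sample values above):
+
+      ```python
+      # fields: index, image path, width, height, then repeated 5-tuples of box data
+      line = "0 ../infer/data/images/000000289343.jpg 529 640 16 473 395 511 423"
+      fields = line.split(' ')
+      index, image_path = int(fields[0]), fields[1]
+      width, height = int(fields[2]), int(fields[3])
+      boxes = [tuple(map(int, fields[i:i + 5])) for i in range(4, len(fields), 5)]
+      print(index, image_path, width, height, boxes)  # [(class, xmin, ymin, xmax, ymax)]
+      ```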
+
+## Advanced Reference
+
+### Script Parameters
+
+1. Key training and test parameters:
+
+   ```SHELL
+   usage: modelarts.py [--data_url DATA_URL] [--train_url TRAIN_URL] [--checkpoint_url CHECKPOINT_URL]
+   options:
+      --train_url        path where the trained model is saved
+      --data_url         dataset directory
+      --checkpoint_url   path where the pre-trained model is stored
+   ```
+
+2. The parameters have the following meanings:
+
+   ```SHELL
+    # Train options
+    data_dir: "Train dataset directory."
+    per_batch_size: "Batch size for Training."
+    pretrained_backbone: "The ckpt file of CspDarkNet53."
+    resume_yolov4: "The ckpt file of YOLOv4, used for fine-tuning."
+    pretrained_checkpoint: "The ckpt file of YoloV4CspDarkNet53."
+    filter_weight: "Filter the last weight parameters"
+    lr_scheduler: "Learning rate scheduler, options: exponential, cosine_annealing."
+    lr: "Learning rate."
+    lr_epochs: "Epochs at which lr changes, separated by ','."
+    lr_gamma: "Factor by which lr decays in the exponential lr_scheduler."
+    eta_min: "Eta_min in cosine_annealing scheduler."
+    t_max: "T-max in cosine_annealing scheduler."
+    max_epoch: "Max epoch num to train the model."
+    warmup_epochs: "Warmup epochs."
+    weight_decay: "Weight decay factor."
+    momentum: "Momentum."
+    loss_scale: "Static loss scale."
+    label_smooth: "Whether to use label smooth in CE."
+    label_smooth_factor: "Smooth strength of original one-hot."
+    log_interval: "Logging interval steps."
+    ckpt_path: "Checkpoint save location."
+    ckpt_interval: "Save checkpoint interval."
+    is_save_on_master: "Save ckpt on master or all rank, 1 for master, 0 for all ranks."
+    is_distributed: "Distribute train or not, 1 for yes, 0 for no."
+    rank: "Local rank of distributed."
+    group_size: "World size of device."
+    need_profiler: "Whether use profiler. 0 for no, 1 for yes."
+    training_shape: "Fix training shape."
+    resize_rate: "Resize rate for multi-scale training."
+    run_eval: "Run evaluation when training."
+    save_best_ckpt: "Save best checkpoint when run_eval is True."
+    eval_start_epoch: "Evaluation start epoch when run_eval is True."
+    eval_interval: "Evaluation interval when run_eval is True"
+    ann_file: "path to annotation"
+    each_multiscale: "Apply multi-scale for each scale"
+    detect_head_loss_coff: "the loss coefficient of detect head.
+                           The order of coefficients is large head, medium head and small head"
+    bbox_class_loss_coff: "bbox and class loss coefficient.
+                           The order of coefficients is ciou loss, confidence loss and class loss"
+    labels: "the label of train data"
+    mosaic: "use mosaic data augment"
+    multi_label: "use multi label to nms"
+    multi_label_thresh: "multi label thresh"
+
+    # Eval options
+    pretrained: "model_path, local pretrained model to load"
+    log_path: "checkpoint save location"
+    ann_val_file: "path to annotation"
+
+    # Export options
+    device_id: "Device id for export"
+    batch_size: "batch size for export"
+    testing_shape: "shape for test"
+    ckpt_file: "Checkpoint file path for export"
+    file_name: "output file name for export"
+    file_format: "file format for export"
+    keep_detect: "keep the detect module or not, default: True"
+    img_id_file_path: 'path of image dataset'
+    result_files: 'path to the 310 inference result folder'
+   ```
+
+# Inference
+
+## Preparing Inference Data
+
+1. Download the source package.
+
+   Click "Download Model Script" and "Download Model", and download the required MindX SDK development kit (mxManufacture).
+
+2. Upload the source code to any directory on the inference server and extract it (e.g., "/home/data/wwq").
+
+3. Build the image.
+
+   **docker build -t** *infer_image* **--build-arg FROM_IMAGE_NAME=** *base_image:tag* **--build-arg SDK_PKG=** *sdk_pkg* **.**
+
+   **Table 1** Parameter description
+
+   | Parameter | Description |
+   | --------- | ----------- |
+   | *infer_image* | Name of the inference image; fill in according to your environment. |
+   | *base_image* | Base image; can be downloaded from Ascend Hub. |
+   | *tag* | Image tag; set according to your environment, e.g., 21.0.1. |
+   | sdk_pkg | Name of the downloaded mxManufacture package, e.g., Ascend-mindxsdk-mxmanufacture_*{version}*_linux-*{arch}*.run. |
+
+   > ![Note icon](https://images.gitee.com/uploads/images/2021/0719/172222_3c2963f4_923381.gif "icon-note.gif") **Note:**
+   > Do not omit the "." at the end of the command.
+
+4. Prepare the data.
+
+   Run the coco_trainval_anns.py script located under /infer/data/models to export the data used for inference.
+
+      ```shell
+      # export the txt inference data
+      python coco_trainval_anns.py --data_url=./datasets/ --train_url=./infer/data/models/ --val_url=./infer/data/images/
+      # data_url is the path of the datasets folder, train_url is where the txt is written, val_url is where the inference images are stored
+      ```
+
+   The AIR model is produced by conversion after "model training".
+
+   Copy the generated inference data to the infer/data/models, infer/mxbase, and infer/sdk directories.
+
+5. Start the container.
+
+   Enter the "infer" directory and run the following command to start the container.
+
+   ```shell
+   bash docker_start_infer.sh docker_image:tag model_dir
+   ```
+
+   > ![Note icon](https://images.gitee.com/uploads/images/2021/0926/181445_0077d606_8725359.gif) **Note:**
+   > The MindX SDK development kit (mxManufacture) is pre-installed in the base image at "/usr/local/sdk_home".
+
+   **Table 2** Parameter description
+
+   | Parameter      | Description                           |
+   | -------------- | ------------------------------------- |
+   | *docker_image* | Inference image name and tag; fill in according to your environment. |
+   | tag            | Image tag; set according to your environment, e.g., 21.0.2. |
+   | model_dir      | Code path.                            |
+
+   Starting the container mounts the inference chips and the data path into the container. To choose which inference chip is mounted, modify the device settings in **docker_start_infer.sh**.
+
+## Model Conversion
+
+   1. Prepare the model file.
+
+* Place the *.air model file exported after ModelArts training into the infer/data/models directory.
+
+   2. Convert the model.
+
+* Run infer/convert/air2om.sh; the conversion command is as follows.
+
+      ```SHELL
+      cd ./infer/convert
+      # bash air2om.sh air_path(path of the AIR file to convert) om_path(name of the OM file to generate; the script appends the .om suffix)
+      bash air2om.sh ../data/models/yolov4.air ../data/models/yolov4
+      ```
+
+      After it finishes, the *.om model file is generated under the infer/data/models directory. Note that the OM file name must match the one used in the pipeline.
+
+## MxBase Inference
+
+   1. Configure environment variables.
+
+      ```SHELL
+      export ASCEND_HOME=/usr/local/Ascend
+      export ASCEND_VERSION=ascend-toolkit/latest
+      export ARCH_PATTERN=.
+      export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib/modelpostprocessors:$LD_LIBRARY_PATH
+      ```
+
+   2. (Optional) Modify the configuration.
+      Adjust as needed; the settings are in "mxbase/src/main.cpp". The modifiable parameters are:
+
+      ```cpp
+      initParam.deviceId = 0;
+      initParam.labelPath = "../data/models/coco2017.names";  // label file actually used
+      initParam.checkTensor = true;
+      initParam.modelPath = "../data/models/yolov4.om";  // inference model file actually used
+      initParam.classNum = 80;  // number of classes in the dataset
+      initParam.biasesNum = 18;
+      initParam.biases = "12,16,19,36,40,28,36,75,76,55,72,146,142,110,192,243,459,401";
+      initParam.objectnessThresh = "0.001";
+      initParam.iouThresh = "0.6";  // IOU threshold used by NMS; adjustable
+      initParam.scoreThresh = "0.001";
+      initParam.yoloType = 3;
+      initParam.modelType = 0;
+      initParam.inputType = 0;
+      initParam.anchorDim = 3;
+      ```
+
+      Adjust the image resize dimensions in "mxbase/src/Yolov4Detection.cpp" as needed:
+
+      ```cpp
+      APP_ERROR Yolov4TinyDetectionOpencv::Resize(cv::Mat &srcImageMat, cv::Mat &dstImageMat) {
+         static constexpr uint32_t resizeHeight = 608;  // model input height
+         static constexpr uint32_t resizeWidth = 608;   // model input width
+         cv::resize(srcImageMat, dstImageMat, cv::Size(resizeWidth, resizeHeight));
+         return APP_ERR_OK;
+      }
+      ```
+
+   3. Build the project.
+
+      ```
+      cd ./infer/mxbase
+      bash build.sh
+      ```
+
+   4. Run the inference service.
+
+      a. Make sure the validation images have permission 640.
+
+      ```shell
+      # the following command ensures the validation images have permission 640
+      chmod -R 640 ../data/images/ # path of the validation images
+      ```
+
+      b. Make sure the result folder is empty or does not exist.
+
+      ```shell
+      # the following commands ensure the result folder is empty or absent
+      rm -rf ./result/result.txt # remove the result file
+      rm -rf ./result # remove the result folder
+      rm -rf ./result.json # remove the converted result file
+      ```
+
+      c. Run the inference executable; make sure the file listing the inference image paths is in the /infer/mxbase folder. The command is as follows.
+
+      ```shell
+      # ./build/Yolov4_mindspore image_path_txt (txt file listing the inference image paths, e.g., trainval.txt)
+      ./build/Yolov4_mindspore ./trainval.txt
+      ```
+
+      The inference results are saved to "./result/result.txt".
+
+   5. Examine the results.
+
+      Copy infer/data/models/object_task_metric.py and the coco2017 validation label file instances_val2017.json into the "mxbase" directory.
+      Modify object_task_metric.py to match your setup:
+
+      ```python
+      if __name__ == "__main__":
+        ban_path = './trainval.txt' # change to the file listing your inference dataset paths
+        input_file = './result/result.txt'
+        if not os.path.exists(ban_path):
+            print('The infer text file does not exist.')
+        if not os.path.exists(input_file):
+            print('The result text file does not exist.')
+
+        image_id_list = get_image_id(ban_path)
+        result_dict = get_dict_from_file(input_file, image_id_list)
+        json_file_name = './result.json'
+        with open(json_file_name, 'w') as f:
+            json.dump(result_dict, f)
+
+        # set iouType to 'segm', 'bbox' or 'keypoints'
+        ann_type = ('segm', 'bbox', 'keypoints')
+        # specify type here
+        ann_type = ann_type[1]
+        coco_gt_file = './instances_val2017.json' # change to the ground-truth label file
+      ```
+
+   6. Check the accuracy.
+
+      Run the following command to compute the accuracy.
+
+      ```shell
+      bash infermxbase.sh
+      ```
+
+      The inference results are saved in JSON format at "./result.json".
+      Sample accuracy output:
+
+      ```shell
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.455
+       Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.646
+       Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.495
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.278
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.481
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.565
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.358
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.575
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.605
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.424
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.632
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.710
+      ```
+
+## MindX SDK Inference
+
+   1. Compile the post-processing code.
+
+      For MindX SDK inference, the post-processing code in "mxbase/src/PostProcess" is compiled directly.
+
+      ```shell
+      cd infer/sdk/mxpi
+      bash build.sh
+      ```
+
+   2. 淇敼閰嶇疆鏂囦欢銆�
+
+      a.鏍规嵁瀹為檯鎯呭喌淇敼config涓殑pipeline鏂囦欢銆�
+
+      ```shell
+      {
+       "im_yolov4": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_imagedecoder0"
+        },
+        "mxpi_imagedecoder0": {
+            "props": {
+                "handleMethod": "opencv"
+            },
+            "factory": "mxpi_imagedecoder",
+            "next": "mxpi_imageresize0"
+        },
+        "mxpi_imageresize0": {
+            "props": {
+                "parentName": "mxpi_imagedecoder0",
+                "handleMethod": "opencv",
+                "resizeHeight": "608",#妯″瀷杈撳叆楂樺害
+                "resizeWidth": "608",#妯″瀷杈撳叆瀹藉害
+                "resizeType": "Resizer_Stretch"
+            },
+            "factory": "mxpi_imageresize",
+            "next": "mxpi_tensorinfer0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "mxpi_imageresize0",
+                "modelPath": "../data/models/yolov4.om",#鎺ㄧ悊妯″瀷璺緞
+                "waitingTime": "3000",
+                "outputDeviceId": "-1"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "mxpi_objectpostprocessor0"
+        },
+        "mxpi_objectpostprocessor0": {
+            "props": {
+                "dataSource": "mxpi_tensorinfer0",
+                "postProcessConfigPath": "../data/models/yolov4_coco2017_acc_test.cfg",#鎺ㄧ悊鍚庡鐞嗙浉鍏冲弬鏁伴厤缃枃浠惰矾寰�
+                "labelPath": "../data/models/coco2017.names",#鎺ㄧ悊鏁版嵁闆嗙被鍒爣绛炬枃浠讹紝闇€鑷娣诲姞鍒板搴旂洰褰�
+                "postProcessLibPath": "./mxpi/build/libyolov4_mindspore_post.so"#缂栬瘧鍚庡鐞唖o鏂囦欢
+            },
+            "factory": "mxpi_objectpostprocessor",
+            "next": "mxpi_dataserialize0"
+        },
+        "mxpi_dataserialize0": {
+            "props": {
+                "outputDataKeys": "mxpi_objectpostprocessor0"
+            },
+            "factory": "mxpi_dataserialize",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "props": {
+                "blocksize": "4096000"
+            },
+            "factory": "appsink"
+        }
+        }
+      }
+      ```
+
+      b. 鍙牴鎹疄闄呮儏鍐典慨鏀瑰悗澶勭悊閰嶇疆鏂囦欢  
+
+      鍏堕厤缃枃浠秠olov4_coco2017_acc_test.cfg鍦ㄢ€�../data/models/鈥濈洰褰曚笅.
+
+      ```shell
+      # hyper-parameter
+      CLASS_NUM=80 #鎺ㄧ悊鏁版嵁闆嗙被鍒暟
+      BIASES_NUM=18
+      BIASES=12,16,19,36,40,28,36,75,76,55,72,146,142,110,192,243,459,401
+      SCORE_THRESH=0.001
+      OBJECTNESS_THRESH=0.001
+      IOU_THRESH=0.6 #nms鐢ㄥ埌鐨処OU闃堝€硷紝鍙皟鏁�
+      YOLO_TYPE=3
+      ANCHOR_DIM=3
+      MODEL_TYPE=0
+      RESIZE_FLAG=0
+      ```
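+
+      As a hedged reading (an assumption based on ANCHOR_DIM=3 and the three usual YOLO detection scales, not something the config states), the 18 BIASES values can be grouped into nine (w, h) anchor pairs, three per scale:
+
+      ```python
+      biases = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]
+      pairs = [(biases[i], biases[i + 1]) for i in range(0, len(biases), 2)]  # nine (w, h) anchors
+      scales = [pairs[i:i + 3] for i in range(0, len(pairs), 3)]              # three anchors per scale
+      for name, anchors in zip(("scale 0", "scale 1", "scale 2"), scales):
+          print(name, anchors)
+      ```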
+
+   3. Run the inference service.
+
+      a. Make sure the trainval.txt file is in the sdk directory.
+
+      b. In main.py, modify the path of the file listing the inference image paths.
+
+      ```python
+      infer_file = './trainval.txt' # modify according to your setup
+      ```
+
+      c. Make sure the validation images and the compiled post-processing library /sdk/mxpi/build/libyolov4_mindspore_post.so have permission 640.
+
+      ```shell
+      # the following commands set permission 640 on the validation images and the post-processing library
+      chmod -R 640 ../data/images/ # path of the validation images
+      chmod 640 ./mxpi/build/libyolov4_mindspore_post.so # path of the post-processing library
+      ```
+
+      d. 纭繚result鏂囦欢澶逛负绌猴紝鎴栬€呬笉瀛樺湪
+
+      ```shell
+      #鍙互閫氳繃浠ヤ笅鍛戒护纭繚缁撴灉鏂囦欢澶逛负绌猴紝鎴栬€呬笉瀛樺湪
+      rm -rf ./result/result.txt #鍒犻櫎缁撴灉鏂囦欢
+      rm -rf ./result #鍒犻櫎缁撴灉鏂囦欢澶�
+      rm -rf ./result.json #鍒犻櫎缁撴灉杞崲鏂囦欢
+      ```
+
+      e. 鎵ц鎺ㄧ悊
+
+      ```shell
+      cd infer/sdk
+      bash run.sh
+      ```
+
+   4. Examine the results.
+
+      Copy infer/data/models/object_task_metric.py and the coco2017 validation label file instances_val2017.json into the "sdk" directory.
+      Modify object_task_metric.py to match your setup:
+
+      ```python
+      ...
+      if __name__ == "__main__":
+        ban_path = './trainval.txt' # change to the file listing your inference dataset paths
+        input_file = './result/result.txt'
+        if not os.path.exists(ban_path):
+            print('The infer text file does not exist.')
+        if not os.path.exists(input_file):
+            print('The result text file does not exist.')
+
+        image_id_list = get_image_id(ban_path)
+        result_dict = get_dict_from_file(input_file, image_id_list)
+        json_file_name = './result.json'
+        with open(json_file_name, 'w') as f:
+            json.dump(result_dict, f)
+
+        # set iouType to 'segm', 'bbox' or 'keypoints'
+        ann_type = ('segm', 'bbox', 'keypoints')
+        # specify type here
+        ann_type = ann_type[1]
+        coco_gt_file = './instances_val2017.json' # change to the ground-truth label file
+      ...
+      ```
+
+   5. Check the accuracy.
+
+      Run the following command to compute the accuracy.
+
+      ```shell
+      bash infersdk.sh
+      ```
+
+      The inference results are saved in JSON format at "./result.json".
+      Sample accuracy output:
+
+      ```shell
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.455
+       Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.646
+       Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.495
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.278
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.481
+       Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.565
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.358
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.575
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.605
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.424
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.632
+       Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.710
+      ```
+
+   6. Performance testing.
+
+         Turn on the performance-statistics switch: in the sdk.conf configuration file, set enable_ps=true.
+
+         Adjust the statistics interval: set ps_interval_time=2 to collect performance statistics every 2 seconds.
+
+         Enter the infer/sdk directory and run the inference command script to start the SDK inference service.
+
+   7. View the performance results.
+
+         The performance statistics are written to the log directory "~/MX_SDK_HOME/logs":
+
+         ```shell
+         performance-statistics.log.e2e.xxx
+         performance-statistics.log.plugin.xxx
+         performance-statistics.log.tpr.xxx
+         ```
+
+         The e2e logs record end-to-end latency; the plugin logs record per-plugin latency.
+
+# Applying the Model on ModelArts
+
+## Creating an OBS Bucket
+
+1. Create a bucket.
+
+* Log in to the [OBS management console](https://storage.huaweicloud.com/obs) and create an OBS bucket; see the ["Creating a Bucket"](https://support.huaweicloud.com/usermanual-obs/obs_03_0306.html) section for details.
+* For "Region", select "CN North-Beijing4".
+* For "Storage Class", select "Standard".
+* For "Bucket ACL", select "Private".
+* Disable "Multi-AZ".
+* Enter a globally unique bucket name, e.g., "S3".
+* Click "OK".
+
+2. Create folders for the data.
+
+   Create the following folders in the new bucket:
+
+* code: training scripts
+* datasets: the dataset
+* preckpt: the pre-trained model
+* output: ckpt models produced by training
+* logs: training log directory
+
+3. Upload the code.
+
+* Enter the root directory of the yolov4 code.
+* Upload all files under the yolov4 directory to the obs://S3/yolov4 folder.
+
+## Creating an Algorithm
+
+1. Log in to the [ModelArts management console](https://console.huaweicloud.com/modelarts) with your Huawei Cloud account and choose "Algorithm Management" in the left navigation pane.
+2. On the "My Algorithms" page, click "Create" in the upper left corner to open the "Create Algorithm" page.
+3. On the "Create Algorithm" page, fill in the parameters and click "Submit".
+4. Set the basic algorithm information as follows.
+
+```text
+   # ==================================Create the algorithm==========================================
+   # (1) Upload your code and dataset to the S3 bucket
+   # (2) Creation method: custom script
+         AI engine: Ascend-Powered-Engine mindspore_1.3.0-cann_5.0.2-py_3.7-euler_2.8.3-aarch64
+         Code directory: /S3/yolov4/
+         Boot file: /S3/yolov4/modelarts.py
+   # (3) Hyperparameters:
+         Name             Type            Required
+         data_url         String          yes
+         train_url        String          yes
+         checkpoint_url   String          yes
+   # (4) Custom hyperparameters: supported
+   # (5) Input data configuration: "mapping name = 'data source 2'", "code path parameter = 'data_url'", "mapping name = 'data source 3'", "code path parameter = 'checkpoint_url'"
+   # (6) Output data configuration: "mapping name = 'output data 1'", "code path parameter = 'train_url'"
+   # (7) Add training constraints: no
+```
+
+## Creating a Training Job
+
+1. Log in to ModelArts.
+
+2. Create a training job.
+
+    The training job parameters are configured as follows.
+
+   ```text
+   # ==================================Create the training job=======================================
+   # (1) Algorithm: under "My Algorithms", select the algorithm created above
+   # (2) Training input: '/S3/yolov4/datasets/'
+   # Create an output folder under the OBS bucket directory /S3/yolov4/
+   # (3) Training output: '/S3/yolov4/output/'
+   # (4) Hyperparameters:
+            "data_dir = 'obs://S3/yolov4/datasets/'"
+            "train_dir='obs://S3/yolov4/output/'"
+            "checkpoint_url='obs://S3/yolov4/preckpt/'"
+   # (5) Set the job log path
+            "log='obs://S3/yolov4/log/'"
+   ```
+
+3. Click "Submit" to finish creating the training job.
+
+   A training job usually needs to run for a while; depending on the amount of data and the resources you select, training takes on the order of several minutes. The resulting model is saved under the obs://S3/yolov4/output/ folder.
+
+## Viewing Training Job Logs
+
+1. After training completes, open the logs folder and click the log file of the corresponding training job.
+
+2. Log files are generated in the logs folder; you can find results like the following in the log files under /logs:
+
+      ```text
+      2022-03-29 13:36:59,826:INFO:epoch[0], iter[117199], loss:495.129946, per step time: 45.80 ms, fps: 21.83, lr:0.011993246152997017
+      ...
+      2022-03-29 13:53:04,842:INFO:Calculating mAP...
+      2022-03-29 14:24:23,597:INFO:result file path: /home/ma-user/modelarts/outputs/train_url_0/2022-03-29_time_11_31_12/predict_2022_03_29_14_22_47.json
+     ...
+     Accumulating evaluation results...
+     DONE (t=14.87s).
+     2022-03-29 14:27:32,440:INFO:epoch: 1, mAP:
+     Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.000
+     Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.000
+     Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.000
+     Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000
+     Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.000
+     Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.000
+     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.000
+     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.000
+     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.001
+     Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000
+     Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.001
+     Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.002
+      ```
diff --git a/official/cv/yolov4/infer/convert/aipp.config b/official/cv/yolov4/infer/convert/aipp.config
new file mode 100644
index 0000000000000000000000000000000000000000..c92867780fbc3b905755e40b690bd7e34186dcf4
--- /dev/null
+++ b/official/cv/yolov4/infer/convert/aipp.config
@@ -0,0 +1,26 @@
+aipp_op {
+    aipp_mode : static
+    input_format : RGB888_U8
+    related_input_rank : 0
+    csc_switch : false
+    rbuv_swap_switch : true
+    matrix_r0c0 : 256
+    matrix_r0c1 : 0
+    matrix_r0c2 : 359
+    matrix_r1c0 : 256
+    matrix_r1c1 : -88
+    matrix_r1c2 : -183
+    matrix_r2c0 : 256
+    matrix_r2c1 : 454
+    matrix_r2c2 : 0
+    input_bias_0 : 0
+    input_bias_1 : 128
+    input_bias_2 : 128
+    
+    mean_chn_0 : 124
+    mean_chn_1 : 117
+    mean_chn_2 : 104
+    var_reci_chn_0 : 0.0171247538316637
+    var_reci_chn_1 : 0.0175070028011204
+    var_reci_chn_2 : 0.0174291938997821
+}
\ No newline at end of file
diff --git a/official/cv/yolov4/infer/convert/air2om.sh b/official/cv/yolov4/infer/convert/air2om.sh
new file mode 100644
index 0000000000000000000000000000000000000000..509102a0ae7bc2844c31534c464602d799860e41
--- /dev/null
+++ b/official/cv/yolov4/infer/convert/air2om.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+model_path=$1
+output_model_name=$2
+
+atc \
+    --model=$model_path \
+    --framework=1 \
+    --output=$output_model_name \
+    --input_format=NCHW --input_shape="actual_input_1:1,3,416,416" \
+    --enable_small_channel=1 \
+    --log=error \
+    --soc_version=Ascend310 \
+    --insert_op_conf=./aipp.config
\ No newline at end of file
diff --git a/official/cv/yolov4/infer/data/models/coco_trainval_anns.py b/official/cv/yolov4/infer/data/models/coco_trainval_anns.py
new file mode 100644
index 0000000000000000000000000000000000000000..6df56a50f280cdcbf7fc0554b69b7cb4107bed5d
--- /dev/null
+++ b/official/cv/yolov4/infer/data/models/coco_trainval_anns.py
@@ -0,0 +1,88 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import argparse
+import json
+import os
+from collections import defaultdict
+import cv2
+
+parser = argparse.ArgumentParser(description='YOLOV4')
+parser.add_argument('--data_url', type=str, default='./datasets', help='coco2017 datasets')
+parser.add_argument('--train_url', type=str, default='./infer/data/models/', help='save txt file')
+parser.add_argument('--val_url', type=str, default='./infer/data/images/', help='coco2017 val infer datasets')
+args_opt, _ = parser.parse_known_args()
+
+def name_box_parse(json_path):
+    with open(json_path, encoding='utf-8') as f:
+        data = json.load(f)
+        annotations = data['annotations']
+        for ant in annotations:
+            image_id = ant['image_id']
+            name = str("%012d.jpg" % image_id)
+            cat = ant['category_id']
+
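+            # COCO annotation category_ids run 1-90 with gaps; the shifts below collapse them into 80 contiguous ids (0-79).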
+            if 1 <= cat <= 11:
+                cat = cat - 1
+            elif 13 <= cat <= 25:
+                cat = cat - 2
+            elif 27 <= cat <= 28:
+                cat = cat - 3
+            elif 31 <= cat <= 44:
+                cat = cat - 5
+            elif 46 <= cat <= 65:
+                cat = cat - 6
+            elif cat == 67:
+                cat = cat - 7
+            elif cat == 70:
+                cat = cat - 9
+            elif 72 <= cat <= 82:
+                cat = cat - 10
+            elif 84 <= cat <= 90:
+                cat = cat - 11
+            name_box_id[name].append([ant['bbox'], cat])
+
+
+name_box_id = defaultdict(list)
+id_name = dict()
+name_box_parse(os.path.join(args_opt.data_url, 'annotations', 'instances_val2017.json'))
+
+with open(os.path.join(args_opt.train_url, 'trainval.txt'), 'w') as g:
+    ii = 0
+    for idx, key in enumerate(name_box_id.keys()):
+        print('trainval', key.split('/')[-1])
+
+        g.write('%d ' % ii)
+        ii += 1
+        g.write(os.path.join(args_opt.val_url, key))
+
+        print(os.path.join(args_opt.data_url, 'val2017', key))
+
+        img = cv2.imread(os.path.join(args_opt.data_url, 'val2017', key))
+        h, w, c = img.shape
+
+        g.write(' %d %d' % (w, h))
+
+        box_infos = name_box_id[key]
+        for info in box_infos:
+            x_min = int(info[0][0])
+            y_min = int(info[0][1])
+            x_max = x_min + int(info[0][2])
+            y_max = y_min + int(info[0][3])
+
+            box_info = " %d %d %d %d %d" % (
+                int(info[1]), x_min, y_min, x_max, y_max
+            )
+            g.write(box_info)
+        g.write('\n')
diff --git a/official/cv/yolov4/infer/data/models/object_task_metric.py b/official/cv/yolov4/infer/data/models/object_task_metric.py
new file mode 100644
index 0000000000000000000000000000000000000000..74fca693814e4c558169aaa3726530b93be1ff32
--- /dev/null
+++ b/official/cv/yolov4/infer/data/models/object_task_metric.py
@@ -0,0 +1,151 @@
+#Copyright 2022 Huawei Technologies Co., Ltd
+#
+#Licensed under the Apache License, Version 2.0 (the "License");
+#you may not use this file except in compliance with the License.
+#You may obtain a copy of the License at
+#
+#http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
+
+import os
+import json
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+
+
+def get_image_id(label_file):
+    """
+    :param: label file path, default is coco2017_minival.txt
+    :return: image id
+    """
+    image_list = []
+    with open(label_file, 'r') as f_read:
+        ban_list = f_read.read().split('\n')[:-1]
+        for item in ban_list:
+            image_path = item.split(' ')[1]
+            image_name = image_path.split('/')[-1]
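+            # e.g. "000000289343.jpg" -> 289343; the split('_') also handles prefixed names such as "COCO_val2014_000000289343.jpg"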
+            image_id = image_name.split('.')[0].split('_')[-1]
+            image_list.append(int(image_id))
+    return image_list
+
+
+def get_category_id(class_id):
+    """
+    :param: class id which corresponding coco.names
+    :return: category id is used in instances_val2017.json
+    """
+    if 0 <= class_id <= 10:
+        class_id = class_id + 1
+    elif 11 <= class_id <= 23:
+        class_id = class_id + 2
+    elif 24 <= class_id <= 25:
+        class_id = class_id + 3
+    elif 26 <= class_id <= 39:
+        class_id = class_id + 5
+    elif 40 <= class_id <= 59:
+        class_id = class_id + 6
+    elif class_id == 60:
+        class_id = class_id + 7
+    elif class_id == 61:
+        class_id = class_id + 9
+    elif 62 <= class_id <= 72:
+        class_id = class_id + 10
+    elif 73 <= class_id <= 79:
+        class_id = class_id + 11
+    return class_id
+
+def get_img_set(anno_json_path):
+    """Get image path and annotation from COCO."""
+    need_img_ids = []
+    coco = COCO(anno_json_path)
+    image_ids = coco.getImgIds()
+    print("first dataset is {}".format(len(image_ids)))
+    for img_id in image_ids:
+        iscrowd = False
+        anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)
+        anno = coco.loadAnns(anno_ids)
+        for label in anno:
+            iscrowd = iscrowd or label["iscrowd"]
+
+        if iscrowd:
+            continue
+        need_img_ids.append(img_id)
+
+    return need_img_ids
+
+def get_dict_from_file(file_path, id_list):
+    """
+    :param: file_path contain all infer result
+    :param: id_list contain all images id which is corresponding instances_val2017.json
+    :return: dict_list contain infer result of every images
+    """
+    print(len(id_list))
+    ls = []
+    image_dict = {}
+    count = -1
+    with open(file_path, 'r') as fs:
+        ban_list = fs.read().split('\n')
+        for item in ban_list:
+            if item == '':
+                continue
+            if item[0] != '#':
+                count = count + 1
+                continue
+            image_list = item.split(',')
+            image_dict['image_id'] = id_list[count]
+            image_dict['category_id'] = get_category_id(int(image_list[-1].strip().split(' ')[-1]))
+            bbox_list = [float(i) for i in image_list[1].strip().split(' ')[1:]]
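+            # result.txt stores corner coordinates (x0, y0, x1, y1); COCO evaluation expects (x, y, width, height).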
+            bbox_list[2] = bbox_list[2] - bbox_list[0]
+            bbox_list[3] = bbox_list[3] - bbox_list[1]
+            image_dict['bbox'] = bbox_list
+            image_dict['score'] = float(image_list[2].strip().split(' ')[-1])
+            ls.append(image_dict.copy())
+    return ls
+
+
+def get_img_id(file_name):
+    """
+    get image id list from result data
+    """
+    ls = []
+    myset = []
+    annos = json.load(open(file_name, 'r'))
+    for anno in annos:
+        ls.append(anno['image_id'])
+    myset = {}.fromkeys(ls).keys()
+    return myset
+
+
+if __name__ == "__main__":
+    ban_path = './trainval.txt'
+    input_file = './result/result.txt'
+    if not os.path.exists(ban_path):
+        print('The infer text file does not exist.')
+    if not os.path.exists(input_file):
+        print('The result text file does not exist.')
+
+    image_id_list = get_image_id(ban_path)
+    result_dict = get_dict_from_file(input_file, image_id_list)
+    json_file_name = './result.json'
+    with open(json_file_name, 'w') as f:
+        json.dump(result_dict, f)
+
+    # set iouType to 'segm', 'bbox' or 'keypoints'
+    ann_type = ('segm', 'bbox', 'keypoints')
+    # specify type here
+    ann_type = ann_type[1]
+    coco_gt_file = './instances_val2017.json'
+    coco_gt = COCO(coco_gt_file)
+    coco_dt_file = './result.json'
+
+    coco_dt = coco_gt.loadRes(coco_dt_file)
+    coco_eval = COCOeval(coco_gt, coco_dt, ann_type)
+    coco_eval.params.imgIds = get_img_set(coco_gt_file)
+    coco_eval.evaluate()
+    coco_eval.accumulate()
+    coco_eval.summarize()
diff --git a/official/cv/yolov4/infer/data/models/yolov4_coco2017_acc_test.cfg b/official/cv/yolov4/infer/data/models/yolov4_coco2017_acc_test.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..d15c07fcabc40945798ffef4fb7ce701ee8b7f12
--- /dev/null
+++ b/official/cv/yolov4/infer/data/models/yolov4_coco2017_acc_test.cfg
@@ -0,0 +1,11 @@
+# hyper-parameter
+CLASS_NUM=80
+BIASES_NUM=18
+BIASES=12,16,19,36,40,28,36,75,76,55,72,146,142,110,192,243,459,401
+SCORE_THRESH=0.001
+OBJECTNESS_THRESH=0.001
+IOU_THRESH=0.6
+YOLO_TYPE=3
+ANCHOR_DIM=3
+MODEL_TYPE=0
+RESIZE_FLAG=0
\ No newline at end of file
diff --git a/official/cv/yolov4/infer/docker_start_infer.sh b/official/cv/yolov4/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f8686b739ce0a2ca0f9d27d9cf2ff20b5034f26d
--- /dev/null
+++ b/official/cv/yolov4/infer/docker_start_infer.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+share_dir=$2
+data_dir=$3
+echo "$1"
+echo "$2"
+if [ -z "${docker_image}" ]; then
+    echo "please input docker_image"
+    exit 1
+fi
+
+if [ ! -d "${share_dir}" ]; then
+    echo "please input share directory that contains dataset, models and codes"
+    exit 1
+fi
+
+
+docker run -it \
+    --device=/dev/davinci0 \
+    --device=/dev/davinci_manager \
+    --device=/dev/devmm_svm \
+    --device=/dev/hisi_hdc \
+    --privileged \
+    -v //usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
+    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+    -v ${data_dir}:${data_dir}  \
+    -v ${share_dir}:${share_dir} \
+    -u root \
+    ${docker_image} \
+    /bin/bash
diff --git a/official/cv/yolov4/infer/mxbase/CMakeLists.txt b/official/cv/yolov4/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a2ba5770d37047e70013e003ca9fed9f550566d1
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,43 @@
+cmake_minimum_required(VERSION 3.10.0)
+project(Yolov4post)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+
+#set(PLUGIN_NAME "Yolov4_mindspore_post")
+set(TARGET_LIBRARY Yolov4_mindspore_post)
+set(TARGET_MAIN Yolov4_mindspore)
+
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/ascend-toolkit/latest/acllib)
+
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+include_directories($ENV{MX_SDK_HOME}/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/opencv4)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/gstreamer-1.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/glib-2.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/lib/glib-2.0/include)
+
+link_directories($ENV{MX_SDK_HOME}/lib)
+link_directories($ENV{MX_SDK_HOME}/opensource/lib/)
+
+add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations)
+add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}")
+add_compile_options("-Dgoogle=mindxsdk_private")
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+
+#message("ACL_LIB_PATH:${ACL_LIB_PATH}/lib64/.")
+#include_directories(${CMAKE_CURRENT_BINARY_DIR})
+include_directories(${ACL_LIB_PATH}/include)
+link_directories(${ACL_LIB_PATH}/lib64/)
+
+add_library(${TARGET_LIBRARY} SHARED src/PostProcess/Yolov4MindsporePost.cpp)
+
+target_link_libraries(${TARGET_LIBRARY} glib-2.0 gstreamer-1.0 gobject-2.0 gstbase-1.0 gmodule-2.0)
+target_link_libraries(${TARGET_LIBRARY} plugintoolkit mxpidatatype mxbase)
+target_link_libraries(${TARGET_LIBRARY} -Wl,-z,relro,-z,now,-z,noexecstack -s)
+
+message("TARGET_LIBRARY:${TARGET_LIBRARY}.")
+
+add_executable(${TARGET_MAIN} src/main.cpp src/Yolov4Detection.cpp)
+target_link_libraries(${TARGET_MAIN} ${TARGET_LIBRARY} glog  cpprest mxbase libascendcl.so  opencv_world)
diff --git a/official/cv/yolov4/infer/mxbase/build.sh b/official/cv/yolov4/infer/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..39064b27800a247e75f196c382a82ec63f8813bc
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/build.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+
+# env
+rm -r build
+mkdir -p build
+cd build || exit
+
+function make_plugin() {
+    if ! cmake ..;
+    then
+      echo "cmake failed."
+      return 1
+    fi
+
+    if ! (make);
+    then
+      echo "make failed."
+      return 1
+    fi
+
+    return 0
+}
+
+if make_plugin;
+then
+  echo "INFO: Build successfully."
+else
+  echo "ERROR: Build failed."
+fi
+
+cd - || exit
diff --git a/official/cv/yolov4/infer/mxbase/infermxbase.sh b/official/cv/yolov4/infer/mxbase/infermxbase.sh
new file mode 100644
index 0000000000000000000000000000000000000000..85e7e1b5912aaae917cf347b2ead13053d563880
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/infermxbase.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Simple log helper functions
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/${ASCEND_VERSION}/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
+export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
+
+#to set PYTHONPATH, import the StreamManagerApi.py
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+python3 object_task_metric.py
+exit 0
diff --git a/official/cv/yolov4/infer/mxbase/src/PostProcess/Yolov4MindsporePost.cpp b/official/cv/yolov4/infer/mxbase/src/PostProcess/Yolov4MindsporePost.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..63713a34725a4605f75af9c0d6671889b83903cd
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/src/PostProcess/Yolov4MindsporePost.cpp
@@ -0,0 +1,291 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Yolov4MindsporePost.h"
+#include <algorithm>
+#include <string>
+#include <memory>
+#include "MxBase/Log/Log.h"
+#include "MxBase/CV/ObjectDetection/Nms/Nms.h"
+
+namespace {
+const int SCALE = 32;
+const int BIASESDIM = 2;
+const int OFFSETWIDTH = 2;
+const int OFFSETHEIGHT = 3;
+const int OFFSETBIASES = 1;
+const int OFFSETOBJECTNESS = 1;
+
+const int NHWC_HEIGHTINDEX = 1;
+const int NHWC_WIDTHINDEX = 2;
+const int NCHW_HEIGHTINDEX = 2;
+const int NCHW_WIDTHINDEX = 3;
+const int YOLO_INFO_DIM = 5;
+
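+// No-op deleter: the feature-map buffers are owned by the TensorBase objects,
+// so the shared_ptr views created in ObjectDetectionOutput must not free them.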
+auto uint8Deleter = [] (uint8_t* p) { };
+}  // namespace
+
+namespace localParameter {
+    const uint32_t VECTOR_FIRST_INDEX = 0;
+    const uint32_t VECTOR_SECOND_INDEX = 1;
+    const uint32_t VECTOR_THIRD_INDEX = 2;
+    const uint32_t VECTOR_FOURTH_INDEX = 3;
+    const uint32_t VECTOR_FIFTH_INDEX = 4;
+}
+
+namespace MxBase {
+Yolov4PostProcess& Yolov4PostProcess::operator=(const Yolov4PostProcess &other) {
+    if (this == &other) {
+        return *this;
+    }
+    ObjectPostProcessBase::operator=(other);
+    objectnessThresh_ = other.objectnessThresh_;  // Threshold of objectness value
+    iouThresh_ = other.iouThresh_;
+    anchorDim_ = other.anchorDim_;
+    biasesNum_ = other.biasesNum_;
+    yoloType_ = other.yoloType_;
+    modelType_ = other.modelType_;
+    inputType_ = other.inputType_;
+    biases_ = other.biases_;
+    return *this;
+}
+
+APP_ERROR Yolov4PostProcess::Init(const std::map<std::string, std::shared_ptr<void>>& postConfig) {
+    LogDebug << "Start to Init Yolov4PostProcess.";
+    APP_ERROR ret = ObjectPostProcessBase::Init(postConfig);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Fail to superInit in ObjectPostProcessBase.";
+        return ret;
+    }
+
+    configData_.GetFileValue<int>("BIASES_NUM", biasesNum_);
+    std::string str;
+    configData_.GetFileValue<std::string>("BIASES", str);
+    configData_.GetFileValue<float>("OBJECTNESS_THRESH", objectnessThresh_);
+    configData_.GetFileValue<float>("IOU_THRESH", iouThresh_);
+    configData_.GetFileValue<int>("YOLO_TYPE", yoloType_);
+    configData_.GetFileValue<int>("MODEL_TYPE", modelType_);
+    configData_.GetFileValue<int>("YOLO_VERSION", yoloVersion_);
+    configData_.GetFileValue<int>("INPUT_TYPE", inputType_);
+    configData_.GetFileValue<int>("ANCHOR_DIM", anchorDim_);
+    ret = GetBiases(str);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Failed to get biases.";
+        return ret;
+    }
+    LogDebug << "End to Init Yolov4PostProcess.";
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4PostProcess::DeInit() {
+    return APP_ERR_OK;
+}
+
+bool Yolov4PostProcess::IsValidTensors(const std::vector<TensorBase> &tensors) {
+    if (tensors.size() != (size_t)yoloType_) {
+        LogError << "number of tensors (" << tensors.size() << ") " << "is unequal to yoloType_("
+                 << yoloType_ << ")";
+        return false;
+    }
+    if (yoloVersion_ == YOLOV4_VERSION) {
+        for (size_t i = 0; i < tensors.size(); i++) {
+            auto shape = tensors[i].GetShape();
+            if (shape.size() < localParameter::VECTOR_FIFTH_INDEX) {
+                LogError << "dimensions of tensor [" << i << "] is less than "
+                         << localParameter::VECTOR_FIFTH_INDEX << ".";
+                return false;
+            }
+            uint32_t channelNumber = 1;
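+            // modelType_ != 0 selects the NCHW layout (channel dims right after the batch dim);
+            // otherwise NHWC (channel dims at the end of the shape).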
+            int startIndex = modelType_ ? localParameter::VECTOR_SECOND_INDEX : localParameter::VECTOR_FOURTH_INDEX;
+            int endIndex = modelType_ ? (shape.size() - localParameter::VECTOR_THIRD_INDEX) : shape.size();
+            for (int j = startIndex; j < endIndex; j++) {
+                channelNumber *= shape[j];
+            }
+            if (channelNumber != anchorDim_ * (classNum_ + YOLO_INFO_DIM)) {
+                LogError << "channelNumber(" << channelNumber << ") != anchorDim_ * (classNum_ + 5).";
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+void Yolov4PostProcess::ObjectDetectionOutput(const std::vector<TensorBase>& tensors,
+                                              std::vector<std::vector<ObjectInfo>>& objectInfos,
+                                              const std::vector<ResizedImageInfo>& resizedImageInfos) {
+    LogDebug << "Yolov4PostProcess start to write results.";
+    if (tensors.size() == 0) {
+        return;
+    }
+    auto shape = tensors[0].GetShape();
+    if (shape.size() == 0) {
+        return;
+    }
+    uint32_t batchSize = shape[0];
+    for (uint32_t i = 0; i < batchSize; i++) {
+        std::vector<std::shared_ptr<void>> featLayerData = {};
+        std::vector<std::vector<size_t>> featLayerShapes = {};
+        for (uint32_t j = 0; j < tensors.size(); j++) {
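+            // Tensors are batch-major: step into the slice that belongs to batch element i.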
+            auto dataPtr = reinterpret_cast<uint8_t *> (tensors[j].GetBuffer()) +
+            i * tensors[j].GetByteSize() / batchSize;
+            std::shared_ptr<void> tmpPointer;
+            tmpPointer.reset(dataPtr, uint8Deleter);
+            featLayerData.push_back(tmpPointer);
+            shape = tensors[j].GetShape();
+            std::vector<size_t> featLayerShape(shape.size());
+            std::transform(shape.begin(), shape.end(), featLayerShape.begin(), [](uint32_t s) { return (size_t)s; });
+            featLayerShapes.push_back(featLayerShape);
+        }
+        std::vector<ObjectInfo> objectInfo;
+        GenerateBbox(featLayerData, objectInfo, featLayerShapes, resizedImageInfos[i].widthResize,
+            resizedImageInfos[i].heightResize);
+        MxBase::NmsSort(objectInfo, iouThresh_);
+        objectInfos.push_back(objectInfo);
+    }
+    LogDebug << "Yolov4PostProcess write results success.";
+}
+
+APP_ERROR Yolov4PostProcess::Process(const std::vector<TensorBase> &tensors,
+                                     std::vector<std::vector<ObjectInfo>> &objectInfos,
+                                     const std::vector<ResizedImageInfo> &resizedImageInfos,
+                                     const std::map<std::string, std::shared_ptr<void>> &configParamMap) {
+    LogDebug << "Start to Process Yolov4PostProcess.";
+    APP_ERROR ret = APP_ERR_OK;
+    auto inputs = tensors;
+    ret = CheckAndMoveTensors(inputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "CheckAndMoveTensors failed. ret=" << ret;
+        return ret;
+    }
+
+    ObjectDetectionOutput(inputs, objectInfos, resizedImageInfos);
+
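+    // Map the detected boxes from the resized-image coordinate space back to each original image.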
+    for (uint32_t i = 0; i < resizedImageInfos.size(); i++) {
+        CoordinatesReduction(i, resizedImageInfos[i], objectInfos[i]);
+    }
+    LogObjectInfos(objectInfos);
+    LogDebug << "End to Process Yolov4PostProcess.";
+    return APP_ERR_OK;
+}
+
+void Yolov4PostProcess::CompareProb(int& classID, float& maxProb, float classProb, int classNum) {
+    if (classProb > maxProb) {
+        maxProb = classProb;
+        classID = classNum;
+    }
+}
+
+void Yolov4PostProcess::SelectClassNHWC(std::shared_ptr<void> netout, NetInfo info,
+                                          std::vector<MxBase::ObjectInfo>& detBoxes, int stride) {
+    const int offsetY = 1;
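+    // NHWC layout: each of the `stride` grid cells holds anchorDim consecutive
+    // predictions of (x, y, w, h, objectness, classNum scores).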
+    for (int j = 0; j < stride; ++j) {
+        for (int k = 0; k < info.anchorDim; ++k) {
+            int bIdx = (info.bboxDim + 1 + info.classNum) * info.anchorDim * j +
+                       k * (info.bboxDim + 1 + info.classNum);
+            int oIdx = bIdx + info.bboxDim;  // objectness index
+            float objectness = static_cast<float *>(netout.get())[oIdx];
+            if (objectness <= objectnessThresh_) {
+                continue;
+            }
+            int classID = -1;
+            float maxProb = scoreThresh_;
+            for (int c = 0; c < info.classNum; ++c) {
+                float clsProb = static_cast<float *>(netout.get())[bIdx +
+                    (info.bboxDim + OFFSETOBJECTNESS + c)] * objectness;
+                CompareProb(classID, maxProb, clsProb, c);
+            }
+            if (classID < 0) continue;
+            MxBase::ObjectInfo det;
+            float x = static_cast<float *>(netout.get())[bIdx];
+            float y = static_cast<float *>(netout.get())[bIdx + offsetY];
+            float width = static_cast<float *>(netout.get())[bIdx + OFFSETWIDTH];
+            float height = static_cast<float *>(netout.get())[bIdx + OFFSETHEIGHT];
+            det.x0 = std::max(0.0f, x - width / COORDINATE_PARAM);
+            det.x1 = std::min(1.0f, x + width / COORDINATE_PARAM);
+            det.y0 = std::max(0.0f, y - height / COORDINATE_PARAM);
+            det.y1 = std::min(1.0f, y + height / COORDINATE_PARAM);
+            det.classId = classID;
+            det.className = configData_.GetClassName(classID);
+            det.confidence = maxProb;
+            if (det.confidence < separateScoreThresh_[classID]) continue;
+            detBoxes.emplace_back(det);
+        }
+    }
+}
+
+void Yolov4PostProcess::GenerateBbox(std::vector<std::shared_ptr<void>> featLayerData,
+                                     std::vector<MxBase::ObjectInfo> &detBoxes,
+                                     const std::vector<std::vector<size_t>>& featLayerShapes, const int netWidth,
+                                     const int netHeight) {
+    NetInfo netInfo;
+    netInfo.anchorDim = anchorDim_;
+    netInfo.bboxDim = BOX_DIM;
+    netInfo.classNum = classNum_;
+    netInfo.netWidth = netWidth;
+    netInfo.netHeight = netHeight;
+    for (int i = 0; i < yoloType_; ++i) {
+        int widthIndex_ = modelType_ ? NCHW_WIDTHINDEX : NHWC_WIDTHINDEX;
+        int heightIndex_ = modelType_ ? NCHW_HEIGHTINDEX : NHWC_HEIGHTINDEX;
+        OutputLayer layer = {featLayerShapes[i][widthIndex_], featLayerShapes[i][heightIndex_]};
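+        // Derive the scale order of this feature map from its width relative to the
+        // network input (strides 32/16/8), then pick the matching anchor biases.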
+        int logOrder = log(featLayerShapes[i][widthIndex_] * SCALE / netWidth) / log(BIASESDIM);
+        int startIdx = (yoloType_ - 1 - logOrder) * netInfo.anchorDim * BIASESDIM;
+        int endIdx = startIdx + netInfo.anchorDim * BIASESDIM;
+        int idx = 0;
+        for (int j = startIdx; j < endIdx; ++j) {
+            layer.anchors[idx++] = biases_[j];
+        }
+        int stride = layer.width * layer.height;
+        std::shared_ptr<void> netout = featLayerData[i];
+        SelectClassNHWC(netout, netInfo, detBoxes, stride);
+    }
+}
+
+APP_ERROR Yolov4PostProcess::GetBiases(std::string& strBiases) {
+    if (biasesNum_ <= 0) {
+        LogError << GetError(APP_ERR_COMM_INVALID_PARAM) << "Failed to get biasesNum (" << biasesNum_ << ").";
+        return APP_ERR_COMM_INVALID_PARAM;
+    }
+    biases_.clear();
+    int i = 0;
+    // Parse the comma-separated biases string, e.g. "12,16,19,36,...".
+    std::string::size_type pos = strBiases.find(',');
+    while (pos != std::string::npos && i < biasesNum_) {
+        biases_.push_back(stof(strBiases.substr(0, pos)));
+        strBiases = strBiases.substr(pos + 1);
+        i++;
+        pos = strBiases.find(',');
+    }
+    if (i != biasesNum_ - 1 || strBiases.empty()) {
+        LogError << GetError(APP_ERR_COMM_INVALID_PARAM) << "biasesNum (" << biasesNum_
+                 << ") does not match the number of values in the BIASES string (remaining: \"" << strBiases << "\").";
+        return APP_ERR_COMM_INVALID_PARAM;
+    }
+    biases_.push_back(stof(strBiases));
+    return APP_ERR_OK;
+}
+
+#ifndef ENABLE_POST_PROCESS_INSTANCE
+extern "C" {
+std::shared_ptr<MxBase::Yolov4PostProcess> GetObjectInstance() {
+    LogInfo << "Begin to get Yolov4PostProcess instance.";
+    auto instance = std::make_shared<Yolov4PostProcess>();
+    LogInfo << "End to get Yolov4PostProcess instance.";
+    return instance;
+}
+}
+#endif
+}  // namespace MxBase
diff --git a/official/cv/yolov4/infer/mxbase/src/PostProcess/Yolov4MindsporePost.h b/official/cv/yolov4/infer/mxbase/src/PostProcess/Yolov4MindsporePost.h
new file mode 100644
index 0000000000000000000000000000000000000000..b9afe9ab326f72b5d689d9d27cd2ec817b34a343
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/src/PostProcess/Yolov4MindsporePost.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef YOLOV4_POST_PROCESS_H
+#define YOLOV4_POST_PROCESS_H
+#include <algorithm>
+#include <vector>
+#include <map>
+#include <string>
+#include <memory>
+#include <opencv4/opencv2/opencv.hpp>
+#include "MxBase/ErrorCode/ErrorCode.h"
+#include "MxBase/CV/Core/DataType.h"
+#include "MxBase/PostProcessBases/ObjectPostProcessBase.h"
+
+const float DEFAULT_OBJECTNESS_THRESH = 0.3;
+const float DEFAULT_IOU_THRESH = 0.45;
+const int DEFAULT_ANCHOR_DIM = 3;
+const int DEFAULT_BIASES_NUM = 18;
+const int DEFAULT_YOLO_TYPE = 3;
+const int DEFAULT_YOLO_VERSION = 4;
+const int YOLOV3_VERSION = 3;
+const int YOLOV4_VERSION = 4;
+const int YOLOV5_VERSION = 5;
+const int ANCHOR_NUM = 6;
+struct OutputLayer {
+    size_t width;
+    size_t height;
+    float anchors[ANCHOR_NUM];
+};
+
+struct NetInfo {
+    int anchorDim;
+    int classNum;
+    int bboxDim;
+    int netWidth;
+    int netHeight;
+};
+
+namespace MxBase {
+class Yolov4PostProcess : public ObjectPostProcessBase {
+ public:
+     Yolov4PostProcess() = default;
+
+     ~Yolov4PostProcess() = default;
+
+     Yolov4PostProcess(const Yolov4PostProcess &other) = default;
+
+     Yolov4PostProcess &operator=(const Yolov4PostProcess &other);
+
+     APP_ERROR Init(const std::map<std::string, std::shared_ptr<void>> &postConfig) override;
+
+     APP_ERROR DeInit() override;
+
+     APP_ERROR Process(const std::vector<TensorBase> &tensors, std::vector<std::vector<ObjectInfo>> &objectInfos,
+                      const std::vector<ResizedImageInfo> &resizedImageInfos = {},
+                      const std::map<std::string, std::shared_ptr<void>> &configParamMap = {}) override;
+
+ protected:
+     bool IsValidTensors(const std::vector<TensorBase> &tensors);
+
+     void ObjectDetectionOutput(const std::vector<TensorBase> &tensors,
+                               std::vector<std::vector<ObjectInfo>> &objectInfos,
+                               const std::vector<ResizedImageInfo> &resizedImageInfos = {});
+
+     void CompareProb(int& classID, float& maxProb, float classProb, int classNum);
+     void SelectClassNHWC(std::shared_ptr<void> netout, NetInfo info, std::vector<MxBase::ObjectInfo>& detBoxes,
+                         int stride);
+     void GenerateBbox(std::vector<std::shared_ptr<void>> featLayerData,
+                      std::vector<MxBase::ObjectInfo> &detBoxes,
+                      const std::vector<std::vector<size_t>>& featLayerShapes,
+                      const int netWidth, const int netHeight);
+     APP_ERROR GetBiases(std::string& strBiases);
+
+ protected:
+     float objectnessThresh_ = DEFAULT_OBJECTNESS_THRESH;  // Threshold of objectness value
+     float iouThresh_ = DEFAULT_IOU_THRESH;  // Non-Maximum Suppression threshold
+     int anchorDim_ = DEFAULT_ANCHOR_DIM;
+     int biasesNum_ = DEFAULT_BIASES_NUM;  // anchors, generate from train data, coco dataset
+     int yoloType_ = DEFAULT_YOLO_TYPE;
+     int modelType_ = 0;
+     int yoloVersion_ = DEFAULT_YOLO_VERSION;
+     int inputType_ = 0;
+     std::vector<float> biases_ = {};
+};
+#ifndef ENABLE_POST_PROCESS_INSTANCE
+extern "C" {
+std::shared_ptr<MxBase::Yolov4PostProcess> GetObjectInstance();
+}
+#endif
+}  // namespace MxBase
+#endif  // YOLOV4_POST_PROCESS_H
diff --git a/official/cv/yolov4/infer/mxbase/src/Yolov4Detection.cpp b/official/cv/yolov4/infer/mxbase/src/Yolov4Detection.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c520c13d5094dab3fce5917183294eb0336d3cf8
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/src/Yolov4Detection.cpp
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Yolov4Detection.h"
+#include <unistd.h>
+#include <sys/stat.h>
+#include <utility>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+
+namespace {
+const uint32_t YUV_BYTE_NU = 3;
+const uint32_t YUV_BYTE_DE = 2;
+const uint32_t VPC_H_ALIGN = 2;
+}  // namespace
+
+APP_ERROR Yolov4DetectionOpencv::LoadLabels(const std::string &labelPath, std::map<int, std::string> &labelMap) {
+    std::ifstream infile;
+    // open label file
+    infile.open(labelPath, std::ios_base::in);
+    std::string s;
+    // check label file validity
+    if (infile.fail()) {
+        LogError << "Failed to open label file: " << labelPath << ".";
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    labelMap.clear();
+    // construct label map
+    int count = 0;
+    while (std::getline(infile, s)) {
+        if (s.empty() || s[0] == '#') {
+            continue;
+        }
+        size_t eraseIndex = s.find_last_not_of("\r\n\t");
+        if (eraseIndex != std::string::npos) {
+            s.erase(eraseIndex + 1, s.size() - eraseIndex);
+        }
+        labelMap.insert(std::pair<int, std::string>(count, s));
+        count++;
+    }
+    infile.close();
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+    dvppWrapper_ = std::make_shared<MxBase::DvppWrapper>();
+    ret = dvppWrapper_->Init();
+    if (ret != APP_ERR_OK) {
+        LogError << "DvppWrapper init failed, ret=" << ret << ".";
+        return ret;
+    }
+    model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+        return ret;
+    }
+    MxBase::ConfigData configData;
+    const std::string checkTensor = initParam.checkTensor ? "true" : "false";
+    configData.SetJsonValue("CLASS_NUM", std::to_string(initParam.classNum));
+    configData.SetJsonValue("BIASES_NUM", std::to_string(initParam.biasesNum));
+    configData.SetJsonValue("BIASES", initParam.biases);
+    configData.SetJsonValue("OBJECTNESS_THRESH", initParam.objectnessThresh);
+    configData.SetJsonValue("IOU_THRESH", initParam.iouThresh);
+    configData.SetJsonValue("SCORE_THRESH", initParam.scoreThresh);
+    configData.SetJsonValue("YOLO_TYPE", std::to_string(initParam.yoloType));
+    configData.SetJsonValue("MODEL_TYPE", std::to_string(initParam.modelType));
+    configData.SetJsonValue("INPUT_TYPE", std::to_string(initParam.inputType));
+    configData.SetJsonValue("ANCHOR_DIM", std::to_string(initParam.anchorDim));
+    configData.SetJsonValue("CHECK_MODEL", checkTensor);
+
+    auto jsonStr = configData.GetCfgJson().serialize();
+    std::map<std::string, std::shared_ptr<void>> config;
+    config["postProcessConfigContent"] = std::make_shared<std::string>(jsonStr);
+    config["labelPath"] = std::make_shared<std::string>(initParam.labelPath);
+
+    post_ = std::make_shared<MxBase::Yolov4PostProcess>();
+    ret = post_->Init(config);
+    if (ret != APP_ERR_OK) {
+        LogError << "Resnet50PostProcess init failed, ret=" << ret << ".";
+        return ret;
+    }
+    // load labels from file
+    ret = LoadLabels(initParam.labelPath, labelMap_);
+    if (ret != APP_ERR_OK) {
+        LogError << "Failed to load labels, ret=" << ret << ".";
+        return ret;
+    }
+    LogInfo << "End to Init Yolov4DetectionOpencv.";
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::DeInit() {
+    dvppWrapper_->DeInit();
+    model_->DeInit();
+    post_->DeInit();
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::ReadImage(const std::string &imgPath, cv::Mat &imageMat) {
+    imageMat = cv::imread(imgPath, cv::IMREAD_COLOR);
+    if (imageMat.empty()) {
+        LogError << "Failed to read image: " << imgPath << ".";
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    imageWidth_ = imageMat.cols;
+    imageHeight_ = imageMat.rows;
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::Resize(cv::Mat &srcImageMat, cv::Mat &dstImageMat) {
+    static constexpr uint32_t resizeHeight = 608;
+    static constexpr uint32_t resizeWidth = 608;
+    cv::resize(srcImageMat, dstImageMat, cv::Size(resizeWidth, resizeHeight));
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase) {
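+    // OpenCV delivers packed 8-bit BGR, i.e. 3 bytes per pixel (YUV444_RGB_WIDTH_NU).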
+    const uint32_t dataSize = imageMat.cols * imageMat.rows * MxBase::YUV444_RGB_WIDTH_NU;
+    MxBase::MemoryData memoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE, deviceId_);
+    MxBase::MemoryData memoryDataSrc(imageMat.data, dataSize, MxBase::MemoryData::MEMORY_HOST_MALLOC);
+
+    APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc failed.";
+        return ret;
+    }
+    std::vector<uint32_t> shape = {imageMat.rows * MxBase::YUV444_RGB_WIDTH_NU, static_cast<uint32_t>(imageMat.cols)};
+    tensorBase = MxBase::TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_UINT8);
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::Inference(const std::vector<MxBase::TensorBase> &inputs,
+    std::vector<MxBase::TensorBase> &outputs) {
+    auto dtypes = model_->GetOutputDataType();
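+    // Pre-allocate a device tensor for every output described by the model.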
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector<uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        MxBase::TensorBase tensor(shape, dtypes[i], MxBase::MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
+        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs.push_back(tensor);
+    }
+
+    MxBase::DynamicInfo dynamicInfo = {};
+    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+    auto startTime = std::chrono::high_resolution_clock::now();
+    APP_ERROR ret = model_->ModelInference(inputs, outputs, dynamicInfo);
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    g_inferCost.push_back(costMs);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::PostProcess(const std::vector<MxBase::TensorBase> &outputs,
+                                             std::vector<std::vector<MxBase::ObjectInfo>> &objInfos) {
+    MxBase::ResizedImageInfo imgInfo;
+    imgInfo.widthOriginal = imageWidth_;
+    imgInfo.heightOriginal = imageHeight_;
+    imgInfo.widthResize = 608;
+    imgInfo.heightResize = 608;
+    imgInfo.resizeType = MxBase::RESIZER_STRETCHING;
+    std::vector<MxBase::ResizedImageInfo> imageInfoVec = {};
+    imageInfoVec.push_back(imgInfo);
+    APP_ERROR ret = post_->Process(outputs, objInfos, imageInfoVec);
+    if (ret != APP_ERR_OK) {
+        LogError << "Process failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::WriteResult(const std::vector<std::vector<MxBase::ObjectInfo>> &objInfos) {
+    std::string resultPathName = "result";
+    uint32_t batchSize = objInfos.size();
+    // create result directory when it does not exist
+    if (access(resultPathName.c_str(), 0) != 0) {
+        int ret = mkdir(resultPathName.c_str(), S_IRUSR | S_IWUSR | S_IXUSR);
+        if (ret != 0) {
+            LogError << "Failed to create result directory: " << resultPathName << ", ret = " << ret;
+            return APP_ERR_COMM_OPEN_FAIL;
+        }
+    }
+    // create result file under result directory
+    resultPathName = resultPathName + "/result.txt";
+    std::ofstream tfile(resultPathName, std::ofstream::app);
+    if (tfile.fail()) {
+        LogError << "Failed to open result file: " << resultPathName;
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    // write inference result into file
+    for (uint32_t i = 0; i < batchSize; i++) {
+        tfile << "Object detected num is " << objInfos[i].size() << std::endl;
+        for (uint32_t j = 0; j < objInfos[i].size(); j++) {
+            tfile << "#Obj: " << j << ", box: " << objInfos[i][j].x0 << " " << objInfos[i][j].y0 << " "
+                << objInfos[i][j].x1 << " " << objInfos[i][j].y1
+                << ", confidence: " << objInfos[i][j].confidence << ", label: " << labelMap_[objInfos[i][j].classId]
+                << ", id: " << objInfos[i][j].classId << std::endl;
+        }
+    }
+
+    tfile.close();
+    return APP_ERR_OK;
+}
+
+APP_ERROR Yolov4DetectionOpencv::Process(const std::string &imgPath) {
+    // process image
+    cv::Mat imageMat;
+    APP_ERROR ret = ReadImage(imgPath, imageMat);
+    if (ret != APP_ERR_OK) {
+        LogError << "ReadImage failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    ret = Resize(imageMat, imageMat);
+    if (ret != APP_ERR_OK) {
+        LogError << "Resize failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    std::vector<MxBase::TensorBase> inputs = {};
+    std::vector<MxBase::TensorBase> outputs = {};
+    MxBase::TensorBase tensorBase;
+    ret = CVMatToTensorBase(imageMat, tensorBase);
+    if (ret != APP_ERR_OK) {
+        LogError << "CVMatToTensorBase failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    inputs.push_back(tensorBase);
+    ret = Inference(inputs, outputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    std::vector<std::vector<MxBase::ObjectInfo>> objInfos;
+    ret = PostProcess(outputs, objInfos);
+    if (ret != APP_ERR_OK) {
+        LogError << "PostProcess failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    ret = WriteResult(objInfos);
+    if (ret != APP_ERR_OK) {
+        LogError << "Save result failed, ret=" << ret << ".";
+        return ret;
+    }
+    imageMat.release();
+    return APP_ERR_OK;
+}
diff --git a/official/cv/yolov4/infer/mxbase/src/Yolov4Detection.h b/official/cv/yolov4/infer/mxbase/src/Yolov4Detection.h
new file mode 100644
index 0000000000000000000000000000000000000000..eec33855d38f892895a284b9292ce6b2a14f3c50
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/src/Yolov4Detection.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MXBASE_YOLOV4DETECTIONOPENCV_H
+#define MXBASE_YOLOV4DETECTIONOPENCV_H
+
+#include <vector>
+#include <memory>
+#include <map>
+#include <string>
+#include <opencv2/opencv.hpp>
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "PostProcess/Yolov4MindsporePost.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+
+extern std::vector<double> g_inferCost;
+
+struct InitParam {
+    uint32_t deviceId;
+    std::string labelPath;
+    bool checkTensor;
+    std::string modelPath;
+    uint32_t classNum;
+    uint32_t biasesNum;
+    std::string biases;
+    std::string objectnessThresh;
+    std::string iouThresh;
+    std::string scoreThresh;
+    uint32_t yoloType;
+    uint32_t modelType;
+    uint32_t inputType;
+    uint32_t anchorDim;
+};
+
+class Yolov4DetectionOpencv {
+ public:
+     APP_ERROR Init(const InitParam &initParam);
+     APP_ERROR DeInit();
+     APP_ERROR ReadImage(const std::string &imgPath, cv::Mat &imageMat);
+     APP_ERROR Resize(cv::Mat &srcImageMat, cv::Mat &dstImageMat);
+     APP_ERROR CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase);
+     APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> &outputs);
+     APP_ERROR PostProcess(const std::vector<MxBase::TensorBase> &outputs,
+                          std::vector<std::vector<MxBase::ObjectInfo>> &objInfos);
+     APP_ERROR Process(const std::string &imgPath);
+     APP_ERROR LoadLabels(const std::string &labelPath, std::map<int, std::string> &labelMap);
+     APP_ERROR WriteResult(const std::vector<std::vector<MxBase::ObjectInfo>> &objInfos);
+ private:
+     std::shared_ptr<MxBase::DvppWrapper> dvppWrapper_;
+     std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
+     std::shared_ptr<MxBase::Yolov4PostProcess> post_;
+     MxBase::ModelDesc modelDesc_;
+     std::map<int, std::string> labelMap_;
+     uint32_t deviceId_ = 0;
+     uint32_t imageWidth_ = 0;
+     uint32_t imageHeight_ = 0;
+};
+#endif  // MXBASE_YOLOV4DETECTIONOPENCV_H
diff --git a/official/cv/yolov4/infer/mxbase/src/main.cpp b/official/cv/yolov4/infer/mxbase/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8c5d2298f25717db90ae474fdac5ecc511c5a321
--- /dev/null
+++ b/official/cv/yolov4/infer/mxbase/src/main.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <vector>
+#include "Yolov4Detection.h"
+#include "MxBase/Log/Log.h"
+
+std::vector<double> g_inferCost;
+
+void SplitString(const std::string &s, std::vector<std::string> *v, const std::string &c) {
+    std::string::size_type pos1, pos2;
+    pos2 = s.find(c);
+    pos1 = 0;
+    while (std::string::npos != pos2) {
+        v->push_back(s.substr(pos1, pos2 - pos1));
+
+        pos1 = pos2 + c.size();
+        pos2 = s.find(c, pos1);
+    }
+
+    if (pos1 != s.length()) {
+        v->push_back(s.substr(pos1));
+    }
+}
+
+void InitYolov4Param(InitParam *initParam) {
+    initParam->deviceId = 0;
+    initParam->labelPath = "../data/models/coco2017.names";
+    initParam->checkTensor = true;
+    initParam->modelPath = "../data/models/yolov4.om";
+    initParam->classNum = 80;
+    initParam->biasesNum = 18;
+    initParam->biases = "12,16,19,36,40,28,36,75,76,55,72,146,142,110,192,243,459,401";
+    initParam->objectnessThresh = "0.001";
+    initParam->iouThresh = "0.6";
+    initParam->scoreThresh = "0.001";
+    initParam->yoloType = 3;
+    initParam->modelType = 0;
+    initParam->inputType = 0;
+    initParam->anchorDim = 3;
+}
+
+APP_ERROR ReadImagesPath(const std::string &path, std::vector<std::string> *imagesPath) {
+    std::ifstream inFile;
+    inFile.open(path, std::ios_base::in);
+    std::string line;
+    // Check images path file validity
+    if (inFile.fail()) {
+        LogError << "Failed to open label file: " << path;
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    std::vector<std::string> vectorStr;
+    std::string splitStr = " ";
+    // construct the image path list
+    while (std::getline(inFile, line)) {
+        if (line.empty() || line[0] == '#') {
+            continue;
+        }
+        vectorStr.clear();
+        SplitString(line, &vectorStr, splitStr);
+        if (vectorStr.size() > 1) {
+            imagesPath->push_back(vectorStr[1]);
+        }
+    }
+
+    inFile.close();
+    return APP_ERR_OK;
+}
+
+int main(int argc, char* argv[]) {
+    if (argc <= 1) {
+        LogWarn << "Please input image path, such as './yolov4 infer.txt'.";
+        return APP_ERR_OK;
+    }
+    InitParam initParam;
+    InitYolov4Param(&initParam);
+    auto yolov4 = std::make_shared<Yolov4DetectionOpencv>();
+    APP_ERROR ret = yolov4->Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "Yolov4DetectionOpencv init failed, ret=" << ret << ".";
+        return ret;
+    }
+    LogInfo << "End to Init yolov4.";
+    std::string inferText = argv[1];
+    std::vector<std::string> imagesPath;
+    ret = ReadImagesPath(inferText, &imagesPath);
+    if (ret != APP_ERR_OK) {
+        LogError << "ReadImagesPath failed, ret=" << ret << ".";
+        return ret;
+    }
+    for (uint32_t i = 0; i < imagesPath.size(); i++) {
+        LogInfo << "read image path " << imagesPath[i];
+        ret = yolov4->Process(imagesPath[i]);
+        if (ret != APP_ERR_OK) {
+            LogError << "Yolov4DetectionOpencv process failed, ret=" << ret << ".";
+            yolov4->DeInit();
+            return ret;
+        }
+    }
+    yolov4->DeInit();
+    double costSum = 0;
+    for (uint32_t i = 0; i < g_inferCost.size(); i++) {
+        costSum += g_inferCost[i];
+    }
+    LogInfo << "Infer images sum " << g_inferCost.size() << ", cost total time: " << costSum << " ms.";
+    LogInfo << "The throughput: " << g_inferCost.size() * 1000 / costSum << " images/sec.";
+    return APP_ERR_OK;
+}
diff --git a/official/cv/yolov4/infer/sdk/config/yolov4.pipeline b/official/cv/yolov4/infer/sdk/config/yolov4.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..9bdccf1e2464708dcbf8685e58af80aa54693bc8
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/config/yolov4.pipeline
@@ -0,0 +1,65 @@
+{
+    "im_yolov4": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_imagedecoder0"
+        },
+        "mxpi_imagedecoder0": {
+            "props": {
+                "handleMethod": "opencv"
+            },
+            "factory": "mxpi_imagedecoder",
+            "next": "mxpi_imageresize0"
+        },
+        "mxpi_imageresize0": {
+            "props": {
+                "parentName": "mxpi_imagedecoder0",
+                "handleMethod": "opencv",
+                "resizeHeight": "608",
+                "resizeWidth": "608",
+                "resizeType": "Resizer_Stretch"
+            },
+            "factory": "mxpi_imageresize",
+            "next": "mxpi_tensorinfer0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "mxpi_imageresize0",
+                "modelPath": "../data/models/yolov4.om",
+                "waitingTime": "3000",
+                "outputDeviceId": "-1"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "mxpi_objectpostprocessor0"
+        },
+        "mxpi_objectpostprocessor0": {
+            "props": {
+                "dataSource": "mxpi_tensorinfer0",
+                "postProcessConfigPath": "../data/models/yolov4_coco2017_acc_test.cfg",
+                "labelPath": "../data/models/coco2017.names",
+                "postProcessLibPath": "./mxpi/build/libyolov4_mindspore_post.so"
+            },
+            "factory": "mxpi_objectpostprocessor",
+            "next": "mxpi_dataserialize0"
+        },
+        "mxpi_dataserialize0": {
+            "props": {
+                "outputDataKeys": "mxpi_objectpostprocessor0"
+            },
+            "factory": "mxpi_dataserialize",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "props": {
+                "blocksize": "4096000"
+            },
+            "factory": "appsink"
+        }
+    }
+}
diff --git a/official/cv/yolov4/infer/sdk/infersdk.sh b/official/cv/yolov4/infer/sdk/infersdk.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e941bc2513d2747b58b37470ddccc907ddb6c290
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/infersdk.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Simple log helper functions
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/${ASCEND_VERSION}/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
+export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
+
+# Set PYTHONPATH so that StreamManagerApi.py can be imported.
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+python3 object_task_metric.py
+exit 0
diff --git a/official/cv/yolov4/infer/sdk/main.py b/official/cv/yolov4/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..df8f9018c0e894a9b1fe5233c81a20fc2650cd7a
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/main.py
@@ -0,0 +1,121 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import json
+import os
+from StreamManagerApi import StreamManagerApi, MxDataInput
+
+
+def read_file_list(input_file):
+    """
+    :param input_file: path of the infer list file, one line per image, e.g.:
+        0 xxx/xxx/a.jpg 1920 1080 0 453 369 473 391 1 588 245 608 268
+        1 xxx/xxx/b.jpg 1920 1080 1 466 403 485 422 2 793 300 809 320
+        ...
+    :return: list of image paths
+    """
+    image_file_list = []
+    if not os.path.exists(input_file):
+        print('input file does not exist.')
+        return image_file_list
+    with open(input_file, "r") as fs:
+        for line in fs.readlines():
+            line = line.strip('\n').split(' ')[1]
+            image_file_list.append(line)
+    return image_file_list
+
+
+def save_infer_result(result_dir, result):
+    """
+    Save the inference result to a file. Write format:
+        Object detected num is 5
+        #Obj: 1, box: 453 369 473 391, confidence: 0.3, label: person, id: 0
+        ...
+    :param result_dir: directory in which the result file is saved
+    :param result: JSON string containing the bbox and class id of every detected object
+    """
+    load_dict = json.loads(result)
+    if load_dict.get('MxpiObject') is None:
+        with open(result_dir + '/result.txt', 'a+') as f_write:
+            f_write.write("")
+    else:
+        res_vec = load_dict.get('MxpiObject')
+        with open(result_dir + '/result.txt', 'a+') as f_write:
+            object_list = 'Object detected num is ' + str(len(res_vec)) + '\n'
+            f_write.writelines(object_list)
+            for index, object_item in enumerate(res_vec):
+                class_info = object_item.get('classVec')[0]
+                object_info = '#Obj: ' + str(index) + ', box: ' + \
+                              str(object_item.get('x0')) + ' ' + \
+                              str(object_item.get('y0')) + ' ' + \
+                              str(object_item.get('x1')) + ' ' + \
+                              str(object_item.get('y1')) + ', confidence: ' + \
+                              str(class_info.get('confidence')) + ', label: ' + \
+                              class_info.get('className') + ', id: ' + \
+                              str(class_info.get('classId')) + '\n'
+                f_write.writelines(object_info)
+
+
+if __name__ == '__main__':
+    # init stream manager
+    stream_manager = StreamManagerApi()
+    ret = stream_manager.InitManager()
+    if ret != 0:
+        print("Failed to init Stream manager, ret=%s" % str(ret))
+        exit()
+
+    # create streams by pipeline config file
+    with open("./config/yolov4.pipeline", 'rb') as f:
+        pipeline = f.read()
+    ret = stream_manager.CreateMultipleStreams(pipeline)
+    if ret != 0:
+        print("Failed to create Stream, ret=%s" % str(ret))
+        exit()
+
+    # Construct the input of the stream
+    data_input = MxDataInput()
+
+    infer_file = './trainval.txt'
+    file_list = read_file_list(infer_file)
+    res_dir_name = 'result'
+    if not os.path.exists(res_dir_name):
+        os.makedirs(res_dir_name)
+
+    for file_path in file_list:
+        print(file_path)
+        file_name = file_path.split('/')[-1]
+        if not (file_name.lower().endswith(".jpg") or file_name.lower().endswith(".jpeg")):
+            continue
+
+        with open(file_path, 'rb') as f:
+            data_input.data = f.read()
+
+        # Inputs data to a specified stream based on streamName.
+        stream_name = b'im_yolov4'
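+        # The stream name must match the top-level key in config/yolov4.pipeline.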
+        inplugin_id = 0
+        unique_id = stream_manager.SendData(stream_name, inplugin_id, data_input)
+        if unique_id < 0:
+            print("Failed to send data to stream.")
+            exit()
+        # Obtain the inference result by specifying streamName and uniqueId.
+        mstimeout = 5000
+        infer_result = stream_manager.GetResult(stream_name, unique_id, mstimeout)
+        if infer_result.errorCode != 0:
+            print("GetResultWithUniqueId error. errorCode=%d, errorMsg=%s" % (
+                infer_result.errorCode, infer_result.data.decode()))
+            exit()
+        save_infer_result(res_dir_name, infer_result.data.decode())
+
+
+    # destroy streams
+    stream_manager.DestroyAllStreams()
diff --git a/official/cv/yolov4/infer/sdk/mxpi/CMakeLists.txt b/official/cv/yolov4/infer/sdk/mxpi/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c23e1fe9049e14f8b834e3a8758a1e5a0bbcc7d6
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/mxpi/CMakeLists.txt
@@ -0,0 +1,38 @@
+cmake_minimum_required(VERSION 3.10.0)
+project(yolov4post)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+
+set(PLUGIN_NAME "yolov4_mindspore_post")
+set(TARGET_LIBRARY ${PLUGIN_NAME})
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/ascend-toolkit/latest/acllib)
+
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+include_directories($ENV{MX_SDK_HOME}/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/opencv4)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/gstreamer-1.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/glib-2.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/lib/glib-2.0/include)
+
+link_directories($ENV{MX_SDK_HOME}/lib)
+link_directories($ENV{MX_SDK_HOME}/opensource/lib/)
+
+add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations)
+add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}")
+add_compile_options("-Dgoogle=mindxsdk_private")
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+
+#message("ACL_LIB_PATH:${ACL_LIB_PATH}.")
+#include_directories(${CMAKE_CURRENT_BINARY_DIR})
+include_directories(${ACL_LIB_PATH}/include)
+
+add_library(${TARGET_LIBRARY} SHARED ../../mxbase/src/PostProcess/Yolov4MindsporePost.cpp
+../../mxbase/src/PostProcess/Yolov4MindsporePost.h)
+
+target_link_libraries(${TARGET_LIBRARY} glib-2.0 gstreamer-1.0 gobject-2.0 gstbase-1.0 gmodule-2.0)
+target_link_libraries(${TARGET_LIBRARY} plugintoolkit mxpidatatype mxbase)
+target_link_libraries(${TARGET_LIBRARY} -Wl,-z,relro,-z,now,-z,noexecstack -s)
+
+install(TARGETS ${TARGET_LIBRARY} LIBRARY DESTINATION $ENV{MX_SDK_HOME}/lib/modelpostprocessors/)
\ No newline at end of file
diff --git a/official/cv/yolov4/infer/sdk/mxpi/build.sh b/official/cv/yolov4/infer/sdk/mxpi/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..07728e78c907ed8c930f680f56dc5663c872f8bc
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/mxpi/build.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+
+# env
+
+mkdir -p build
+cd build || exit
+
+function make_plugin() {
+    if ! cmake ..;
+    then
+      echo "cmake failed."
+      return 1
+    fi
+
+    if ! (make && make install);
+    then
+      echo "make failed."
+      return 1
+    fi
+    return 0
+}
+
+if make_plugin;
+then
+  echo "INFO: The post-process plugin was installed successfully."
+else
+  echo "ERROR: Failed to install the post-process plugin."
+  exit 1
+fi
+
+cd - || exit
\ No newline at end of file
diff --git a/official/cv/yolov4/infer/sdk/requirements.txt b/official/cv/yolov4/infer/sdk/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e46171c3419259ca73c0faf3737e0431dded717f
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/requirements.txt
@@ -0,0 +1,3 @@
+opencv-python
+tqdm
+pycocotools
\ No newline at end of file
diff --git a/official/cv/yolov4/infer/sdk/run.sh b/official/cv/yolov4/infer/sdk/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7dd7947e5786d0fd61142053f8318d028d1b9803
--- /dev/null
+++ b/official/cv/yolov4/infer/sdk/run.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Simple log helper functions
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/${ASCEND_VERSION}/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
+export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
+
+# Set PYTHONPATH so that StreamManagerApi.py can be imported.
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+python3 main.py
+exit 0
diff --git a/official/cv/yolov4/modelarts/modelarts.py b/official/cv/yolov4/modelarts/modelarts.py
new file mode 100644
index 0000000000000000000000000000000000000000..b70331f23004ac13349a5a76cd7ba33f6a64edab
--- /dev/null
+++ b/official/cv/yolov4/modelarts/modelarts.py
@@ -0,0 +1,335 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""YoloV4 train."""
+import argparse
+import os
+import time
+import datetime
+import numpy as np
+
+import mindspore
+from mindspore.context import ParallelMode
+from mindspore.nn.optim.momentum import Momentum
+from mindspore import Tensor
+import mindspore.nn as nn
+from mindspore import context
+from mindspore.communication.management import init, get_rank, get_group_size
+from mindspore.train.serialization import export, load_checkpoint, load_param_into_net
+from mindspore.train.callback import ModelCheckpoint, RunContext
+from mindspore.train.callback import CheckpointConfig
+from mindspore.common import set_seed
+from mindspore.profiler.profiling import Profiler
+
+from src.yolo import YOLOV4CspDarkNet53, YoloWithLossCell, TrainingWrapper
+from src.logger import get_logger
+from src.util import AverageMeter, get_param_groups
+from src.lr_scheduler import get_lr
+from src.yolo_dataset import create_yolo_dataset
+from src.initializer import default_recurisive_init, load_yolov4_params
+from src.eval_utils import apply_eval, EvalCallBack
+
+from model_utils.config import config
+from model_utils.moxing_adapter import moxing_wrapper
+from model_utils.device_adapter import get_device_id, get_device_num
+
+set_seed(1)
+parser = argparse.ArgumentParser(description='YOLOV4')
+parser.add_argument('--enable_modelarts', type=bool, default=True, help='whether to run on ModelArts')
+parser.add_argument('--data_url', type=str, default='', help='dataset directory')
+parser.add_argument('--train_url', type=str, default='', help='directory where the trained model is saved')
+parser.add_argument('--checkpoint_url', type=str, default='', help='path of the pretrained model')
+parser.add_argument('--is_distributed', type=int, default=0, help='whether to run distributed training (0: no, 1: yes)')
+parser.add_argument('--warmup_epochs', type=int, default=1, help='number of warmup epochs')
+parser.add_argument('--epoch', type=int, default=1, help='number of training epochs')
+parser.add_argument('--training_shape', type=int, default=416, help='training shape')
+args_opt, _ = parser.parse_known_args()
+
+def set_default():
+    os.makedirs(config.output_path, exist_ok=True)
+    os.makedirs(config.data_path, exist_ok=True)
+
+    config.run_eval = True
+    config.eval_start_epoch = 0
+    config.max_epoch = args_opt.epoch
+    config.warmup_epochs = args_opt.warmup_epochs
+    config.is_distributed = args_opt.is_distributed
+    config.enable_modelarts = args_opt.enable_modelarts
+    config.checkpoint_url = args_opt.checkpoint_url
+    config.pretrained_backbone = args_opt.checkpoint_url
+    config.training_shape = args_opt.training_shape
+    config.per_batch_size = 1
+    config.file_name = os.path.join(args_opt.train_url, "yolov4")
+    if config.lr_scheduler == 'cosine_annealing' and config.max_epoch > config.t_max:
+        config.t_max = config.max_epoch
+
+    config.lr_epochs = list(map(int, config.lr_epochs.split(',')))
+    config.data_root = os.path.join(args_opt.data_url, 'train2017')
+    config.annFile = os.path.join(args_opt.data_url, 'annotations/instances_train2017.json')
+
+    config.data_val_root = os.path.join(args_opt.data_url, 'val2017')
+    config.ann_val_file = os.path.join(args_opt.data_url, 'annotations/instances_val2017.json')
+
+    device_id = int(os.getenv('DEVICE_ID', '0'))
+    context.set_context(mode=context.GRAPH_MODE,
+                        device_target=config.device_target, save_graphs=False, device_id=device_id)
+
+    if config.need_profiler:
+        profiler = Profiler(output_path=config.checkpoint_url, is_detail=True, is_show_op_path=True)
+    else:
+        profiler = None
+
+    # init distributed
+    if config.is_distributed:
+        init()
+        config.rank = get_rank()
+        config.group_size = get_group_size()
+    else:
+        config.rank = 0
+        config.group_size = 1
+
+    # select for master rank save ckpt or all rank save, compatible for model parallel
+    config.rank_save_ckpt_flag = 0
+    if config.is_save_on_master:
+        if config.rank == 0:
+            config.rank_save_ckpt_flag = 1
+    else:
+        config.rank_save_ckpt_flag = 1
+
+    # logger
+    config.outputs_dir = os.path.join(args_opt.train_url,
+                                      datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
+    config.logger = get_logger(config.outputs_dir, config.rank)
+    config.logger.save_args(config)
+
+    return profiler
+
+
+class InternalCallbackParam(dict):
+    """Internal callback object's parameters."""
+
+    def __getattr__(self, key):
+        return self[key]
+
+    def __setattr__(self, key, value):
+        self[key] = value
+
+
+class BuildTrainNetwork(nn.Cell):
+    def __init__(self, network_, criterion):
+        super(BuildTrainNetwork, self).__init__()
+        self.network = network_
+        self.criterion = criterion
+
+    def construct(self, input_data, label):
+        output = self.network(input_data)
+        loss_ = self.criterion(output, label)
+        return loss_
+
+
+def modelarts_pre_process():
+    '''modelarts pre process function.'''
+    def unzip(zip_file, save_dir):
+        import zipfile
+        s_time = time.time()
+        if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)):
+            zip_isexist = zipfile.is_zipfile(zip_file)
+            if zip_isexist:
+                fz = zipfile.ZipFile(zip_file, 'r')
+                data_num = len(fz.namelist())
+                print("Extract Start...")
+                print("unzip file num: {}".format(data_num))
+                data_print = int(data_num / 100) if data_num > 100 else 1
+                i = 0
+                for file in fz.namelist():
+                    if i % data_print == 0:
+                        print("unzip percent: {}%".format(int(i * 100 / data_num)), flush=True)
+                    i += 1
+                    fz.extract(file, save_dir)
+                print("cost time: {}min:{}s.".format(int((time.time() - s_time) / 60),
+                                                     int(int(time.time() - s_time) % 60)))
+                print("Extract Done.")
+            else:
+                print("This is not zip.")
+        else:
+            print("Zip has been extracted.")
+
+    if config.need_modelarts_dataset_unzip:
+        zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + ".zip")
+        save_dir_1 = os.path.join(config.data_path)
+
+        sync_lock = "/tmp/unzip_sync.lock"
+
+        # Each server contains at most 8 devices.
+        if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
+            print("Zip file path: ", zip_file_1)
+            print("Unzip file save dir: ", save_dir_1)
+            unzip(zip_file_1, save_dir_1)
+            print("===Finish extract data synchronization===")
+            try:
+                os.mknod(sync_lock)
+            except IOError:
+                pass
+
+        while True:
+            if os.path.exists(sync_lock):
+                break
+            time.sleep(1)
+
+        print("Device: {}, Finish sync unzip data from {} to {}.".format(get_device_id(), zip_file_1, save_dir_1))
+
+    config.ckpt_path = os.path.join(config.output_path, config.ckpt_path)
+
+
+def get_network(net, cfg, learning_rate):
+    opt = Momentum(params=get_param_groups(net),
+                   learning_rate=Tensor(learning_rate),
+                   momentum=cfg.momentum,
+                   weight_decay=cfg.weight_decay,
+                   loss_scale=cfg.loss_scale)
+    net = TrainingWrapper(net, opt)
+    net.set_train()
+    return net
+
+
+@moxing_wrapper(pre_process=modelarts_pre_process)
+def run_train():
+
+    profiler = set_default()
+    loss_meter = AverageMeter('loss')
+    context.reset_auto_parallel_context()
+    parallel_mode = ParallelMode.STAND_ALONE
+    degree = 1
+    if config.is_distributed:
+        parallel_mode = ParallelMode.DATA_PARALLEL
+        degree = get_group_size()
+    context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=degree)
+
+    network = YOLOV4CspDarkNet53()
+    if config.run_eval:
+        network_eval = network
+    # default is kaiming-normal
+    default_recurisive_init(network)
+    load_yolov4_params(config, network)
+
+    network = YoloWithLossCell(network)
+    config.logger.info('finish get network')
+
+    ds, data_size = create_yolo_dataset(image_dir=config.data_root, anno_path=config.annFile, is_training=True,
+                                        batch_size=config.per_batch_size, max_epoch=config.max_epoch,
+                                        device_num=config.group_size, rank=config.rank, default_config=config)
+    config.logger.info('Finish loading dataset')
+
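+    # data_size is the total number of training images, so steps per epoch is
+    # images / (per-device batch size * device count)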
+    config.steps_per_epoch = int(data_size / config.per_batch_size / config.group_size)
+
+    if config.ckpt_interval <= 0:
+        config.ckpt_interval = config.steps_per_epoch
+
+    lr = get_lr(config)
+    network = get_network(network, config, lr)
+    network.set_train(True)
+
+    if config.rank_save_ckpt_flag or config.run_eval:
+        cb_params = InternalCallbackParam()
+        cb_params.train_network = network
+        cb_params.epoch_num = config.max_epoch * config.steps_per_epoch // config.ckpt_interval
+        cb_params.cur_epoch_num = 1
+        run_context = RunContext(cb_params)
+
+    if config.rank_save_ckpt_flag:
+        # checkpoint save
+        ckpt_max_num = 10
+        ckpt_config = CheckpointConfig(save_checkpoint_steps=config.ckpt_interval,
+                                       keep_checkpoint_max=ckpt_max_num)
+        save_ckpt_path = os.path.join(config.outputs_dir, 'ckpt_' + str(config.rank) + '/')
+        ckpt_cb = ModelCheckpoint(config=ckpt_config, directory=save_ckpt_path, prefix='{}'.format(config.rank))
+        ckpt_cb.begin(run_context)
+
+    if config.run_eval:
+        data_val_root = config.data_val_root
+        ann_val_file = config.ann_val_file
+        save_ckpt_path = os.path.join(config.outputs_dir, 'ckpt_' + str(config.rank) + '/')
+        input_val_shape = Tensor(tuple(config.test_img_shape), mindspore.float32)
+        # init detection engine
+        eval_dataset, eval_data_size = create_yolo_dataset(data_val_root, ann_val_file, is_training=False,
+                                                           batch_size=1, max_epoch=1, device_num=1,
+                                                           rank=0, shuffle=False, default_config=config)
+        eval_param_dict = {"net": network_eval, "dataset": eval_dataset, "data_size": eval_data_size,
+                           "anno_json": ann_val_file, "input_shape": input_val_shape, "args": config}
+        eval_cb = EvalCallBack(apply_eval, eval_param_dict, interval=config.eval_interval,
+                               eval_start_epoch=config.eval_start_epoch, save_best_ckpt=True,
+                               ckpt_directory=save_ckpt_path, besk_ckpt_name="best_map.ckpt", metrics_name="mAP")
+
+    old_progress = -1
+    t_end = time.time()
+    data_loader = ds.create_dict_iterator(output_numpy=True, num_epochs=1)
+
+    for i, data in enumerate(data_loader):
+        images = data["image"]
+        input_shape = images.shape[2:4]
+        config.logger.info('iter[%d], shape: %d', i + 1, input_shape[0])
+
+        images = Tensor.from_numpy(images)
+        batch_y_true_0 = Tensor.from_numpy(data['bbox1'])
+        batch_y_true_1 = Tensor.from_numpy(data['bbox2'])
+        batch_y_true_2 = Tensor.from_numpy(data['bbox3'])
+        batch_gt_box0 = Tensor.from_numpy(data['gt_box1'])
+        batch_gt_box1 = Tensor.from_numpy(data['gt_box2'])
+        batch_gt_box2 = Tensor.from_numpy(data['gt_box3'])
+
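+        # reverse (H, W) to (W, H) before passing the shape to the network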
+        input_shape = Tensor(tuple(input_shape[::-1]), mindspore.float32)
+        loss = network(images, batch_y_true_0, batch_y_true_1, batch_y_true_2, batch_gt_box0, batch_gt_box1,
+                       batch_gt_box2, input_shape)
+        loss_meter.update(loss.asnumpy())
+
+        # ckpt progress
+        if config.rank_save_ckpt_flag:
+            cb_params.cur_step_num = i + 1  # current step number
+            cb_params.batch_num = i + 2
+            ckpt_cb.step_end(run_context)
+
+        if (i + 1) % config.log_interval == 0:
+            time_used = time.time() - t_end
+            epoch = int((i + 1) / config.steps_per_epoch)
+            fps = config.per_batch_size * (i - old_progress) * config.group_size / time_used
+            if config.rank == 0:
+                config.logger.info('epoch[{}], iter[{}], {}, per step time: {:.2f} ms, fps: {:.2f}, lr:{}'.format(
+                    epoch, i, loss_meter, 1000 * time_used / (i - old_progress), fps, lr[i]))
+            t_end = time.time()
+            loss_meter.reset()
+            old_progress = i
+
+        if (i + 1) % config.steps_per_epoch == 0 and (config.run_eval or config.rank_save_ckpt_flag):
+            if config.run_eval:
+                eval_cb.epoch_end(run_context)
+                network.set_train()
+            cb_params.cur_epoch_num += 1
+
+        if config.need_profiler and profiler is not None:
+            if i == 10:
+                profiler.analyse()
+                break
+
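+    # NOTE: the hard-coded name assumes ModelCheckpoint's "{prefix}-{epoch}_{step}.ckpt"
+    # pattern with the rank-0 prefix; adjust it if the training schedule changes.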
+    ckpt_path = os.path.join(config.outputs_dir, 'ckpt_' + str(config.rank), '0-1_117266.ckpt')
+
+    network_export = YOLOV4CspDarkNet53()
+    network_export.set_train(False)
+
+    param_dict = load_checkpoint(ckpt_path)
+    load_param_into_net(network_export, param_dict)
+    input_data = Tensor(np.zeros([config.batch_size, 3, config.testing_shape, config.testing_shape]), mindspore.float32)
+
+    export(network_export, input_data, file_name=config.file_name, file_format="AIR")
+
+if __name__ == "__main__":
+    run_train()
diff --git a/research/cv/EDSR/infer/convert/aipp_edsr_opencv.cfg b/research/cv/EDSR/infer/convert/aipp_edsr_opencv.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..931dbfeda97c5be23496361265e067cd4277ff3b
--- /dev/null
+++ b/research/cv/EDSR/infer/convert/aipp_edsr_opencv.cfg
@@ -0,0 +1,5 @@
+aipp_op {
+aipp_mode:static
+input_format:RGB888_U8
+}
+
diff --git a/research/cv/EDSR/infer/convert/convert_om.sh b/research/cv/EDSR/infer/convert/convert_om.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a57ec0d1b6ffb40da082313c6aa59116f6d6186d
--- /dev/null
+++ b/research/cv/EDSR/infer/convert/convert_om.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+model_path=$1
+aipp_cfg_path=$2
+output_model_name=$3
+
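+# --framework=1 selects the MindSpore (AIR) frontend; --insert_op_conf fuses the AIPP preprocessing into the OM.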
+atc \
+--model=$model_path \
+--input_format=NCHW \
+--framework=1 \
+--output=$output_model_name \
+--log=error \
+--soc_version=Ascend310 \
+--insert_op_conf=$aipp_cfg_path
diff --git a/research/cv/EDSR/infer/data/config/edsr.pipeline b/research/cv/EDSR/infer/data/config/edsr.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..ea3c2a13d17509f040ca2b5abff7bec6a33a5536
--- /dev/null
+++ b/research/cv/EDSR/infer/data/config/edsr.pipeline
@@ -0,0 +1,28 @@
+{
+    "edsr_superResolution": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "appsrc0",
+                "modelPath": "../model/edsr.om"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsink"
+        }
+    }
+}
\ No newline at end of file
diff --git a/research/cv/EDSR/infer/docker_start_infer.sh b/research/cv/EDSR/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d8cf649151b2fe5c8b6ae2c8aa9c0c296b2d9778
--- /dev/null
+++ b/research/cv/EDSR/infer/docker_start_infer.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+#coding = utf-8
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the BSD 3-Clause License  (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+data_dir=$2
+
+function show_help() {
+    echo "Usage: docker_start.sh docker_image data_dir"
+}
+
+function param_check() {
+    if [ -z "${docker_image}" ]; then
+        echo "please input docker_image"
+        show_help
+        exit 1
+    fi
+
+    if [ -z "${data_dir}" ]; then
+        echo "please input data_dir"
+        show_help
+        exit 1
+    fi
+}
+
+param_check
+
+docker run -it \
+  --device=/dev/davinci0 \
+  --device=/dev/davinci_manager \
+  --device=/dev/devmm_svm \
+  --device=/dev/hisi_hdc \
+  -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+  -v ${data_dir}:${data_dir} \
+  ${docker_image} \
+  /bin/bash
diff --git a/research/cv/EDSR/infer/mxbase/CMakeLists.txt b/research/cv/EDSR/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1538ccd9dbdfaef45688962469df562405447cb1
--- /dev/null
+++ b/research/cv/EDSR/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,55 @@
+cmake_minimum_required(VERSION 3.14.0)
+project(edsr)
+
+set(TARGET edsr)
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -s -pie)
+
+add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall
+-Dgoogle=mindxsdk_private -D_GLIBCXX_USE_CXX11_ABI=0)
+
+
+#Check environment variable
+if(NOT DEFINED ENV{ASCEND_HOME})
+    message(FATAL_ERROR "please define environment variable:ASCEND_HOME")
+endif()
+if(NOT DEFINED ENV{ASCEND_VERSION})
+    message(WARNING "please define environment variable:ASCEND_VERSION")
+endif()
+if(NOT DEFINED ENV{ARCH_PATTERN})
+    message(WARNING "please define environment variable:ARCH_PATTERN")
+endif()
+set(ACL_INC_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/include)
+set(ACL_LIB_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/lib64)
+
+set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
+set(MXBASE_INC ${MXBASE_ROOT_DIR}/include)
+set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib)
+set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors)
+set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/postprocess/include)
+
+
+if(DEFINED ENV{MXSDK_OPENSOURCE_DIR})
+    set(OPENSOURCE_DIR $ENV{MXSDK_OPENSOURCE_DIR})
+else()
+    set(OPENSOURCE_DIR ${MXBASE_ROOT_DIR}/opensource)
+endif()
+
+include_directories(${ACL_INC_DIR})
+include_directories(${OPENSOURCE_DIR}/include)
+include_directories(${OPENSOURCE_DIR}/include/opencv4)
+
+include_directories(${MXBASE_INC})
+include_directories(${MXBASE_POST_PROCESS_DIR})
+
+link_directories(${ACL_LIB_DIR})
+link_directories(${OPENSOURCE_DIR}/lib)
+link_directories(${MXBASE_LIB_DIR})
+link_directories(${MXBASE_POST_LIB_DIR})
+
+add_executable(${TARGET} main.cpp EdsrSuperresolution.cpp)
+target_link_libraries(${TARGET} glog cpprest mxbase opencv_world)
+
+install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/)
diff --git a/research/cv/EDSR/infer/mxbase/EdsrSuperresolution.cpp b/research/cv/EDSR/infer/mxbase/EdsrSuperresolution.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1016326d41bc03837732c47a46d27b548a53dc98
--- /dev/null
+++ b/research/cv/EDSR/infer/mxbase/EdsrSuperresolution.cpp
@@ -0,0 +1,200 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "EdsrSuperresolution.h"
+
+#include <memory>
+#include <vector>
+#include <string>
+
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/Log/Log.h"
+
+
+namespace localParameter {
+    const uint32_t VECTOR_FIRST_INDEX = 0;
+    const uint32_t VECTOR_SECOND_INDEX = 1;
+    const uint32_t VECTOR_THIRD_INDEX = 2;
+    const uint32_t VECTOR_FOURTH_INDEX = 3;
+    const uint32_t VECTOR_FIFTH_INDEX = 4;
+}
+
+APP_ERROR EdsrSuperresolution::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+        return ret;
+    }
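+    // Derive the upscale factor from the output/input height ratio of the fixed-shape model;
+    // the larger input edge becomes the square padding target for input images.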
+    uint32_t outputModelHeight = modelDesc_.outputTensors[0].tensorDims[localParameter::VECTOR_THIRD_INDEX];
+    uint32_t inputModelHeight = modelDesc_.inputTensors[0].tensorDims[localParameter::VECTOR_SECOND_INDEX];
+    uint32_t inputModelWidth = modelDesc_.inputTensors[0].tensorDims[localParameter::VECTOR_THIRD_INDEX];
+
+    scale_ = outputModelHeight / inputModelHeight;
+    maxEdge_ = inputModelWidth > inputModelHeight ? inputModelWidth : inputModelHeight;
+    return APP_ERR_OK;
+}
+
+APP_ERROR EdsrSuperresolution::DeInit() {
+    model_->DeInit();
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+APP_ERROR EdsrSuperresolution::ReadImage(const std::string &imgPath, cv::Mat *imageMat) {
+    *imageMat = cv::imread(imgPath, cv::IMREAD_COLOR);
+    imageWidth_ = imageMat->cols;
+    imageHeight_ = imageMat->rows;
+    return APP_ERR_OK;
+}
+
+APP_ERROR EdsrSuperresolution::PaddingImage(cv::Mat *imageSrc, cv::Mat *imageDst, const uint32_t &targetLength) {
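+    // Pad only at the bottom and right so the original pixels keep their coordinates.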
+    uint32_t padding_h = targetLength - imageHeight_;
+    uint32_t padding_w = targetLength - imageWidth_;
+    cv::copyMakeBorder(*imageSrc, *imageDst, 0, padding_h, 0, padding_w, cv::BORDER_CONSTANT, 0);
+    return APP_ERR_OK;
+}
+
+
+APP_ERROR EdsrSuperresolution::CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase *tensorBase) {
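+    // Packed 8-bit image: dataSize = rows * cols * 3 (YUV444_RGB_WIDTH_NU is the per-pixel byte count).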
+    const uint32_t dataSize = imageMat.cols * imageMat.rows * MxBase::YUV444_RGB_WIDTH_NU;
+
+    MxBase::MemoryData memoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE, deviceId_);
+
+    MxBase::MemoryData memoryDataSrc(imageMat.data, dataSize, MxBase::MemoryData::MEMORY_HOST_MALLOC);
+
+    APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc failed.";
+        return ret;
+    }
+
+    std::vector<uint32_t> shape = {imageMat.rows * MxBase::YUV444_RGB_WIDTH_NU, static_cast<uint32_t>(imageMat.cols)};
+    *tensorBase = MxBase::TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_UINT8);
+    return APP_ERR_OK;
+}
+
+APP_ERROR EdsrSuperresolution::Inference(std::vector<MxBase::TensorBase> *inputs,
+                                      std::vector<MxBase::TensorBase> *outputs) {
+    auto dtypes = model_->GetOutputDataType();
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector<uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        MxBase::TensorBase tensor(shape, dtypes[i], MxBase::MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
+        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs->push_back(tensor);
+    }
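+    // The OM model was converted with fixed input dims, so infer with a static batch.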
+    MxBase::DynamicInfo dynamicInfo = {};
+    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+    APP_ERROR ret = model_->ModelInference(*inputs, *outputs, dynamicInfo);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+
+APP_ERROR EdsrSuperresolution::PostProcess(std::vector<MxBase::TensorBase> *inputs, cv::Mat *imageMat) {
+    MxBase::TensorBase tensor = *inputs->begin();
+    int ret = tensor.ToHost();
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Tensor deploy to host failed.";
+        return ret;
+    }
+    uint32_t outputModelChannel = tensor.GetShape()[localParameter::VECTOR_SECOND_INDEX];
+    uint32_t outputModelHeight = tensor.GetShape()[localParameter::VECTOR_THIRD_INDEX];
+    uint32_t outputModelWidth = tensor.GetShape()[localParameter::VECTOR_FOURTH_INDEX];
+    LogInfo << "Channel:" << outputModelChannel << " Height:" << outputModelHeight << " Width:" << outputModelWidth;
+
+    uint32_t finalHeight = imageHeight_ * scale_;
+    uint32_t finalWidth = imageWidth_ * scale_;
+    cv::Mat output(finalHeight, finalWidth, CV_32FC3);
+
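+    // View the flat NCHW buffer as [N][C][H][W] and scatter it into an HWC float image,
+    // keeping only the valid (unpadded) finalHeight x finalWidth region.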
+    auto data = reinterpret_cast<float(*)[outputModelChannel]
+        [outputModelHeight][outputModelWidth]>(tensor.GetBuffer());
+
+    for (size_t c = 0; c < outputModelChannel; ++c) {
+        for (size_t x = 0; x < finalHeight; ++x) {
+            for (size_t y = 0; y < finalWidth; ++y) {
+                output.at<cv::Vec3f>(x, y)[c] = data[0][c][x][y];
+            }
+        }
+    }
+
+    *imageMat = output;
+    return APP_ERR_OK;
+}
+
+APP_ERROR EdsrSuperresolution::Process(const std::string &imgPath) {
+    cv::Mat imageMat;
+    APP_ERROR ret = ReadImage(imgPath, &imageMat);
+    if (ret != APP_ERR_OK) {
+        LogError << "ReadImage failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    PaddingImage(&imageMat, &imageMat, maxEdge_);
+    MxBase::TensorBase tensorBase;
+    ret = CVMatToTensorBase(imageMat, &tensorBase);
+    if (ret != APP_ERR_OK) {
+        LogError << "CVMatToTensorBase failed, ret=" << ret << ".";
+        return ret;
+    }
+
+
+    std::vector<MxBase::TensorBase> inputs = {};
+    std::vector<MxBase::TensorBase> outputs = {};
+    inputs.push_back(tensorBase);
+    ret = Inference(&inputs, &outputs);
+
+    if (ret != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    cv::Mat output;
+    ret = PostProcess(&outputs, &output);
+    if (ret != APP_ERR_OK) {
+        LogError << "PostProcess failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    std::string resultPath = imgPath;
+    size_t pos = resultPath.find_last_of(".");
+    resultPath.replace(resultPath.begin() + pos, resultPath.end(), "_infer.png");
+    cv::imwrite(resultPath, output);
+    return APP_ERR_OK;
+}
diff --git a/research/cv/EDSR/infer/mxbase/EdsrSuperresolution.h b/research/cv/EDSR/infer/mxbase/EdsrSuperresolution.h
new file mode 100644
index 0000000000000000000000000000000000000000..36b1ab9cf70a29997fd323597af23cf8695f8f9a
--- /dev/null
+++ b/research/cv/EDSR/infer/mxbase/EdsrSuperresolution.h
@@ -0,0 +1,54 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef EDSR_SUPERRESOLUTION_H
+#define EDSR_SUPERRESOLUTION_H
+
+#include <memory>
+#include <vector>
+#include <string>
+#include <opencv2/opencv.hpp>
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "MxBase/PostProcessBases/PostProcessDataType.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+
+struct InitParam {
+    uint32_t deviceId;
+    std::string modelPath;
+};
+
+class EdsrSuperresolution {
+ public:
+    APP_ERROR Init(const InitParam &initParam);
+    APP_ERROR DeInit();
+    APP_ERROR ReadImage(const std::string &imgPath, cv::Mat *imageMat);
+    APP_ERROR CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase *tensorBase);
+    APP_ERROR Inference(std::vector<MxBase::TensorBase> *inputs, std::vector<MxBase::TensorBase> *outputs);
+    APP_ERROR Process(const std::string &imgPath);
+    APP_ERROR PostProcess(std::vector<MxBase::TensorBase> *inputs, cv::Mat *imageMat);
+    APP_ERROR PaddingImage(cv::Mat *imageSrc, cv::Mat *imageDst, const uint32_t &targetLength);
+
+ private:
+    std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
+    MxBase::ModelDesc modelDesc_;
+    uint32_t deviceId_ = 0;
+    uint32_t scale_ = 0;
+    uint32_t imageWidth_ = 0;
+    uint32_t imageHeight_ = 0;
+    uint32_t maxEdge_ = 0;
+};
+
+#endif
diff --git a/research/cv/EDSR/infer/mxbase/build.sh b/research/cv/EDSR/infer/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..566c6461ee1262c032d8c133a882d119e98755ec
--- /dev/null
+++ b/research/cv/EDSR/infer/mxbase/build.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+
+# env
+
+mkdir -p build
+cd build || exit
+
+function make_plugin() {
+    if ! cmake ..;
+    then
+      echo "cmake failed."
+      return 1
+    fi
+
+    if ! (make);
+    then
+      echo "make failed."
+      return 1
+    fi
+
+    return 0
+}
+
+if make_plugin;
+then
+  echo "INFO: Build successfully."
+else
+  echo "ERROR: Build failed."
+fi
+
+cd - || exit
diff --git a/research/cv/EDSR/infer/mxbase/main.cpp b/research/cv/EDSR/infer/mxbase/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cb3401c0d17b08e7a707539d38a2f394535b1a4b
--- /dev/null
+++ b/research/cv/EDSR/infer/mxbase/main.cpp
@@ -0,0 +1,46 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "EdsrSuperresolution.h"
+#include "MxBase/Log/Log.h"
+
+
+// infer an image
+int main(int argc, char *argv[]) {
+    if (argc <= 1) {
+        LogWarn << "Please input image path, such as './test.png'";
+        return APP_ERR_OK;
+    }
+    InitParam initParam = {};
+    initParam.deviceId = 0;
+    initParam.modelPath = "../model/edsr.om";
+    EdsrSuperresolution edsrSR;
+    APP_ERROR ret = edsrSR.Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "EdsrSuperresolution init failed, ret=" << ret << ".";
+        return ret;
+    }
+    std::string imgPath = argv[1];
+    ret = edsrSR.Process(imgPath);
+    if (ret != APP_ERR_OK) {
+        LogError << "EdsrSuperresolution process failed, ret=" << ret << ".";
+        edsrSR.DeInit();
+        return ret;
+    }
+
+    edsrSR.DeInit();
+    return APP_ERR_OK;
+}
diff --git a/research/cv/EDSR/infer/sdk/eval.py b/research/cv/EDSR/infer/sdk/eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..57242b243db2b2e8fe214eccbc15724e68318278
--- /dev/null
+++ b/research/cv/EDSR/infer/sdk/eval.py
@@ -0,0 +1,70 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""eval for sdk infer"""
+import argparse
+import os
+import math
+import cv2
+import numpy as np
+
+def parser_args():
+    """parse arguments"""
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--label_dir", type=str, default="../data/DIV2K/label/",
+                        help="path of label images directory")
+    parser.add_argument("--infer_dir", type=str, default=" ../data/sdk_out",
+                        help="path of infer images directory")
+    parser.add_argument("--scale", type=int, default=2)
+    return parser.parse_args()
+
+
+def calc_psnr(sr, hr, scale, rgb_range):
+    """calculate psnr"""
+    hr = np.float32(hr)
+    sr = np.float32(sr)
+    diff = (sr - hr) / rgb_range
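+    # project the RGB difference onto the Y (luma) channel before computing MSE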
+    gray_coeffs = np.array([65.738, 129.057, 25.064]).reshape((1, 3, 1, 1)) / 256
+    diff = np.multiply(diff, gray_coeffs).sum(1)
+    if hr.size == 1:
+        return 0
+    if scale == 1:
+        valid = diff
+    else:
+        shave = scale
+        valid = diff[..., shave:-shave, shave:-shave]
+    mse = np.mean(pow(valid, 2))
+    return -10 * math.log10(mse)
+
+
+if __name__ == '__main__':
+    args = parser_args()
+    infer_path_list = os.listdir(args.infer_dir)
+    total_num = len(infer_path_list)
+    mean_psnr = 0.0
+    for infer_p in infer_path_list:
+        infer_path = os.path.join(args.infer_dir, infer_p)
+        label_path = os.path.join(args.label_dir, infer_p.replace('_infer', ''))
+        infer_img = cv2.imread(infer_path)
+        h, w = infer_img.shape[:2]
+        label_img = cv2.imread(label_path)[0:h, 0:w]
+        infer_img = np.expand_dims(infer_img, 0).transpose((0, 3, 1, 2))
+        label_img = np.expand_dims(label_img, 0).transpose((0, 3, 1, 2))
+        psnr = calc_psnr(infer_img, label_img, args.scale, 255.0)
+        mean_psnr += psnr/total_num
+        print("current psnr: ", psnr)
+    print('Mean psnr of %d images is %.4f' % (total_num, mean_psnr))
diff --git a/research/cv/EDSR/infer/sdk/main.py b/research/cv/EDSR/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..6932b57b5e0fd0c7fbd9641f31c0e1ee8a0ab0a1
--- /dev/null
+++ b/research/cv/EDSR/infer/sdk/main.py
@@ -0,0 +1,43 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""run sdk infer"""
+import argparse
+import os
+from sr_infer_wrapper import SRInferWrapper
+
+def parser_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--input_dir", type=str, default="../data/DIV2K/input/",
+                        help="path of input images directory")
+    parser.add_argument("--pipeline_path", type=str, default="../data/config/edsr.pipeline",
+                        help="path of pipeline file")
+    parser.add_argument("--output_dir", type=str, default="../data/sdk_out/",
+                        help="path of output images directory")
+    return parser.parse_args()
+
+
+if __name__ == '__main__':
+    args = parser_args()
+    sr_infer = SRInferWrapper()
+    sr_infer.load_pipeline(args.pipeline_path)
+    path_list = os.listdir(args.input_dir)
+    path_list.sort()
+    if not os.path.exists(args.output_dir):
+        os.makedirs(args.output_dir)
+
+    for img_path in path_list:
+        print(img_path)
+        res = sr_infer.do_infer(os.path.join(args.input_dir, img_path))
+        res.save(os.path.join(args.output_dir, img_path.replace('x2', '_infer')))
diff --git a/research/cv/EDSR/infer/sdk/run.sh b/research/cv/EDSR/infer/sdk/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f1b9ff91f97c47c2954940b10c47b9e896ff1622
--- /dev/null
+++ b/research/cv/EDSR/infer/sdk/run.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+# The number of parameters must be 3.
+if [ $# -ne 3 ]
+then
+  echo "Wrong parameter format."
+  echo "Usage:"
+  echo "         bash $0 [INPUT_PATH] [PIPELINE_PATH] [OUTPUT_PATH]"
+  echo "Example: "
+  echo "         bash run.sh ../data/DIV2K/input/ ../data/config/edsr.pipeline ../data/sdk_out/"
+
+  exit 1
+fi
+
+# The path of a folder containing eval images.
+input_dir=$1
+# The path of pipeline file.
+pipeline_path=$2
+# The path of a folder used to store all results.
+output_dir=$3
+
+
+if [ ! -d "$input_dir" ]
+then
+  echo "Please input the correct directory containing images."
+  exit 1
+fi
+
+if [ ! -d "$output_dir" ]
+then
+  mkdir -p "$output_dir"
+fi
+
+set -e
+
+# Simple log helper functions (defined before first use)
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+CUR_PATH=$(cd "$(dirname "$0")" || { warn "Failed to check path/to/run.sh" ; exit 1 ; } ; pwd)
+echo "enter $CUR_PATH"
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/${ASCEND_VERSION}/latest/acllib/lib64:${LD_LIBRARY_PATH}
+
+# set PYTHONPATH so that StreamManagerApi.py can be imported
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+if [ ! "${MX_SDK_HOME}" ]
+then
+export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
+fi
+
+if [ ! "${MX_SDK_HOME}" ]
+then
+export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
+fi
+
+python3 main.py --input_dir=$input_dir \
+                --pipeline_path=$pipeline_path \
+                --output_dir=$output_dir
+
+exit 0
\ No newline at end of file
diff --git a/research/cv/EDSR/infer/sdk/sr_infer_wrapper.py b/research/cv/EDSR/infer/sdk/sr_infer_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..2631e2e8849fbbd8f14d72fa784f606952c99e89
--- /dev/null
+++ b/research/cv/EDSR/infer/sdk/sr_infer_wrapper.py
@@ -0,0 +1,125 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""super resolution infer wrapper"""
+import json
+import numpy as np
+from PIL import Image
+import cv2
+from StreamManagerApi import StreamManagerApi, StringVector, InProtobufVector, MxProtobufIn
+import MxpiDataType_pb2 as MxpiDataType
+
+DEFAULT_IMAGE_WIDTH = 1020
+DEFAULT_IMAGE_HEIGHT = 1020
+CHANNELS = 3
+SCALE = 2
+
+def padding(img, target_shape):
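+    """Reflect-pad img at the bottom/right up to target_shape = (h, w)."""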
+    h, w = target_shape[0], target_shape[1]
+    img_h, img_w, _ = img.shape
+    dh, dw = h - img_h, w - img_w
+    if dh < 0 or dw < 0:
+        raise RuntimeError(f"target_shape is bigger than img.shape, {target_shape} > {img.shape}")
+    if dh != 0 or dw != 0:
+        img = np.pad(img, ((0, int(dh)), (0, int(dw)), (0, 0)), "reflect")
+    return img
+
+
+def unpadding(img, target_shape):
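+    """Crop img back down to target_shape = (h, w) after inference."""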
+    h, w = target_shape[0], target_shape[1]
+    img_h, img_w, _ = img.shape
+    if img_h > h:
+        img = img[:h, :, :]
+    if img_w > w:
+        img = img[:, :w, :]
+    return img
+
+
+class SRInferWrapper:
+    """super resolution infer wrapper"""
+    def __init__(self):
+        self.stream_name = None
+        self.streamManagerApi = StreamManagerApi()
+        # init stream manager
+        if self.streamManagerApi.InitManager() != 0:
+            raise RuntimeError("Failed to init stream manager.")
+
+    def load_pipeline(self, pipeline_path):
+        # create streams by pipeline config file
+        with open(pipeline_path, 'r') as f:
+            pipeline = json.load(f)
+        self.stream_name = list(pipeline.keys())[0].encode()
+        pipelineStr = json.dumps(pipeline).encode()
+        if self.streamManagerApi.CreateMultipleStreams(pipelineStr) != 0:
+            raise RuntimeError("Failed to create stream.")
+
+    def do_infer(self, image_path):
+        """do infer process"""
+        # construct the input of the stream
+        image = cv2.imread(image_path)
+        ori_h, ori_w, _ = image.shape
+        image = padding(image, (DEFAULT_IMAGE_HEIGHT, DEFAULT_IMAGE_WIDTH))
+        tensor_pkg_list = MxpiDataType.MxpiTensorPackageList()
+        tensor_pkg = tensor_pkg_list.tensorPackageVec.add()
+        tensor_vec = tensor_pkg.tensorVec.add()
+        tensor_vec.deviceId = 0
+        tensor_vec.memType = 0
+
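+        # NHWC: a batch of one padded (H, W, C) uint8 image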
+        for dim in [1, *image.shape]:
+            tensor_vec.tensorShape.append(dim)
+
+        input_data = image.tobytes()
+        tensor_vec.dataStr = input_data
+        tensor_vec.tensorDataSize = len(input_data)
+
+        protobuf_vec = InProtobufVector()
+        protobuf = MxProtobufIn()
+        protobuf.key = b'appsrc0'
+        protobuf.type = b'MxTools.MxpiTensorPackageList'
+        protobuf.protobuf = tensor_pkg_list.SerializeToString()
+        protobuf_vec.push_back(protobuf)
+
+        unique_id = self.streamManagerApi.SendProtobuf(
+            self.stream_name, 0, protobuf_vec)
+        if unique_id < 0:
+            raise RuntimeError("Failed to send data to stream.")
+
+        # get plugin output data
+        key = b"mxpi_tensorinfer0"
+        keyVec = StringVector()
+        keyVec.push_back(key)
+        inferResult = self.streamManagerApi.GetProtobuf(self.stream_name, 0, keyVec)
+        if inferResult.size() == 0:
+            raise RuntimeError("inferResult is null")
+        if inferResult[0].errorCode != 0:
+            raise RuntimeError("GetProtobuf error. errorCode=%d, errorMsg=%s" % (
+                inferResult[0].errorCode, inferResult[0].messageName.decode()))
+
+        # get the infer result
+        inferList0 = MxpiDataType.MxpiTensorPackageList()
+        inferList0.ParseFromString(inferResult[0].messageBuf)
+        inferVisionData = inferList0.tensorPackageVec[0].tensorVec[0].dataStr
+
+        # converting the byte data into 32 bit float array
+        output_img_data = np.frombuffer(inferVisionData, dtype=np.float32)
+        output_img_data = np.clip(output_img_data, 0, 255)
+        output_img_data = np.round(output_img_data).astype(np.uint8)
+        output_img_data = np.reshape(output_img_data, (CHANNELS, SCALE*DEFAULT_IMAGE_HEIGHT, SCALE*DEFAULT_IMAGE_WIDTH))
+        output_img_data = output_img_data.transpose((1, 2, 0))
+        output_img_data = unpadding(output_img_data, (SCALE*ori_h, SCALE*ori_w))
+        result = Image.fromarray(output_img_data[..., ::-1])
+
+        return result
diff --git a/research/cv/EDSR/modelarts/train_start.py b/research/cv/EDSR/modelarts/train_start.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c3a1b019e0a13354000af8728b3cb95e1a41cff
--- /dev/null
+++ b/research/cv/EDSR/modelarts/train_start.py
@@ -0,0 +1,127 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""train edsr om modelarts"""
+import argparse
+import os
+import subprocess
+import moxing as mox
+
+
+_CACHE_DATA_URL = "/cache/data_url"
+_CACHE_TRAIN_URL = "/cache/train_url"
+
+def _parse_args():
+    """parse arguments"""
+    parser = argparse.ArgumentParser(description='train and export edsr on modelarts')
+    # train output path
+    parser.add_argument('--train_url', type=str, default='', help='where training log and ckpts saved')
+    # dataset dir
+    parser.add_argument('--data_url', type=str, default='', help='path of the dataset directory')
+    # train config
+    parser.add_argument('--data_train', type=str, default='DIV2K', help='train dataset name')
+    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train')
+    parser.add_argument('--batch_size', type=int, default=16, help='input batch size for training')
+    parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
+    parser.add_argument('--init_loss_scale', type=float, default=65536., help='scaling factor')
+    parser.add_argument('--loss_scale', type=float, default=1024.0, help='loss_scale')
+    parser.add_argument('--scale', type=str, default='2', help='super resolution scale')
+    parser.add_argument('--ckpt_save_path', type=str, default='ckpt', help='path to save ckpt')
+    parser.add_argument('--ckpt_save_interval', type=int, default=10, help='save ckpt frequency, unit is epoch')
+    parser.add_argument('--ckpt_save_max', type=int, default=5, help='max number of saved ckpt')
+    parser.add_argument('--task_id', type=int, default=0)
+    # export config
+    parser.add_argument("--export_batch_size", type=int, default=1, help="batch size")
+    parser.add_argument("--export_file_name", type=str, default="edsr", help="output file name.")
+    parser.add_argument("--export_file_format", type=str, default="AIR",
+                        choices=['MINDIR', 'AIR', 'ONNX'], help="file format")
+    args, _ = parser.parse_known_args()
+
+    return args
+
+
+def _train(args, data_url):
+    """use train.py"""
+    pwd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+    train_file = os.path.join(pwd, "train.py")
+
+    cmd = ["python", train_file,
+           f"--dir_data={os.path.abspath(data_url)}",
+           f"--data_train={args.data_train}",
+           f"--epochs={args.epochs}",
+           f"--batch_size={args.batch_size}",
+           f"--lr={args.lr}",
+           f"--init_loss_scale={args.init_loss_scale}",
+           f"--loss_scale={args.loss_scale}",
+           f"--scale={args.scale}",
+           f"--task_id={args.task_id}",
+           f"--ckpt_save_path={os.path.join(_CACHE_TRAIN_URL,args.ckpt_save_path)}",
+           f"--ckpt_save_interval={args.ckpt_save_interval}",
+           f"--ckpt_save_max={args.ckpt_save_max}"]
+
+    print(' '.join(cmd))
+    process = subprocess.Popen(cmd, shell=False)
+    return process.wait()
+
+def _get_last_ckpt(ckpt_dir):
+    """get the last ckpt path"""
+    file_dict = {}
+    lists = os.listdir(ckpt_dir)
+    if not lists:
+        print("No ckpt file found.")
+        return None
+    for i in lists:
+        ctime = os.stat(os.path.join(ckpt_dir, i)).st_ctime
+        file_dict[ctime] = i
+    max_ctime = max(file_dict.keys())
+    ckpt_file = os.path.join(ckpt_dir, file_dict[max_ctime])
+
+    return ckpt_file
+
+
+def _export_air(args, ckpt_dir):
+    """export"""
+    ckpt_file = _get_last_ckpt(ckpt_dir)
+    if not ckpt_file:
+        return
+    pwd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+    export_file = os.path.join(pwd, "export.py")
+    cmd = ["python", export_file,
+           f"--batch_size={args.export_batch_size}",
+           f"--ckpt_path={ckpt_file}",
+           f"--file_name={os.path.join(_CACHE_TRAIN_URL, args.export_file_name)}",
+           f"--file_format={args.export_file_format}",]
+    print(f"Start exporting, cmd = {' '.join(cmd)}.")
+    process = subprocess.Popen(cmd, shell=False)
+    process.wait()
+
+
+def main():
+    args = _parse_args()
+
+    os.makedirs(_CACHE_TRAIN_URL, exist_ok=True)
+    os.makedirs(_CACHE_DATA_URL, exist_ok=True)
+
+    mox.file.copy_parallel(args.data_url, _CACHE_DATA_URL)
+    data_url = _CACHE_DATA_URL
+
+    _train(args, data_url)
+    _export_air(args, os.path.join(_CACHE_TRAIN_URL, args.ckpt_save_path))
+    mox.file.copy_parallel(_CACHE_TRAIN_URL, args.train_url)
+
+
+if __name__ == '__main__':
+    main()