diff --git a/.jenkins/check/config/filter_cpplint.txt b/.jenkins/check/config/filter_cpplint.txt
index 41a00d12ead8264ed1f566d050ff02f76495a153..2d8fb85c63fef7a80f011c7651e672fcb630ee99 100644
--- a/.jenkins/check/config/filter_cpplint.txt
+++ b/.jenkins/check/config/filter_cpplint.txt
@@ -71,4 +71,7 @@
 
 "models/official/cv/tinydarknet/infer/mxbase/TinydarknetClassify.h" "runtime/references"
 "models/official/cv/tinydarknet/infer/mxbase/TinydarknetClassify.cpp" "runtime/references"
-"models/official/cv/tinydarknet/infer/mxbase/main.cpp" "runtime/references"
\ No newline at end of file
+"models/official/cv/tinydarknet/infer/mxbase/main.cpp" "runtime/references"
+
+"models/official/cv/resnet/infer/ResNet152/mxbase/Resnet152ClassifyOpencv.h"                 "runtime/references"
+"models/official/cv/resnet/infer/ResNet18/mxbase/Resnet18ClassifyOpencv.h"                   "runtime/references"
\ No newline at end of file
diff --git a/.jenkins/check/config/whitelizard.txt b/.jenkins/check/config/whitelizard.txt
index 3492971906d065ee2163841984d3b1620b24934f..934d28a6a7be265acac026ab25076b15c54be201 100644
--- a/.jenkins/check/config/whitelizard.txt
+++ b/.jenkins/check/config/whitelizard.txt
@@ -1,4 +1,5 @@
-# Scene1: 
+
+# Scene1:
 #    function_name1, function_name2
 # Scene2: 
 #    file_path:function_name1, function_name2
diff --git a/official/cv/resnet/infer/ResNet152/convert/aipp.config b/official/cv/resnet/infer/ResNet152/convert/aipp.config
new file mode 100644
index 0000000000000000000000000000000000000000..84886ae1a2432b993d0990221de2f76424f679b4
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/convert/aipp.config
@@ -0,0 +1,16 @@
+aipp_op {
+    aipp_mode: static
+    input_format : RGB888_U8
+
+    rbuv_swap_switch : true
+
+    mean_chn_0 : 0
+    mean_chn_1 : 0
+    mean_chn_2 : 0
+    min_chn_0 : 123.675
+    min_chn_1 : 116.28
+    min_chn_2 : 103.53
+    var_reci_chn_0 : 0.0171247538316637
+    var_reci_chn_1 : 0.0175070028011204
+    var_reci_chn_2 : 0.0174291938997821
+}
diff --git a/official/cv/resnet/infer/ResNet152/convert/convert_om.sh b/official/cv/resnet/infer/ResNet152/convert/convert_om.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4c3a5c41da72d224f72e2c205131aa5c3fa54554
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/convert/convert_om.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [ $# -ne 3 ]
+then
+  echo "Wrong parameter format."
+  echo "Usage:"
+  echo "         bash $0 INPUT_AIR_PATH AIPP_CFG_PATH OUTPUT_OM_PATH_NAME"
+  echo "Example: "
+  echo "         bash convert_om.sh models/0-150_1251.air aipp.config models/0-150_1251.om"
+
+  exit 255
+fi
+
+air_path=$1
+aipp_cfg_path=$2
+output_model_name=$3
+
+echo "Input AIR file path: ${air_path}"
+echo "Output OM file path: ${output_model_name}"
+echo "AIPP cfg file path: ${aipp_cfg_path}"
+
+
+/usr/local/Ascend/atc/bin/atc \
+--model=$air_path \
+--framework=1 \
+--output=$output_model_name \
+--input_format=NCHW --input_shape="actual_input_1:1,3,304,304" \
+--enable_small_channel=1 \
+--log=error \
+--soc_version=Ascend310 \
+--insert_op_conf="$aipp_cfg_path" \
+--output_type=FP32
+
diff --git a/official/cv/resnet/infer/ResNet152/convert/data.py b/official/cv/resnet/infer/ResNet152/convert/data.py
new file mode 100644
index 0000000000000000000000000000000000000000..82a3d88926f91505919f5eb0b0960796a07437b4
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/convert/data.py
@@ -0,0 +1,37 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""data convert"""
+
+import sys
+
+def convert(input_path, output_path):
+    data = []
+    with open(input_path, 'r') as f:
+        for line in f.readlines():
+            data.append(line[10:-1])
+    with open(output_path, 'a', encoding='utf-8') as fp:
+        for name in data:
+            fp.write(name + '\n')
+
+
+if __name__ == '__main__':
+    if len(sys.argv) != 3:
+        print("Wrong parameter format.")
+        print("Usage:")
+        print("    python3 data.py [SYNSET_PATH] [NAMES_OUTPUT_PATH_NAME]")
+        sys.exit()
+    input_p = sys.argv[1]
+    output_p = sys.argv[2]
+    convert(input_p, output_p)
diff --git a/official/cv/resnet/infer/ResNet152/data/config/resnet152.cfg b/official/cv/resnet/infer/ResNet152/data/config/resnet152.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..deb7ac6b5d649e06ecec772413256842341f723c
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/data/config/resnet152.cfg
@@ -0,0 +1,3 @@
+CLASS_NUM=1001
+SOFTMAX=false
+TOP_K=5
diff --git a/official/cv/resnet/infer/ResNet152/docker_start_infer.sh b/official/cv/resnet/infer/ResNet152/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1e91b6142a15e068cb3354c47a2d31c6bf7fabf9
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/docker_start_infer.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+data_path=$2
+
+function show_help() {
+    echo "Usage: docker_start.sh docker_image data_path"
+}
+
+function param_check() {
+    if [ -z "${docker_image}" ]; then
+        echo "please input docker_image"
+        show_help
+        exit 1
+    fi
+
+    if [ -z "${data_path}" ]; then
+        echo "please input data_path"
+        show_help
+        exit 1
+    fi
+}
+
+param_check
+
+docker run -it \
+  --device=/dev/davinci0 \
+  --device=/dev/davinci_manager \
+  --device=/dev/devmm_svm \
+  --device=/dev/hisi_hdc \
+  -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+  -v ${data_path}:${data_path} \
+  ${docker_image} \
+  /bin/bash
diff --git a/official/cv/resnet/infer/ResNet152/mxbase/CMakeLists.txt b/official/cv/resnet/infer/ResNet152/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c4f29af77aa9c4e09068e5ed689f0677096255fa
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/mxbase/CMakeLists.txt
@@ -0,0 +1,48 @@
+cmake_minimum_required(VERSION 3.14.0)
+project(resnet)
+set(TARGET resnet)
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_definitions(-Dgoogle=mindxsdk_private)
+add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall)
+add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -s -pie)
+
+# Check environment variable
+if(NOT DEFINED ENV{ASCEND_HOME})
+    message(FATAL_ERROR "please define environment variable:ASCEND_HOME")
+endif()
+if(NOT DEFINED ENV{ASCEND_VERSION})
+    message(WARNING "please define environment variable:ASCEND_VERSION")
+endif()
+if(NOT DEFINED ENV{ARCH_PATTERN})
+    message(WARNING "please define environment variable:ARCH_PATTERN")
+endif()
+set(ACL_INC_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/include)
+set(ACL_LIB_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/lib64)
+
+set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
+set(MXBASE_INC ${MXBASE_ROOT_DIR}/include)
+set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib)
+set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors)
+set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/include/MxBase/postprocess/include)
+set(OPENSOURCE_DIR ${MXBASE_ROOT_DIR}/opensource)
+
+include_directories(${ACL_INC_DIR})
+include_directories(${OPENSOURCE_DIR}/include)
+include_directories(${OPENSOURCE_DIR}/include/opencv4)
+
+include_directories(${MXBASE_INC})
+include_directories(${MXBASE_POST_PROCESS_DIR})
+
+link_directories(${ACL_LIB_DIR})
+link_directories(${OPENSOURCE_DIR}/lib)
+link_directories(${MXBASE_LIB_DIR})
+link_directories(${MXBASE_POST_LIB_DIR})
+
+add_executable(${TARGET} main.cpp Resnet152ClassifyOpencv.cpp)
+
+target_link_libraries(${TARGET} glog cpprest mxbase resnet50postprocess opencv_world stdc++fs)
+
+install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/)
+
diff --git a/official/cv/resnet/infer/ResNet152/mxbase/Resnet152ClassifyOpencv.cpp b/official/cv/resnet/infer/ResNet152/mxbase/Resnet152ClassifyOpencv.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8f6aff2caf0993f586538713c13f71a4e2a60cd6
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/mxbase/Resnet152ClassifyOpencv.cpp
@@ -0,0 +1,214 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Resnet152ClassifyOpencv.h"
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+
+using MxBase::DeviceManager;
+using MxBase::TensorBase;
+using MxBase::MemoryData;
+using MxBase::ClassInfo;
+
+namespace {
+    const uint32_t YUV_BYTE_NU = 3;
+    const uint32_t YUV_BYTE_DE = 2;
+    const uint32_t VPC_H_ALIGN = 2;
+}
+
+APP_ERROR Resnet152ClassifyOpencv::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    APP_ERROR ret = DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+    model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+        return ret;
+    }
+    MxBase::ConfigData configData;
+    const std::string softmax = initParam.softmax ? "true" : "false";
+    const std::string checkTensor = initParam.checkTensor ? "true" : "false";
+
+    configData.SetJsonValue("CLASS_NUM", std::to_string(initParam.classNum));
+    configData.SetJsonValue("TOP_K", std::to_string(initParam.topk));
+    configData.SetJsonValue("SOFTMAX", softmax);
+    configData.SetJsonValue("CHECK_MODEL", checkTensor);
+
+    auto jsonStr = configData.GetCfgJson().serialize();
+    std::map<std::string, std::shared_ptr<void>> config;
+    config["postProcessConfigContent"] = std::make_shared<std::string>(jsonStr);
+    config["labelPath"] = std::make_shared<std::string>(initParam.labelPath);
+
+    post_ = std::make_shared<MxBase::Resnet50PostProcess>();
+    ret = post_->Init(config);
+    if (ret != APP_ERR_OK) {
+        LogError << "Resnet50PostProcess init failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR Resnet152ClassifyOpencv::DeInit() {
+    model_->DeInit();
+    post_->DeInit();
+    DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+APP_ERROR Resnet152ClassifyOpencv::PreProcess(const std::string &imgPath,
+                                                           TensorBase &tensorBase) {
+    static constexpr uint32_t resizeHeight = 304;
+    static constexpr uint32_t resizeWidth = 304;
+
+    cv::Mat imageMat = cv::imread(imgPath, cv::IMREAD_COLOR);
+    cv::resize(imageMat, imageMat, cv::Size(resizeWidth, resizeHeight));
+
+    static cv::Rect rectOfImg(40, 40, 224, 224);
+    imageMat = imageMat(rectOfImg).clone();
+
+    const uint32_t dataSize =  imageMat.cols *  imageMat.rows * MxBase::XRGB_WIDTH_NU;
+    LogInfo << "image size after resize" << imageMat.cols << " " << imageMat.rows;
+
+    MemoryData memoryDataDst(dataSize, MemoryData::MEMORY_DEVICE, deviceId_);
+    MemoryData memoryDataSrc(imageMat.data, dataSize, MemoryData::MEMORY_HOST_MALLOC);
+
+    APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc failed.";
+        return ret;
+    }
+
+    std::vector<uint32_t> shape = {imageMat.rows * MxBase::XRGB_WIDTH_NU, static_cast<uint32_t>(imageMat.cols)};
+    tensorBase = TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_UINT8);
+    return APP_ERR_OK;
+}
+
+APP_ERROR Resnet152ClassifyOpencv::Inference(std::vector<TensorBase> &inputs,
+                                            std::vector<TensorBase> &outputs) {
+    auto dtypes = model_->GetOutputDataType();
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector<uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        TensorBase tensor(shape, dtypes[i], MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
+        APP_ERROR ret = TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs.push_back(tensor);
+    }
+    MxBase::DynamicInfo dynamicInfo = {};
+    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+    auto startTime = std::chrono::high_resolution_clock::now();
+    APP_ERROR ret = model_->ModelInference(inputs, outputs, dynamicInfo);
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    inferCostTimeMilliSec += costMs;
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR Resnet152ClassifyOpencv::PostProcess(std::vector<TensorBase> &inputs,
+                                              std::vector<std::vector<ClassInfo>> &clsInfos) {
+    APP_ERROR ret = post_->Process(inputs, clsInfos);
+    if (ret != APP_ERR_OK) {
+        LogError << "Process failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR Resnet152ClassifyOpencv::SaveResult(const std::string &imgPath,
+                                             std::vector<std::vector<ClassInfo>> &batchClsInfos) {
+    LogInfo << "image path" << imgPath;
+    std::string fileName = imgPath.substr(imgPath.find_last_of("/") + 1);
+    size_t dot = fileName.find_last_of(".");
+    std::string resFileName = "result/" + fileName.substr(0, dot) + "_1.txt";
+    LogInfo << "file path for saving result" << resFileName;
+
+    std::ofstream outfile(resFileName);
+    if (outfile.fail()) {
+        LogError << "Failed to open result file: ";
+        return APP_ERR_COMM_FAILURE;
+    }
+
+    uint32_t batchIndex = 0;
+    for (auto clsInfos : batchClsInfos) {
+        std::string resultStr;
+        for (auto clsInfo : clsInfos) {
+            LogDebug << " className:" << clsInfo.className << " confidence:" << clsInfo.confidence <<
+            " classIndex:" <<  clsInfo.classId;
+            resultStr += std::to_string(clsInfo.classId) + " ";
+        }
+
+        outfile << resultStr << std::endl;
+        batchIndex++;
+    }
+    outfile.close();
+    return APP_ERR_OK;
+}
+
+APP_ERROR Resnet152ClassifyOpencv::Process(const std::string &imgPath) {
+    TensorBase tensorBase;
+    std::vector<TensorBase> inputs;
+    std::vector<TensorBase> outputs;
+
+    APP_ERROR ret = PreProcess(imgPath, tensorBase);
+    if (ret != APP_ERR_OK) {
+        LogError << "Convert image to TensorBase failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    inputs.push_back(tensorBase);
+
+    auto startTime = std::chrono::high_resolution_clock::now();
+    ret = Inference(inputs, outputs);
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    inferCostTimeMilliSec += costMs;
+    if (ret != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret << ".";
+        return ret;
+    }
+    std::vector<std::vector<ClassInfo>> BatchClsInfos;
+    ret = PostProcess(outputs, BatchClsInfos);
+    if (ret != APP_ERR_OK) {
+        LogError << "PostProcess failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    ret = SaveResult(imgPath, BatchClsInfos);
+    if (ret != APP_ERR_OK) {
+        LogError << "Save infer results into file failed. ret = " << ret << ".";
+        return ret;
+    }
+
+    return APP_ERR_OK;
+}
diff --git a/official/cv/resnet/infer/ResNet152/mxbase/Resnet152ClassifyOpencv.h b/official/cv/resnet/infer/ResNet152/mxbase/Resnet152ClassifyOpencv.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e34a27816f8d4d742ac84c868ec4a75cb346973
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/mxbase/Resnet152ClassifyOpencv.h
@@ -0,0 +1,64 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MXBASE_RESNET152CLASSIFYOPENCV_H
+#define MXBASE_RESNET152CLASSIFYOPENCV_H
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include <opencv2/opencv.hpp>
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "ClassPostProcessors/Resnet50PostProcess.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+
+struct InitParam {
+    uint32_t deviceId;
+    std::string labelPath;
+    uint32_t classNum;
+    uint32_t topk;
+    bool softmax;
+    bool checkTensor;
+    std::string modelPath;
+};
+
+class Resnet152ClassifyOpencv {
+ public:
+    APP_ERROR Init(const InitParam &initParam);
+    APP_ERROR DeInit();
+    APP_ERROR PreProcess(const std::string &imgPath, MxBase::TensorBase &tensorBase);
+    APP_ERROR Inference(std::vector<MxBase::TensorBase> &inputs,
+                        std::vector<MxBase::TensorBase> &outputs);
+    APP_ERROR PostProcess(std::vector<MxBase::TensorBase> &inputs,
+                          std::vector<std::vector<MxBase::ClassInfo>> &clsInfos);
+    APP_ERROR Process(const std::string &imgPath);
+    // get infer time
+    double GetInferCostMilliSec() const {return inferCostTimeMilliSec;}
+
+ private:
+    APP_ERROR SaveResult(const std::string &imgPath,
+                         std::vector<std::vector<MxBase::ClassInfo>> &batchClsInfos);
+    std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
+    std::shared_ptr<MxBase::Resnet50PostProcess> post_;
+    MxBase::ModelDesc modelDesc_;
+    uint32_t deviceId_ = 0;
+    // infer time
+    double inferCostTimeMilliSec = 0.0;
+};
+
+#endif
diff --git a/official/cv/resnet/infer/ResNet152/mxbase/build.sh b/official/cv/resnet/infer/ResNet152/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1f120af92518a1d7f32f1f5a9cc70891a4c19529
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/mxbase/build.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+path_cur=$(dirname "$0")
+
+function check_env()
+{
+    # set ASCEND_VERSION to ascend-toolkit/latest when it was not specified by user
+    if [ ! "${ASCEND_VERSION}" ]; then
+        export ASCEND_VERSION=ascend-toolkit/latest && echo "Set ASCEND_VERSION to the default value: ${ASCEND_VERSION}"
+    else
+        echo "ASCEND_VERSION is set to ${ASCEND_VERSION} by user"
+    fi
+
+    if [ ! "${ARCH_PATTERN}" ]; then
+        # set ARCH_PATTERN to ./ when it was not specified by user
+        export ARCH_PATTERN=./ && echo "ARCH_PATTERN is set to the default value: ${ARCH_PATTERN}"
+    else
+        echo "ARCH_PATTERN is set to ${ARCH_PATTERN} by user"
+    fi
+}
+
+function build_resnet152()
+{
+    cd "$path_cur" || exit
+    rm -rf build
+    mkdir -p build
+    cd build || exit
+    cmake ..
+    make
+    ret=$?
+    if [ ${ret} -ne 0 ]; then
+        echo "Failed to build resnet152."
+        exit ${ret}
+    fi
+    make install
+}
+
+check_env
+build_resnet152
diff --git a/official/cv/resnet/infer/ResNet152/mxbase/main.cpp b/official/cv/resnet/infer/ResNet152/mxbase/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cd0c6695aa6e8ed1d5ff8b7f7da64277fea9b427
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/mxbase/main.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <dirent.h>
+#include "Resnet152ClassifyOpencv.h"
+#include "MxBase/Log/Log.h"
+
+namespace {
+const uint32_t CLASS_NUM = 1001;
+}
+
+APP_ERROR ReadFilesFromPath(const std::string &path, std::vector<std::string> *files) {
+    DIR *dir = NULL;
+    struct dirent *ptr = NULL;
+
+    if ((dir=opendir(path.c_str())) == NULL) {
+        LogError << "Open dir error: " << path;
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+
+    while ((ptr=readdir(dir)) != NULL) {
+        // d_type == 8 is file
+        if (ptr->d_type == 8) {
+            files->push_back(path + ptr->d_name);
+        }
+    }
+    closedir(dir);
+    // sort ascending order
+    sort(files->begin(), files->end());
+    return APP_ERR_OK;
+}
+
+int main(int argc, char* argv[]) {
+    if (argc <= 1) {
+        LogWarn << "Please input image path, such as './resnet image_dir'.";
+        return APP_ERR_OK;
+    }
+
+    InitParam initParam = {};
+    initParam.deviceId = 0;
+    initParam.classNum = CLASS_NUM;
+    initParam.labelPath = "../data/config/imagenet1000_clsidx_to_labels.names";
+    initParam.topk = 5;
+    initParam.softmax = false;
+    initParam.checkTensor = true;
+    initParam.modelPath = "../data/model/resnet152_bs1_304.om";
+    auto resnet152 = std::make_shared<Resnet152ClassifyOpencv>();
+    APP_ERROR ret = resnet152->Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "resnet152Classify init failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    std::string inferPath = argv[1];
+    std::vector<std::string> files;
+    ret = ReadFilesFromPath(inferPath, &files);
+    if (ret != APP_ERR_OK) {
+        LogError << "Read files from path failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    auto startTime = std::chrono::high_resolution_clock::now();
+    for (uint32_t i = 0; i < files.size(); i++) {
+        ret = resnet152->Process(files[i]);
+        if (ret != APP_ERR_OK) {
+            LogError << "resnet152Classify process failed, ret=" << ret << ".";
+            resnet152->DeInit();
+            return ret;
+        }
+    }
+    auto endTime = std::chrono::high_resolution_clock::now();
+    resnet152->DeInit();
+    double costMilliSecs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    double fps = 1000.0 * files.size() / resnet152->GetInferCostMilliSec();
+    LogInfo << "[Process Delay] cost: " << costMilliSecs << " ms\tfps: " << fps << " imgs/sec";
+    return APP_ERR_OK;
+}
diff --git a/official/cv/resnet/infer/ResNet152/mxbase/run.sh b/official/cv/resnet/infer/ResNet152/mxbase/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ace816142f9dfdb654e5eae645c94d246bb6fc36
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/mxbase/run.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/lib/modelpostprocessors:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH}
+
+# run
+./resnet ../data/image_val/
diff --git a/official/cv/resnet/infer/ResNet152/sdk/classification_task_metric.py b/official/cv/resnet/infer/ResNet152/sdk/classification_task_metric.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c82151c1ad0b0fd8322b8e704e670a871963c27
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/sdk/classification_task_metric.py
@@ -0,0 +1,154 @@
+# coding=utf-8
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the BSD 3-Clause License  (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""classification task metric"""
+import os
+import sys
+import json
+import numpy as np
+
+
+np.set_printoptions(threshold=sys.maxsize)
+
+LABEL_FILE = "HiAI_label.json"
+
+def cre_groundtruth_dict(gtfile_path):
+    """
+    :param filename: file contains the imagename and label number
+    :return: dictionary key imagename, value is label number
+    """
+    img_gt_dict = {}
+    for gtfile in os.listdir(gtfile_path):
+        if gtfile != LABEL_FILE:
+            with open(os.path.join(gtfile_path, gtfile), 'r') as f:
+                gt = json.load(f)
+                image_name = os.path.splitext(gtfile.split('/')[-1])[0]
+                img_gt_dict[image_name] = gt["image"]["annotations"][0]["category_id"]
+    return img_gt_dict
+
+
+def cre_groundtruth_dict_fromtxt(gtfile_path):
+    """
+    :param filename: file contains the imagename and label number
+    :return: dictionary key imagename, value is label number
+    """
+    img_gt_dict = {}
+    with open(gtfile_path, 'r')as f:
+        for line in f.readlines():
+            temp = line.strip().split(" ")
+            img_name = temp[0].split(".")[0]
+            img_gt_dict[img_name] = temp[1]
+    return img_gt_dict
+
+
+def load_statistical_predict_result(filepath):
+    """
+    function:
+    the prediction result file data extraction
+    input:
+    result file: filepath
+    output:
+    n_label: number of labels
+    data_vec: the probabilities of prediction for the 1000 classes
+    :return: probabilities, number of labels
+    """
+    with open(filepath, 'r')as f:
+        temp = f.readline().strip().split(" ")
+        n_label = len(temp)
+        data_vec = np.zeros((len(temp)), dtype=np.float32)
+        if n_label != 0:
+            for ind, cls_ind in enumerate(temp):
+                data_vec[ind] = int(cls_ind)
+    return data_vec, n_label
+
+
+def create_visualization_statistical_result(prediction_file_path,
+                                            result_store_path, json_file_name,
+                                            img_gt_dict, topn=5):
+    """
+    :param prediction_file_path:
+    :param result_store_path:
+    :param json_file_name:
+    :param img_gt_dict:
+    :param topn:
+    :return:
+    """
+    writer = open(os.path.join(result_store_path, json_file_name), 'w')
+    table_dict = {}
+    table_dict["title"] = "Overall statistical evaluation"
+    table_dict["value"] = []
+
+    count = 0
+    res_cnt = 0
+    n_labels = ""
+    count_hit = np.zeros(topn)
+    for tfile_name in os.listdir(prediction_file_path):
+        count += 1
+        temp = tfile_name.split('.')[0]
+        index = temp.rfind('_')
+        img_name = temp[:index]
+        filepath = os.path.join(prediction_file_path, tfile_name)
+        prediction, n_labels = load_statistical_predict_result(filepath)
+        if n_labels == 1001:
+            real_label = int(img_gt_dict[img_name]) + 1
+        else:
+            real_label = int(img_gt_dict[img_name])
+        res_cnt = min(len(prediction), topn)
+        for i in range(res_cnt):
+            if real_label == int(prediction[i]):
+                count_hit[i] += 1
+                break
+    if 'value' not in table_dict.keys():
+        print("the item value does not exist!")
+    else:
+        table_dict["value"].extend(
+            [{"key": "Number of images", "value": str(count)},
+             {"key": "Number of classes", "value": str(n_labels)}])
+        accuracy = np.cumsum(count_hit) / count if count else 0
+        for i in range(res_cnt):
+            table_dict["value"].append({"key": "Top" + str(i + 1) + " accuracy",
+                                        "value": str(
+                                            round(accuracy[i] * 100, 2)) + '%'})
+        json.dump(table_dict, writer, indent=2)
+    writer.close()
+
+
+if __name__ == '__main__':
+    if len(sys.argv) == 5:
+        # txt file path
+        folder_davinci_target = sys.argv[1]
+        # annotation files path, "val_label.txt"
+        annotation_file_path = sys.argv[2]
+        # the path to store the results json path
+        result_json_path = sys.argv[3]
+        # result json file name
+        result_json_file_name = sys.argv[4]
+    else:
+        print("Please enter target file result folder | ground truth label file | result json file folder | "
+              "result json file name, such as ./result val_label.txt . result.json")
+        sys.exit(1)
+
+    if not os.path.exists(folder_davinci_target):
+        print("Target file folder does not exist.")
+
+    if not os.path.exists(annotation_file_path):
+        print("Ground truth file does not exist.")
+
+    if not os.path.exists(result_json_path):
+        print("Result folder doesn't exist.")
+
+    img_label_dict = cre_groundtruth_dict_fromtxt(annotation_file_path)
+    create_visualization_statistical_result(folder_davinci_target,
+                                            result_json_path, result_json_file_name,
+                                            img_label_dict, topn=5)
diff --git a/official/cv/resnet/infer/ResNet152/sdk/main.py b/official/cv/resnet/infer/ResNet152/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c39cc78f7fc6d9dd231de1c9b2d6eebed2bd7ab
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/sdk/main.py
@@ -0,0 +1,104 @@
+# coding=utf-8
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""run sdk"""
+import datetime
+import json
+import os
+import sys
+
+from StreamManagerApi import StreamManagerApi
+from StreamManagerApi import MxDataInput
+
+
def run():
    """Run SDK inference.

    Reads jpg/jpeg images from the directory given in sys.argv[1], pushes
    each one through the `im_resnet152` stream described by
    ./resnet152.pipeline, and writes the predicted class ids to
    sys.argv[2]/<stem>_1.txt (an empty <stem>.txt when no class is returned).
    """
    # init stream manager
    stream_manager_api = StreamManagerApi()
    ret = stream_manager_api.InitManager()
    if ret != 0:
        print("Failed to init Stream manager, ret=%s" % str(ret))
        return

    # create streams by pipeline config file
    with open("./resnet152.pipeline", 'rb') as f:
        pipelineStr = f.read()
    ret = stream_manager_api.CreateMultipleStreams(pipelineStr)

    if ret != 0:
        print("Failed to create Stream, ret=%s" % str(ret))
        return

    # Construct the input of the stream
    data_input = MxDataInput()

    dir_name = sys.argv[1]
    res_dir_name = sys.argv[2]
    file_list = os.listdir(dir_name)
    if not os.path.exists(res_dir_name):
        os.makedirs(res_dir_name)

    image_count = 0
    for file_name in file_list:
        file_path = os.path.join(dir_name, file_name)
        if not (file_name.lower().endswith(
                ".jpg") or file_name.lower().endswith(".jpeg")):
            continue

        with open(file_path, 'rb') as f:
            data_input.data = f.read()

        stream_name = b'im_resnet152'
        in_plugin_id = 0
        unique_id = stream_manager_api.SendData(stream_name, in_plugin_id,
                                                data_input)
        if unique_id < 0:
            print("Failed to send data to stream.")
            return
        # Obtain the inference result by specifying streamName and uniqueId.
        start_time = datetime.datetime.now()
        infer_result = stream_manager_api.GetResult(stream_name, unique_id)
        end_time = datetime.datetime.now()
        print('sdk run time: {}'.format((end_time - start_time).microseconds))
        if infer_result.errorCode != 0:
            print("GetResultWithUniqueId error. errorCode=%d, errorMsg=%s" % (
                infer_result.errorCode, infer_result.data.decode()))
            return
        # print the infer result
        infer_res = infer_result.data.decode()
        print("process img{}: {}, infer result: {}".format(image_count, file_name, infer_res))
        image_count = image_count + 1

        # BUG FIX: the original used file_name[:-5], which only strips a
        # 5-character ".jpeg" suffix and corrupts the stem of ".jpg" files;
        # splitext handles both extensions correctly.
        stem = os.path.splitext(file_name)[0]
        load_dict = json.loads(infer_res)
        if load_dict.get('MxpiClass') is None:
            with open(os.path.join(res_dir_name, stem + '.txt'),
                      'w') as f_write:
                f_write.write("")
            continue
        res_vec = load_dict.get('MxpiClass')

        with open(os.path.join(res_dir_name, stem + '_1.txt'),
                  'w') as f_write:
            res_list = [str(item.get("classId")) + " " for item in res_vec]
            f_write.writelines(res_list)
            f_write.write('\n')

    # destroy streams
    stream_manager_api.DestroyAllStreams()
+
+
if __name__ == '__main__':
    # Script entry point: expects <image_dir> <result_dir> on the command line.
    run()
diff --git a/official/cv/resnet/infer/ResNet152/sdk/resnet152.pipeline b/official/cv/resnet/infer/ResNet152/sdk/resnet152.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..4fdf23d6e5769b2938107b93612e7b07f367af7b
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/sdk/resnet152.pipeline
@@ -0,0 +1,73 @@
+{
+    "im_resnet152": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc1": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_imagedecoder0"
+        },
+        "mxpi_imagedecoder0": {
+            "props": {
+                "handleMethod": "opencv"
+            },
+            "factory": "mxpi_imagedecoder",
+            "next": "mxpi_imageresize0"
+        },
+        "mxpi_imageresize0": {
+            "props": {
+                "handleMethod": "opencv",
+                "resizeType": "Resizer_Stretch",
+                "resizeHeight": "304",
+                "resizeWidth": "304"
+            },
+            "factory": "mxpi_imageresize",
+            "next": "mxpi_opencvcentercrop0"
+        },
+        "mxpi_opencvcentercrop0": {
+            "props": {
+                "dataSource": "mxpi_imageresize0",
+                "cropHeight": "224",
+                "cropWidth": "224"
+            },
+            "factory": "mxpi_opencvcentercrop",
+            "next": "mxpi_tensorinfer0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "mxpi_opencvcentercrop0",
+                "modelPath": "../data/model/resnet152_bs1_304.om",
+                "waitingTime": "2000",
+                "outputDeviceId": "-1"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "mxpi_classpostprocessor0"
+        },
+        "mxpi_classpostprocessor0": {
+            "props": {
+                "dataSource": "mxpi_tensorinfer0",
+                "postProcessConfigPath": "../data/config/resnet152.cfg",
+                "labelPath": "../data/config/imagenet1000_clsidx_to_labels.names",
+                "postProcessLibPath": "libresnet50postprocess.so"
+            },
+            "factory": "mxpi_classpostprocessor",
+            "next": "mxpi_dataserialize0"
+        },
+        "mxpi_dataserialize0": {
+            "props": {
+                "outputDataKeys": "mxpi_classpostprocessor0"
+            },
+            "factory": "mxpi_dataserialize",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "props": {
+                "blocksize": "4096000"
+            },
+            "factory": "appsink"
+        }
+    }
+}
diff --git a/official/cv/resnet/infer/ResNet152/sdk/run.sh b/official/cv/resnet/infer/ResNet152/sdk/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1c76b73e58c6973ecbd0ca6781cfa5da33495db0
--- /dev/null
+++ b/official/cv/resnet/infer/ResNet152/sdk/run.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
# Positional arguments: image folder and result output folder.
image_path=$1
result_dir=$2

set -e

# Simple log helper functions
info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }

export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins

#to set PYTHONPATH, import the StreamManagerApi.py
export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python

# Quote the paths so directories containing spaces survive word splitting.
python3.7 main.py "$image_path" "$result_dir"
exit 0
diff --git a/official/cv/resnet/modelarts/ResNet152/config.py b/official/cv/resnet/modelarts/ResNet152/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4e93a532be2c0e65c1a533ab1063865fbbb1fc3
--- /dev/null
+++ b/official/cv/resnet/modelarts/ResNet152/config.py
@@ -0,0 +1,129 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Parse arguments"""
+
+import os
+import ast
+import argparse
+from pprint import pprint, pformat
+import yaml
+
+
class Config:
    """
    Configuration namespace: exposes the entries of a dict as attributes.

    Nested dicts become nested Config objects; lists and tuples become
    lists whose dict elements are converted as well.
    """
    def __init__(self, cfg_dict):
        for name, value in cfg_dict.items():
            if isinstance(value, (list, tuple)):
                converted = [Config(item) if isinstance(item, dict) else item
                             for item in value]
                setattr(self, name, converted)
            elif isinstance(value, dict):
                setattr(self, name, Config(value))
            else:
                setattr(self, name, value)

    def __str__(self):
        return pformat(self.__dict__)

    def __repr__(self):
        return str(self)
+
+
def parse_cli_to_yaml(parser, cfg, helper=None, choices=None, cfg_path="resnet50_cifar10_config.yaml"):
    """
    Parse command line arguments to the configuration according to the default yaml.

    Scalar entries of `cfg` become CLI options with the yaml value as
    default; list/dict entries are skipped. Booleans are parsed through
    ast.literal_eval so "--flag False" works as expected.

    Args:
        parser: Parent parser.
        cfg: Base configuration.
        helper: Helper description per option.
        choices: Allowed values per option.
        cfg_path: Path to the default yaml config.
    """
    parser = argparse.ArgumentParser(description="[REPLACE THIS at config.py]",
                                     parents=[parser])
    if helper is None:
        helper = {}
    if choices is None:
        choices = {}
    for name in cfg:
        default = cfg[name]
        if isinstance(default, (list, dict)):
            continue
        description = helper.get(name, "Please reference to {}".format(cfg_path))
        option_type = ast.literal_eval if isinstance(default, bool) else type(default)
        parser.add_argument("--" + name, type=option_type, default=default,
                            choices=choices.get(name), help=description)
    return parser.parse_args()
+
+
def parse_yaml(yaml_path):
    """
    Parse the yaml config file.

    The file may hold 1-3 yaml documents: config, optional per-option help
    text, optional per-option choices.

    Args:
        yaml_path: Path to the yaml config.

    Returns:
        Tuple (cfg, cfg_helper, cfg_choices); missing docs default to {}.

    Raises:
        ValueError: If the file is not valid yaml, or holds more than 3 docs.
    """
    with open(yaml_path, 'r') as fin:
        try:
            # Narrow the try to the parse itself: the original bare
            # `except:` also swallowed KeyboardInterrupt and masked the
            # descriptive doc-count error below.
            cfgs = list(yaml.load_all(fin.read(), Loader=yaml.FullLoader))
        except yaml.YAMLError as err:
            # Chain the parser error instead of discarding it.
            raise ValueError("Failed to parse yaml") from err
    cfg_helper = {}
    cfg_choices = {}
    if len(cfgs) == 1:
        cfg = cfgs[0]
    elif len(cfgs) == 2:
        cfg, cfg_helper = cfgs
    elif len(cfgs) == 3:
        cfg, cfg_helper, cfg_choices = cfgs
    else:
        raise ValueError("At most 3 docs (config description for help, choices) are supported in config yaml")
    print(cfg_helper)
    return cfg, cfg_helper, cfg_choices
+
+
def merge(args, cfg):
    """
    Merge the base config from yaml file and command line arguments.

    Command line values overwrite the yaml defaults in place.

    Args:
        args: Command line arguments (a namespace object).
        cfg: Base configuration dict.

    Returns:
        The updated cfg dict.
    """
    cfg.update(vars(args))
    return cfg
+
+
def get_config():
    """
    Get Config according to the yaml file and cli arguments.
    """
    parser = argparse.ArgumentParser(description="default name", add_help=False)
    current_dir = os.path.dirname(os.path.abspath(__file__))
    default_yaml = os.path.join(current_dir, "./resnet152_cifar10_config.yaml")
    parser.add_argument("--config_path", type=str, default=default_yaml,
                        help="Config file path")
    path_args, _ = parser.parse_known_args()
    default, helper, choices = parse_yaml(path_args.config_path)
    args = parse_cli_to_yaml(parser=parser, cfg=default, helper=helper,
                             choices=choices, cfg_path=path_args.config_path)
    final_config = merge(args, default)
    pprint(final_config)
    print("Please check the above information for the configurations", flush=True)
    return Config(final_config)


# Module-level singleton consumed by the training scripts.
config = get_config()
diff --git a/official/cv/resnet/modelarts/ResNet152/resnet152_cifar10_config.yaml b/official/cv/resnet/modelarts/ResNet152/resnet152_cifar10_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d9ce685a6f7c1fc0877e4352cfff95d0d1cea31c
--- /dev/null
+++ b/official/cv/resnet/modelarts/ResNet152/resnet152_cifar10_config.yaml
@@ -0,0 +1,105 @@
+# Builtin Configurations(DO NOT CHANGE THESE CONFIGURATIONS unless you know exactly what you are doing)
+enable_modelarts: True
+# Url for modelarts
+data_url: ""
+train_url: ""
+checkpoint_url: ""
+# Path for local
+run_distribute: False
+enable_profiling: False
+data_path: "/cache/data"
+output_path: "/cache/train"
+load_path: "/cache/checkpoint_path/"
+device_target: "Ascend"
+checkpoint_path: "./checkpoint/"
+checkpoint_file_path: ""
+
+# ==============================================================================
+# Training options
+optimizer: "Momentum"
+infer_label: ""
+class_num: 10
+batch_size: 32
+loss_scale: 1024
+momentum: 0.9
+weight_decay: 0.0001
+epoch_size: 2
+pretrain_epoch_size: 0
+save_checkpoint: True
+save_checkpoint_epochs: 5
+keep_checkpoint_max: 10
+save_checkpoint_path: "./"
+warmup_epochs: 0
+lr_decay_mode: "step"
+use_label_smooth: True
+label_smooth_factor: 0.1
+lr: 0.1
+lr_init: 0.0
+lr_max: 0.1
+lr_end: 0.0001
+lars_epsilon: 0.0
+lars_coefficient: 0.001
+
+net_name: "resnet152"
+dataset: "cifar10"
+device_num: 1
+pre_trained: ""
+run_eval: False
+eval_dataset_path: ""
+parameter_server: False
+filter_weight: False
+save_best_ckpt: True
+eval_start_epoch: 40
+eval_interval: 1
+enable_cache: False
+cache_session_id: ""
+mode_name: "GRAPH"
+boost_mode: "O0"
+acc_mode: "O0"
+conv_init: "XavierUniform"
+dense_init: "TruncatedNormal"
+all_reduce_fusion_config:
+    - 180
+    - 313
+train_image_size: 224
+eval_image_size: 224
+
+# Export options
+device_id: 0
+width: 224
+height: 224
+file_name: "resnet152"
+file_format: "MINDIR"
+ckpt_file: ""
+network_dataset: "resnet152_cifar10"
+
+# Retrain options
+save_graphs: False
+save_graphs_path: "./graphs"
+has_trained_epoch: 0
+has_trained_step: 0
+
+# postprocess resnet inference
+result_path: ''
+label_path: ''
+
+
+---
+# Help description for each configuration
+enable_modelarts: "Whether training on modelarts, default: True"
+data_url: "Dataset url for obs"
+checkpoint_url: "The location of checkpoint for obs"
+data_path: "Dataset path for local"
+output_path: "Training output path for local"
+load_path: "The location of checkpoint for obs"
+device_target: "Target device type, available: [Ascend, GPU, CPU]"
+enable_profiling: "Whether enable profiling while training, default: False"
+class_num: "Number of classes in the dataset"
+batch_size: "Batch size for training and evaluation"
+epoch_size: "Total training epochs."
+checkpoint_path: "The location of the checkpoint file."
+checkpoint_file_path: "The location of the checkpoint file."
+result_path: "result files path."
+label_path: "image file path."
+save_graphs: "Whether save graphs during training, default: False."
+save_graphs_path: "Path to save graphs."
diff --git a/official/cv/resnet/modelarts/ResNet152/train_start.py b/official/cv/resnet/modelarts/ResNet152/train_start.py
new file mode 100644
index 0000000000000000000000000000000000000000..800fbb1e138f186daebddecbae2c2b8bf5a57ee0
--- /dev/null
+++ b/official/cv/resnet/modelarts/ResNet152/train_start.py
@@ -0,0 +1,426 @@
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""train resnet."""
+import datetime
+import glob
+import os
+import numpy as np
+import moxing as mox
+
+from mindspore import context
+from mindspore import Tensor
+from mindspore.nn.optim import Momentum, thor, LARS
+from mindspore.train.model import Model
+from mindspore.context import ParallelMode
+from mindspore.train.train_thor import ConvertModelUtils
+from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
+from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
+from mindspore.train.loss_scale_manager import FixedLossScaleManager
+from mindspore.train.serialization import export, load_checkpoint, load_param_into_net
+from mindspore.communication.management import init, get_rank
+from mindspore.common import set_seed
+from mindspore.parallel import set_algo_parameters
+import mindspore.nn as nn
+import mindspore.common.initializer as weight_init
+import mindspore.log as logger
+
+from modelarts.ResNet152.config import config
+
+from src.lr_generator import get_lr, warmup_cosine_annealing_lr
+from src.CrossEntropySmooth import CrossEntropySmooth
+from src.eval_callback import EvalCallBack
+from src.metric import DistAccuracy, ClassifyCorrectCell
+from src.model_utils.moxing_adapter import moxing_wrapper
+from src.model_utils.device_adapter import get_rank_id, get_device_num
+from src.resnet import conv_variance_scaling_initializer
+
+
+
+set_seed(1)
+
+
class LossCallBack(LossMonitor):
    """
    Monitor the loss in training.

    If the loss is NAN or INF, training is terminated by raising ValueError.
    Extends LossMonitor so the printed epoch number is offset by the number
    of epochs already completed before a resume (has_trained_epoch).
    """

    def __init__(self, has_trained_epoch=0):
        super(LossCallBack, self).__init__()
        # Epoch offset added when printing, so resumed runs report the
        # absolute epoch number.
        self.has_trained_epoch = has_trained_epoch

    def step_end(self, run_context):
        """step end"""
        cb_params = run_context.original_args()
        loss = cb_params.net_outputs

        # When net_outputs is a tuple/list, the first element is taken as
        # the loss tensor.
        if isinstance(loss, (tuple, list)):
            if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):
                loss = loss[0]

        # Reduce a loss tensor to a python float for the NaN/Inf check.
        if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):
            loss = np.mean(loss.asnumpy())

        # 1-based step index within the current epoch.
        cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1

        if isinstance(loss, float) and (np.isnan(loss) or np.isinf(loss)):
            raise ValueError("epoch: {} step: {}. Invalid loss, terminating training.".format(
                cb_params.cur_epoch_num, cur_step_in_epoch))
        # _per_print_times is inherited from LossMonitor.
        if self._per_print_times != 0 and cb_params.cur_step_num % self._per_print_times == 0:
            print("epoch: %s step: %s, loss is %s" % (cb_params.cur_epoch_num + int(self.has_trained_epoch),
                                                      cur_step_in_epoch, loss), flush=True)
+
+
# Resolve the backbone constructor and the dataset pipeline once at import
# time from config; the chosen names are aliased to `resnet` and
# `create_dataset` for the rest of this module.
if config.net_name in ("resnet18", "resnet34", "resnet50", "resnet152"):
    if config.net_name == "resnet18":
        from src.resnet import resnet18 as resnet
    elif config.net_name == "resnet34":
        from src.resnet import resnet34 as resnet
    elif config.net_name == "resnet50":
        from src.resnet import resnet50 as resnet
    else:
        from src.resnet import resnet152 as resnet
    if config.dataset == "cifar10":
        from src.dataset import create_dataset1 as create_dataset
    else:
        # Non-cifar10 datasets: graph mode and pynative mode use
        # different dataset pipelines.
        if config.mode_name == "GRAPH":
            from src.dataset import create_dataset2 as create_dataset
        else:
            from src.dataset import create_dataset_pynative as create_dataset
elif config.net_name == "resnet101":
    from src.resnet import resnet101 as resnet
    from src.dataset import create_dataset3 as create_dataset
else:
    # Any other net name falls through to SE-ResNet50.
    from src.resnet import se_resnet50 as resnet
    from src.dataset import create_dataset4 as create_dataset
+
+
def filter_checkpoint_parameter_by_list(origin_dict, param_filter):
    """Remove checkpoint entries whose key contains any filter substring.

    Mutates origin_dict in place, printing one log line per deletion.
    """
    doomed = [key for key in origin_dict
              if any(name in key for name in param_filter)]
    for key in doomed:
        print("Delete parameter from checkpoint: ", key)
        del origin_dict[key]
+
+
def apply_eval(eval_param):
    """Evaluate the model on the dataset and return the named metric."""
    metrics = eval_param["model"].eval(eval_param["dataset"])
    return metrics[eval_param["metrics_name"]]
+
+
def set_graph_kernel_context(run_platform, net_name):
    """Enable graph-kernel fusion, applied only for resnet101 on GPU."""
    if run_platform != "GPU" or net_name != "resnet101":
        return
    context.set_context(enable_graph_kernel=True)
    context.set_context(graph_kernel_flags="--enable_parallel_fusion --enable_expand_ops=Conv2D")
+
+
def set_parameter():
    """Configure MindSpore context from config: execution mode, device
    target and (optionally) the distributed/auto-parallel setup."""
    target = config.device_target
    if target == "CPU":
        # CPU target never runs distributed.
        config.run_distribute = False

    # Graph saving is only enabled for fresh (non-pretrained) runs.
    config.save_graphs = not config.pre_trained

    # init context
    if config.mode_name == 'GRAPH':
        if target == "Ascend":
            # Per-device graph dump directory, keyed by DEVICE_ID.
            rank_save_graphs_path = os.path.join(config.save_graphs_path, "soma", str(os.getenv('DEVICE_ID')))
            context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=config.save_graphs,
                                save_graphs_path=rank_save_graphs_path)
        else:
            context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=config.save_graphs)
        set_graph_kernel_context(target, config.net_name)
    else:
        context.set_context(mode=context.PYNATIVE_MODE, device_target=target, save_graphs=False)

    if config.parameter_server:
        context.set_ps_context(enable_ps=True)
    if config.run_distribute:
        if target == "Ascend":
            device_id = int(os.getenv('DEVICE_ID'))
            context.set_context(device_id=device_id)
            context.set_auto_parallel_context(device_num=config.device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
                                              gradients_mean=True)
            set_algo_parameters(elementwise_op_strategy_follow=True)
            # Allreduce fusion indices are net-specific; for resnet50 the
            # boost modes O1/O2 manage fusion themselves.
            if config.net_name == "resnet50" or config.net_name == "se-resnet50":
                if config.boost_mode not in ["O1", "O2"]:
                    context.set_auto_parallel_context(all_reduce_fusion_config=config.all_reduce_fusion_config)
            elif config.net_name in ["resnet101", "resnet152"]:
                context.set_auto_parallel_context(all_reduce_fusion_config=config.all_reduce_fusion_config)
            init()
        # GPU target
        else:
            init()
            context.set_auto_parallel_context(device_num=get_device_num(),
                                              parallel_mode=ParallelMode.DATA_PARALLEL,
                                              gradients_mean=True)
            if config.net_name == "resnet50":
                context.set_auto_parallel_context(all_reduce_fusion_config=config.all_reduce_fusion_config)
+
+
def load_pre_trained_checkpoint():
    """
    Load checkpoint according to pre_trained path.

    Returns:
        The loaded parameter dict, or None when nothing was loaded.
    """
    param_dict = None
    # NOTE(review): the join happens before the truthiness check below, so
    # an empty config.pre_trained becomes the script directory (truthy) and
    # the isdir branch is always taken — confirm this resume-from-ckpt-dir
    # behaviour is intended.
    config.pre_trained = os.path.join(os.path.dirname(os.path.abspath(__file__)), config.pre_trained)
    if config.pre_trained:
        if os.path.isdir(config.pre_trained):
            # Directory: resume from the newest *.ckpt in the output ckpt dir.
            ckpt_save_dir = os.path.join(config.output_path, config.checkpoint_path, "ckpt_0")
            ckpt_pattern = os.path.join(ckpt_save_dir, "*.ckpt")
            ckpt_files = glob.glob(ckpt_pattern)
            if not ckpt_files:
                logger.warning(f"There is no ckpt file in {ckpt_save_dir}, "
                               f"pre_trained is unsupported.")
            else:
                # Newest file by modification time.
                ckpt_files.sort(key=os.path.getmtime, reverse=True)
                time_stamp = datetime.datetime.now()
                print(f"time stamp {time_stamp.strftime('%Y.%m.%d-%H:%M:%S')}"
                      f" pre trained ckpt model {ckpt_files[0]} loading",
                      flush=True)
                param_dict = load_checkpoint(ckpt_files[0])
        elif os.path.isfile(config.pre_trained):
            param_dict = load_checkpoint(config.pre_trained)
        else:
            print(f"Invalid pre_trained {config.pre_trained} parameter.")
    return param_dict
+
+
def init_weight(net, param_dict):
    """Initialise network weights.

    With config.pre_trained set: restore the epoch/step counters stored in
    the checkpoint (when present) and load param_dict into the net,
    optionally dropping final-layer weights via config.filter_weight.
    Without: initialise Conv2d/Dense cells per config.conv_init/dense_init.
    """
    if config.pre_trained:
        if param_dict:
            if param_dict.get("epoch_num") and param_dict.get("step_num"):
                # Resume bookkeeping carried inside the checkpoint.
                config.has_trained_epoch = int(param_dict["epoch_num"].data.asnumpy())
                config.has_trained_step = int(param_dict["step_num"].data.asnumpy())
            else:
                config.has_trained_epoch = 0
                config.has_trained_step = 0

            if config.filter_weight:
                # Drop the classification head (end_point) parameters, e.g.
                # when fine-tuning with a different class count.
                filter_list = [x.name for x in net.end_point.get_parameters()]
                filter_checkpoint_parameter_by_list(param_dict, filter_list)
            load_param_into_net(net, param_dict)
    else:
        for _, cell in net.cells_and_names():
            if isinstance(cell, nn.Conv2d):
                if config.conv_init == "XavierUniform":
                    cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),
                                                                 cell.weight.shape,
                                                                 cell.weight.dtype))
                elif config.conv_init == "TruncatedNormal":
                    weight = conv_variance_scaling_initializer(cell.in_channels,
                                                               cell.out_channels,
                                                               cell.kernel_size[0])
                    cell.weight.set_data(weight)
            if isinstance(cell, nn.Dense):
                if config.dense_init == "TruncatedNormal":
                    cell.weight.set_data(weight_init.initializer(weight_init.TruncatedNormal(),
                                                                 cell.weight.shape,
                                                                 cell.weight.dtype))
                elif config.dense_init == "RandomNormal":
                    # N(0, 0.01) init, reshaped to the dense weight matrix.
                    in_channel = cell.in_channels
                    out_channel = cell.out_channels
                    weight = np.random.normal(loc=0, scale=0.01, size=out_channel * in_channel)
                    weight = Tensor(np.reshape(weight, (out_channel, in_channel)), dtype=cell.weight.dtype)
                    cell.weight.set_data(weight)
+
+
def init_lr(step_size):
    """Build the per-step learning-rate schedule for the chosen optimizer.

    Args:
        step_size: Number of steps per epoch.

    Returns:
        The learning-rate schedule consumed by the optimizer.
    """
    if config.optimizer == "Thor":
        from src.lr_generator import get_thor_lr
        lr = get_thor_lr(0, config.lr_init, config.lr_decay, config.lr_end_epoch, step_size, decay_epochs=39)
    else:
        if config.net_name in ("resnet18", "resnet34", "resnet50", "resnet152", "se-resnet50"):
            lr = get_lr(lr_init=config.lr_init, lr_end=config.lr_end, lr_max=config.lr_max,
                        warmup_epochs=config.warmup_epochs, total_epochs=config.epoch_size, steps_per_epoch=step_size,
                        lr_decay_mode=config.lr_decay_mode)
        else:
            # Remaining nets (e.g. resnet101): cosine annealing with warmup.
            lr = warmup_cosine_annealing_lr(config.lr, step_size, config.warmup_epochs, config.epoch_size,
                                            config.pretrain_epoch_size * step_size)
    return lr
+
+
def init_loss_scale():
    """Build the training loss: label-smoothed cross-entropy for
    imagenet2012, plain softmax cross-entropy otherwise."""
    if config.dataset != "imagenet2012":
        return SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    if not config.use_label_smooth:
        config.label_smooth_factor = 0.0
    return CrossEntropySmooth(sparse=True, reduction="mean",
                              smooth_factor=config.label_smooth_factor,
                              num_classes=config.class_num)
+
+
def init_group_params(net):
    """Split trainable params into weight-decayed and non-decayed groups.

    BatchNorm scale/shift (gamma/beta) and biases are excluded from weight
    decay, as is conventional for ResNet training.
    """
    skip_tokens = ('beta', 'gamma', 'bias')
    params = net.trainable_params()
    decayed_params = [p for p in params
                      if not any(t in p.name for t in skip_tokens)]
    no_decayed_params = [p for p in params
                         if any(t in p.name for t in skip_tokens)]
    return [{'params': decayed_params, 'weight_decay': config.weight_decay},
            {'params': no_decayed_params},
            {'order_params': net.trainable_params()}]
+
+
def run_eval(target, model, ckpt_save_dir, cb):
    """Append an EvalCallBack to `cb` when config.run_eval is enabled.

    Args:
        target: Device target string.
        model: Model used for evaluation.
        ckpt_save_dir: Directory where the best checkpoint is written.
        cb: Callback list, extended in place.

    Raises:
        ValueError: If config.eval_dataset_path is unset or not a directory.
    """
    if config.run_eval:
        if config.eval_dataset_path is None or (not os.path.isdir(config.eval_dataset_path)):
            raise ValueError("{} is not a existing path.".format(config.eval_dataset_path))
        eval_dataset = create_dataset(dataset_path=config.eval_dataset_path, do_train=False,
                                      batch_size=config.batch_size, train_image_size=config.train_image_size,
                                      eval_image_size=config.eval_image_size,
                                      target=target, enable_cache=config.enable_cache,
                                      cache_session_id=config.cache_session_id)
        eval_param_dict = {"model": model, "dataset": eval_dataset, "metrics_name": "acc"}
        # "besk_ckpt_name" (sic) — presumably matches EvalCallBack's keyword
        # spelling; do not "fix" here without changing EvalCallBack too.
        eval_cb = EvalCallBack(apply_eval, eval_param_dict, interval=config.eval_interval,
                               eval_start_epoch=config.eval_start_epoch, save_best_ckpt=config.save_best_ckpt,
                               ckpt_directory=ckpt_save_dir, besk_ckpt_name="best_acc.ckpt",
                               metrics_name="acc")
        cb += [eval_cb]
+
+
def set_save_ckpt_dir():
    """Build the checkpoint directory; distributed runs get a per-rank
    "ckpt_<rank>/" suffix (rank id source depends on modelarts)."""
    save_dir = os.path.join(config.output_path, config.checkpoint_path)
    if config.run_distribute:
        rank = get_rank_id() if config.enable_modelarts else get_rank()
        save_dir = save_dir + "ckpt_" + str(rank) + "/"
    return save_dir
+
+
+def _get_last_ckpt(ckpt_dir):
+    ckpt_files = [ckpt_file for ckpt_file in os.listdir(ckpt_dir)
+                  if ckpt_file.endswith('.ckpt')]
+    if not ckpt_files:
+        print("No ckpt file found.")
+        return None
+
+    return os.path.join(ckpt_dir, sorted(ckpt_files)[-1])
+
+
def _export_air(ckpt_dir):
    """Export the newest checkpoint in ckpt_dir to AIR format and copy the
    resulting file to config.output_path via moxing. No-op when no
    checkpoint is found."""
    ckpt_file = _get_last_ckpt(ckpt_dir)
    if not ckpt_file:
        return
    net = resnet(config.class_num)
    param_dict = load_checkpoint(ckpt_file)
    load_param_into_net(net, param_dict)

    # Dummy NCHW input fixing the export shape at 1x3x304x304 — matches the
    # 304-pixel om model referenced by the inference pipeline.
    input_arr = Tensor(np.zeros([1, 3, 304, 304],
                                np.float32))
    print("Start export air.")
    export(net, input_arr, file_name=config.file_name,
           file_format="AIR")
    file_name = config.file_name + ".air"
    mox.file.copy(file_name, os.path.join(config.output_path, file_name))
    print("Export success.")
+
+
+@moxing_wrapper()
+def train_net():
+    """Training entry point: build dataset/net/optimizer/callbacks, train, then export to AIR."""
+    target = config.device_target
+    set_parameter()
+    ckpt_param_dict = load_pre_trained_checkpoint()  # NOTE(review): presumably None/empty when not resuming — confirm
+    dataset = create_dataset(dataset_path=config.data_path, do_train=True, repeat_num=1,
+                             batch_size=config.batch_size, train_image_size=config.train_image_size,
+                             eval_image_size=config.eval_image_size, target=target,
+                             distribute=config.run_distribute)
+    step_size = dataset.get_dataset_size()
+    net = resnet(class_num=config.class_num)
+    if config.parameter_server:
+        net.set_param_ps()
+
+    init_weight(net=net, param_dict=ckpt_param_dict)
+    lr = Tensor(init_lr(step_size=step_size))
+    # optimizer: Momentum by default, optionally wrapped by LARS below
+    group_params = init_group_params(net)
+    opt = Momentum(group_params, lr, config.momentum, loss_scale=config.loss_scale)
+    if config.optimizer == "LARS":
+        opt = LARS(opt, epsilon=config.lars_epsilon, coefficient=config.lars_coefficient,
+                   lars_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name and 'bias' not in x.name)  # exclude BN/bias params
+    loss = init_loss_scale()
+    loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
+    dist_eval_network = ClassifyCorrectCell(net) if config.run_distribute else None
+    metrics = {"acc"}
+    if config.run_distribute:
+        metrics = {'acc': DistAccuracy(batch_size=config.batch_size, device_num=config.device_num)}
+    if (config.net_name not in ("resnet18", "resnet34", "resnet50", "resnet101", "resnet152", "se-resnet50")) or \
+            config.parameter_server or target == "CPU":
+        # fp32 training path: unlisted nets, parameter-server mode, or CPU target
+        model = Model(net, loss_fn=loss, optimizer=opt, metrics=metrics, eval_network=dist_eval_network)
+    else:
+        model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics=metrics,
+                      amp_level="O2",  # O2 mixed precision; NOTE(review): boost_level=config.boost_mode left disabled here
+                      keep_batchnorm_fp32=False,
+                      eval_network=dist_eval_network)
+
+    if config.optimizer == "Thor" and config.dataset == "imagenet2012":
+        from src.lr_generator import get_thor_damping
+        damping = get_thor_damping(0, config.damping_init, config.damping_decay, 70, step_size)
+        split_indices = [26, 53]  # assumes layer split points for Thor's second-order update — TODO confirm
+        opt = thor(net, lr, Tensor(damping), config.momentum, config.weight_decay, config.loss_scale,
+                   config.batch_size, split_indices=split_indices, frequency=config.frequency)
+        model = ConvertModelUtils().convert_to_thor_model(model=model, network=net, loss_fn=loss, optimizer=opt,
+                                                          loss_scale_manager=loss_scale, metrics={'acc'},
+                                                          amp_level="O2", keep_batchnorm_fp32=False)
+        config.run_eval = False
+        logger.warning("Thor optimizer not support evaluation while training.")
+
+    # callbacks: timing, loss logging, optional checkpointing, optional in-training eval
+    time_cb = TimeMonitor(data_size=step_size)
+    loss_cb = LossCallBack(config.has_trained_epoch)
+    cb = [time_cb, loss_cb]
+    ckpt_save_dir = set_save_ckpt_dir()
+    if config.save_checkpoint:
+        ckpt_append_info = [{"epoch_num": config.has_trained_epoch, "step_num": config.has_trained_step}]
+        config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
+                                     keep_checkpoint_max=config.keep_checkpoint_max,
+                                     append_info=ckpt_append_info)  # resume info stored inside each checkpoint
+        ckpt_cb = ModelCheckpoint(prefix="resnet", directory=ckpt_save_dir, config=config_ck)
+        cb += [ckpt_cb]
+    run_eval(target, model, ckpt_save_dir, cb)  # appends an EvalCallBack to cb (see run_eval above)
+    # train for the remaining epochs only when resuming from a partially-trained checkpoint
+    if config.net_name == "se-resnet50":
+        config.epoch_size = config.train_epoch_size
+    dataset_sink_mode = (not config.parameter_server) and target != "CPU"  # sinking off for PS mode and CPU
+    config.pretrain_epoch_size = config.has_trained_epoch
+    model.train(config.epoch_size - config.pretrain_epoch_size, dataset, callbacks=cb,
+                sink_size=dataset.get_dataset_size(), dataset_sink_mode=dataset_sink_mode)
+
+    if config.run_eval and config.enable_cache:
+        print("Remember to shut down the cache server via \"cache_admin --stop\"")
+
+    _export_air(ckpt_save_dir)
+
+if __name__ == '__main__':
+    train_net()
diff --git a/official/cv/resnet/scripts/docker_start.sh b/official/cv/resnet/scripts/docker_start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..882c28e4926bafc36e30489091b068e1edb8dd2c
--- /dev/null
+++ b/official/cv/resnet/scripts/docker_start.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+docker_image=$1  # docker image (repository:tag) to launch
+data_dir=$2  # host dataset dir, bind-mounted at the identical path inside the container
+model_dir=$3  # host model/code dir, bind-mounted at the identical path inside the container
+
+docker run -it --ipc=host \
+               --device=/dev/davinci0 \
+               --device=/dev/davinci1 \
+               --device=/dev/davinci2 \
+               --device=/dev/davinci3 \
+               --device=/dev/davinci4 \
+               --device=/dev/davinci5 \
+               --device=/dev/davinci6 \
+               --device=/dev/davinci7 \
+               --device=/dev/davinci_manager \
+               --device=/dev/devmm_svm \
+               --device=/dev/hisi_hdc \
+               --privileged \
+               -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+               -v /usr/local/Ascend/add-ons/:/usr/local/Ascend/add-ons \
+               -v ${data_dir}:${data_dir} \
+               -v ${model_dir}:${model_dir} \
+               -v /root/ascend/log:/root/ascend/log ${docker_image} /bin/bash