diff --git a/official/cv/yolov3_darknet53/src/yolo_dataset.py b/official/cv/yolov3_darknet53/src/yolo_dataset.py
index ddb4fdb07cca78bba64c686c88f370212dcff9cf..36faf1d5c61240f029aa58c821861d67d18a5de6 100644
--- a/official/cv/yolov3_darknet53/src/yolo_dataset.py
+++ b/official/cv/yolov3_darknet53/src/yolo_dataset.py
@@ -147,7 +147,6 @@ def create_yolo_dataset(image_dir, anno_path, batch_size, max_epoch, device_num,
                         config=None, is_training=True, shuffle=True):
     """Create dataset for YOLOV3."""
     cv2.setNumThreads(0)
-    de.config.set_seed(100)
 
     if is_training:
         filter_crowd = True
diff --git a/official/cv/yolov3_darknet53/train.py b/official/cv/yolov3_darknet53/train.py
index 0d29432115642852857779c190856b49719a8123..22cfd4141185886aa8aa3a63cef338c71c0d1929 100644
--- a/official/cv/yolov3_darknet53/train.py
+++ b/official/cv/yolov3_darknet53/train.py
@@ -41,7 +41,7 @@ from model_utils.config import config
 from model_utils.moxing_adapter import moxing_wrapper
 from model_utils.device_adapter import get_device_id, get_device_num
 
-set_seed(100)
+set_seed(1)
 
 class BuildTrainNetwork(nn.Cell):
     def __init__(self, network, criterion):
diff --git a/research/cv/centernet_det/default_config.yaml b/research/cv/centernet_det/default_config.yaml
index 30d1990a15140dadd2ebe92dedb7cb5116ab0a09..bd125cc168ab0eb340d653874c30756d2b644d90 100644
--- a/research/cv/centernet_det/default_config.yaml
+++ b/research/cv/centernet_det/default_config.yaml
@@ -22,16 +22,23 @@ save_result_dir: ""
 device_id: 0
 device_num: 1
 
+filter_weight: 'false'
 distribute: 'false'
 need_profiler: "false"
 profiler_path: "./profiler"
 epoch_size: 1
+batch_size: ""
+num_classes: ""
+lr_schedule: ""
+learning_rate: ""
+multi_epochs: ""
+end_learning_rate: ""
 train_steps: -1
 enable_save_ckpt: "true"
 do_shuffle: "true"
 enable_data_sink: "true"
 data_sink_steps: -1
-save_checkpoint_path: ""
+save_checkpoint_path: "checkpoints"
 load_checkpoint_path: ""
 save_checkpoint_steps: 1221
 save_checkpoint_num: 1
@@ -228,8 +235,8 @@ eval_config:
 export_config:
     input_res: dataset_config.input_res
     ckpt_file: "./ckpt_file.ckpt"
-    export_format: "MINDIR"
-    export_name: "CenterNet_ObjectDetection"
+    export_format: "AIR"
+    export_name: "CenterNet_Hourglass"
 
 ---
 # Help description for each configuration
@@ -272,4 +279,4 @@ need_profiler: ["true", "false"]
 enable_save_ckpt: ["true", "false"]
 do_shuffle: ["true", "false"]
 enable_data_sink: ["true", "false"]
-export_format: ["MINDIR"]
+export_format: ["MINDIR", "AIR"]
diff --git a/research/cv/centernet_det/infer/Dockerfile b/research/cv/centernet_det/infer/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..595f1273dcf03cbb5e80c8498f4ddd615d24a9de
--- /dev/null
+++ b/research/cv/centernet_det/infer/Dockerfile
@@ -0,0 +1,7 @@
+ARG FROM_IMAGE_NAME
+FROM ${FROM_IMAGE_NAME}
+
+RUN ln -s /usr/local/python3.7.5/bin/python3.7 /usr/bin/python
+
+COPY requirements.txt .
+RUN pip3.7 install -r requirements.txt
diff --git a/research/cv/centernet_det/infer/convert/convert.sh b/research/cv/centernet_det/infer/convert/convert.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d03a9aab298f77c8ccac65bf9dee76fe98b1b1bc
--- /dev/null
+++ b/research/cv/centernet_det/infer/convert/convert.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+model_path=$1
+output_model_name=$2
+
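+# Convert the exported model to an Ascend 310 offline (OM) model with ATC;
+# the network expects a single 1x3x512x512 input (NCHW).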
+/usr/local/Ascend/atc/bin/atc \
+--model="$model_path" \
+--framework=1 \
+--output="$output_model_name" \
+--input_format=NCHW --input_shape="actual_input_1:1,3,512,512" \
+--enable_small_channel=1 \
+--log=error \
+--soc_version=Ascend310 \
+--op_select_implmode=high_precision \
+--buffer_optimize=off_optimize \
+--precision_mode=allow_fp32_to_fp16
diff --git a/research/cv/centernet_det/infer/data/config/centernet_coco2017.cfg b/research/cv/centernet_det/infer/data/config/centernet_coco2017.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..2073cec28282666a365f2b152ef0e0cb42b56dd6
--- /dev/null
+++ b/research/cv/centernet_det/infer/data/config/centernet_coco2017.cfg
@@ -0,0 +1,5 @@
+# hyper-parameter
+CLASS_NUM=80
+SCORE_THRESH=0.7
+
+RPN_MAX_NUM=100
diff --git a/research/cv/centernet_det/infer/data/config/coco2017.names b/research/cv/centernet_det/infer/data/config/coco2017.names
new file mode 100644
index 0000000000000000000000000000000000000000..a15dc95ecc1b4b55a13a86a834a02df01115653b
--- /dev/null
+++ b/research/cv/centernet_det/infer/data/config/coco2017.names
@@ -0,0 +1,81 @@
+# This file is originally from http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip
+person
+bicycle
+car
+motorcycle
+airplane
+bus
+train
+truck
+boat
+traffic light
+fire hydrant
+stop sign
+parking meter
+bench
+bird
+cat
+dog
+horse
+sheep
+cow
+elephant
+bear
+zebra
+giraffe
+backpack
+umbrella
+handbag
+tie
+suitcase
+frisbee
+skis
+snowboard
+sports ball
+kite
+baseball bat
+baseball glove
+skateboard
+surfboard
+tennis racket
+bottle
+wine glass
+cup
+fork
+knife
+spoon
+bowl
+banana
+apple
+sandwich
+orange
+broccoli
+carrot
+hot dog
+pizza
+donut
+cake
+chair
+couch
+potted plant
+bed
+dining table
+toilet
+tv
+laptop
+mouse
+remote
+keyboard
+cell phone
+microwave
+oven
+toaster
+sink
+refrigerator
+book
+clock
+vase
+scissors
+teddy bear
+hair drier
+toothbrush
diff --git a/research/cv/centernet_det/infer/docker_start_infer.sh b/research/cv/centernet_det/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..dc5b8107927346c19788a2c1538c3aefced5546f
--- /dev/null
+++ b/research/cv/centernet_det/infer/docker_start_infer.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+share_dir=$2
+data_dir=$3
+echo "$1"
+echo "$2"
+if [ -z "${docker_image}" ]; then
+    echo "please input docker_image"
+    exit 1
+fi
+
+if [ ! -d "${share_dir}" ]; then
+    echo "please input share directory that contains dataset, models and codes"
+    exit 1
+fi
+
+
+docker run -it \
+    --device=/dev/davinci0 \
+    --device=/dev/davinci_manager \
+    --device=/dev/devmm_svm \
+    --device=/dev/hisi_hdc \
+    --privileged \
+    -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
+    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+    -v ${data_dir}:${data_dir}  \
+    -v ${share_dir}:${share_dir} \
+    ${docker_image} \
+    /bin/bash
\ No newline at end of file
diff --git a/research/cv/centernet_det/infer/mxbase/CMakeLists.txt b/research/cv/centernet_det/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ce3a0f0e87ce49d553cc34d33dba154189cf28f9
--- /dev/null
+++ b/research/cv/centernet_det/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,51 @@
+cmake_minimum_required(VERSION 3.5.2)
+
+SET(CMAKE_BUILD_TYPE "Debug")
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}  -std=c++0x")
+SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g -ggdb")
+SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
+project(centernetpost)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+
+#set(PLUGIN_NAME "centernet_mindspore_post")
+set(TARGET_LIBRARY centernet_mindspore_post)
+set(TARGET_MAIN centernet_mindspore)
+
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/nnrt/latest/acllib)
+
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+include_directories($ENV{MX_SDK_HOME}/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/opencv4)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/gstreamer-1.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/glib-2.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/lib/glib-2.0/include)
+
+link_directories($ENV{MX_SDK_HOME}/lib)
+link_directories($ENV{MX_SDK_HOME}/opensource/lib/)
+
+add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations)
+add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}")
+add_compile_options("-Dgoogle=mindxsdk_private")
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+
+#message("ACL_LIB_PATH:${ACL_LIB_PATH}/lib64/.")
+#include_directories(${CMAKE_CURRENT_BINARY_DIR})
+include_directories(${ACL_LIB_PATH}/include)
+link_directories(${ACL_LIB_PATH}/lib64/)
+
+add_library(${TARGET_LIBRARY} SHARED src/PostProcess/CenterNetMindsporePost.cpp)
+
+target_link_libraries(${TARGET_LIBRARY} glib-2.0 gstreamer-1.0 gobject-2.0 gstbase-1.0 gmodule-2.0)
+target_link_libraries(${TARGET_LIBRARY} plugintoolkit mxpidatatype mxbase)
+target_link_libraries(${TARGET_LIBRARY} -Wl,-z,relro,-z,now,-z,noexecstack)
+
+message("TARGET_LIBRARY:${TARGET_LIBRARY}.")
+
+
+add_executable(${TARGET_MAIN} src/main.cpp src/CenterNet.cpp)
+target_link_libraries(${TARGET_MAIN} ${TARGET_LIBRARY} glog  cpprest mxbase libascendcl.so
+libruntime.so libopencv_world.so.4.3 opencv_world)
+
diff --git a/research/cv/centernet_det/infer/mxbase/build.sh b/research/cv/centernet_det/infer/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..71f32675f9e85486ab52650f904689892a72f02b
--- /dev/null
+++ b/research/cv/centernet_det/infer/mxbase/build.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+
+# env
+if [ -d build ]; then
+    rm -rf build
+fi
+
+mkdir -p build
+cd build || exit
+
+function make_plugin() {
+    if ! cmake ..;
+    then
+      echo "cmake failed."
+      return 1
+    fi
+
+    if ! (make);
+    then
+      echo "make failed."
+      return 1
+    fi
+
+    return 0
+}
+
+if make_plugin;
+then
+  echo "INFO: Build successfully."
+else
+  echo "ERROR: Build failed."
+fi
+
+cd - || exit
diff --git a/research/cv/centernet_det/infer/mxbase/src/CenterNet.cpp b/research/cv/centernet_det/infer/mxbase/src/CenterNet.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a0da6f92cdc4b0fad4fee54c5bbd060e5a458ab5
--- /dev/null
+++ b/research/cv/centernet_det/infer/mxbase/src/CenterNet.cpp
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <algorithm>
+#include <utility>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include "CenterNet.h"
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+#include <boost/property_tree/json_parser.hpp>
+
+namespace {
+    const uint32_t YUV_BYTE_NU = 3;
+    const uint32_t YUV_BYTE_DE = 2;
+    const uint32_t MODEL_HEIGHT = 512;
+    const uint32_t MODEL_WIDTH = 512;
+}
+
+void PrintTensorShape(const std::vector<MxBase::TensorDesc> &tensorDescVec, const std::string &tensorName) {
+    LogInfo << "The shape of " << tensorName << " is as follows:";
+    for (size_t i = 0; i < tensorDescVec.size(); ++i) {
+        LogInfo << "  Tensor " << i << ":";
+        for (size_t j = 0; j < tensorDescVec[i].tensorDims.size(); ++j) {
+            LogInfo << "   dim: " << j << ": " << tensorDescVec[i].tensorDims[j];
+        }
+    }
+}
+
+APP_ERROR CenterNet::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    dvppWrapper_ = std::make_shared<MxBase::DvppWrapper>();
+    ret = dvppWrapper_->Init();
+    if (ret != APP_ERR_OK) {
+        LogError << "DvppWrapper init failed, ret=" << ret << ".";
+        return ret;
+    }
+    model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    PrintTensorShape(modelDesc_.inputTensors, "Model Input Tensors");
+    PrintTensorShape(modelDesc_.outputTensors, "Model Output Tensors");
+
+    MxBase::ConfigData configData;
+    const std::string softmax = initParam.softmax ? "true" : "false";
+    const std::string checkTensor = initParam.checkTensor ? "true" : "false";
+
+    configData.SetJsonValue("CLASS_NUM", std::to_string(initParam.classNum));
+    configData.SetJsonValue("TOP_K", std::to_string(initParam.topk));
+    configData.SetJsonValue("SOFTMAX", softmax);
+    configData.SetJsonValue("CHECK_MODEL", checkTensor);
+
+    auto jsonStr = configData.GetCfgJson().serialize();
+    std::map<std::string, std::shared_ptr<void>> config;
+    config["postProcessConfigContent"] = std::make_shared<std::string>(jsonStr);
+    config["labelPath"] = std::make_shared<std::string>(initParam.labelPath);
+
+    post_ = std::make_shared<MxBase::CenterNetMindsporePost>();
+    ret = post_->Init(config);
+    if (ret != APP_ERR_OK) {
+        LogError << "CenterNetPostProcess init failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR CenterNet::DeInit() {
+    dvppWrapper_->DeInit();
+    model_->DeInit();
+    post_->DeInit();
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+APP_ERROR CenterNet::ReadImage(const std::string &imgPath, cv::Mat &imageMat, ImageShape &imgShape) {
+    imageMat = cv::imread(imgPath, cv::IMREAD_COLOR);
+    imgShape.width = imageMat.cols;
+    imgShape.height = imageMat.rows;
+    return APP_ERR_OK;
+}
+
+APP_ERROR CenterNet::Resize_Affine(const cv::Mat &srcImage, cv::Mat &dstImage, ImageShape &imgShape) {
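+    // Warp the image into the 512x512 model input with an affine transform that keeps the
+    // aspect ratio (the shorter side is zero-padded), then normalize each channel with the
+    // CenterNet mean/std values.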
+    int new_width, new_height;
+    new_height = static_cast<int>(imgShape.height);
+    new_width = static_cast<int>(imgShape.width);
+    float ss = static_cast<float>(YUV_BYTE_DE);
+    cv::Mat src(new_height, new_width, CV_8UC3, srcImage.data);
+    cv::Point2f srcPoint2f[3], dstPoint2f[3];
+    int max_h_w = std::max(static_cast<int>(imgShape.width), static_cast<int>(imgShape.height));
+    srcPoint2f[0] = cv::Point2f(static_cast<float>(new_width / ss),
+                                static_cast<float>(new_height / ss));
+    srcPoint2f[1] = cv::Point2f(static_cast<float>(new_width / ss),
+                                static_cast<float>((new_height - max_h_w) / ss));
+    srcPoint2f[2] = cv::Point2f(static_cast<float>((new_width - max_h_w) / ss),
+                                static_cast<float>((new_height - max_h_w) / ss));
+    dstPoint2f[0] = cv::Point2f(static_cast<float>(MODEL_WIDTH) / ss,
+                                static_cast<float>(MODEL_HEIGHT) / ss);
+    dstPoint2f[1] = cv::Point2f(static_cast<float>(MODEL_WIDTH) / ss, 0.0);
+    dstPoint2f[2] = cv::Point2f(0.0, 0.0);
+
+    cv::Mat warp_mat(2, 3, CV_32FC1);
+    warp_mat = cv::getAffineTransform(srcPoint2f, dstPoint2f);
+    cv::Mat warp_dst = cv::Mat::zeros(cv::Size(static_cast<int>(MODEL_HEIGHT), static_cast<int>(MODEL_WIDTH)),
+                                      src.type());
+    cv::warpAffine(src, warp_dst, warp_mat, warp_dst.size());
+    cv::Mat dst;
+    warp_dst.convertTo(dst, CV_32F);
+    dstImage = dst;
+    // normalization (per-channel mean/std)
+    float mean[3] = {0.40789654, 0.44719302, 0.47026115};
+    float std[3] = {0.28863828, 0.27408164, 0.27809835};
+    std::vector<cv::Mat> channels;
+    cv::split(dstImage, channels);
+    cv::Mat blue, green, red;
+    blue = channels.at(0);
+    green = channels.at(1);
+    red = channels.at(2);
+    cv::Mat B, G, R;
+    float mm = 255;
+    B = ((blue/mm) - mean[0]) / std[0];
+    G = ((green/mm) - mean[1]) / std[1];
+    R = ((red/mm) - mean[2]) / std[2];
+    std::vector<cv::Mat> channels2;
+    channels2.push_back(B);
+    channels2.push_back(G);
+    channels2.push_back(R);
+    cv::merge(channels2, dstImage);
+
+    return APP_ERR_OK;
+}
+
+APP_ERROR CenterNet::CVMatToTensorBase(std::vector<float> &imageData, MxBase::TensorBase &tensorBase) {
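+    // Buffer size in bytes: 512 x 512 x 3 channels x 4 bytes per float32 element.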
+    const uint32_t dataSize = MODEL_HEIGHT * MODEL_WIDTH * MxBase::YUV444_RGB_WIDTH_NU * YUV_BYTE_DE * YUV_BYTE_DE;
+    MxBase::MemoryData memoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE, deviceId_);
+    MxBase::MemoryData memoryDataSrc(reinterpret_cast<void*>(&imageData[0]),
+                                     dataSize, MxBase::MemoryData::MEMORY_HOST_MALLOC);
+    APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc failed.";
+        return ret;
+    }
+    std::vector<uint32_t> shape = {MODEL_HEIGHT * MxBase::YUV444_RGB_WIDTH_NU, static_cast<uint32_t>(MODEL_WIDTH)};
+    tensorBase = MxBase::TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_FLOAT32);
+    return APP_ERR_OK;
+}
+
+APP_ERROR CenterNet::Inference(const std::vector<MxBase::TensorBase> &inputs,
+                                      std::vector<MxBase::TensorBase> &outputs) {
+    auto dtypes = model_->GetOutputDataType();
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector<uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        MxBase::TensorBase tensor(shape, dtypes[i], MxBase::MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
+        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs.push_back(tensor);
+    }
+    MxBase::DynamicInfo dynamicInfo = {};
+    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+    dynamicInfo.batchSize = 1;
+    auto startTime = std::chrono::high_resolution_clock::now();
+    APP_ERROR ret = model_->ModelInference(inputs, outputs, dynamicInfo);
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();  // save time
+    inferCostTimeMilliSec += costMs;
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR CenterNet::PostProcess(const std::vector<MxBase::TensorBase> &inputs,
+                                  std::vector<std::vector<MxBase::ObjectInfo>> &objectInfos,
+                                  const std::vector<MxBase::ResizedImageInfo> &resizedImageInfos,
+                                  const std::map<std::string, std::shared_ptr<void>> &configParamMap) {
+    APP_ERROR ret = post_->Process(inputs, objectInfos, resizedImageInfos, configParamMap);
+    if (ret != APP_ERR_OK) {
+        LogError << "Process failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+void SaveInferResult(const std::vector<MxBase::ObjectInfo> &objInfos, const std::string &resultPath) {
+    if (objInfos.empty()) {
+        LogWarn << "The predict result is empty.";
+        return;
+    }
+
+    namespace pt = boost::property_tree;
+    pt::ptree root, data;
+    int index = 0;
+    for (auto &obj : objInfos) {
+        ++index;
+        LogInfo << "BBox[" << index << "]:[x0=" << obj.x0 << ", y0=" << obj.y0 << ", x1=" << obj.x1 << ", y1=" << obj.y1
+                << "], confidence=" << obj.confidence << ", classId=" << obj.classId << ", className=" << obj.className
+                << std::endl;
+        pt::ptree item;
+        item.put("classId", obj.classId);
+        item.put("className", obj.className);
+        item.put("confidence", obj.confidence);
+        item.put("x0", obj.x0);
+        item.put("y0", obj.y0);
+        item.put("x1", obj.x1);
+        item.put("y1", obj.y1);
+
+        data.push_back(std::make_pair("", item));
+    }
+    root.add_child("data", data);
+    pt::json_parser::write_json(resultPath, root, std::locale(), true);
+}
+
+APP_ERROR CenterNet::Process(const std::string &imgPath, const std::string &resultPath) {
+    cv::Mat imageMat;
+    ImageShape imageShape{};
+    APP_ERROR ret = ReadImage(imgPath, imageMat, imageShape);
+    if (ret != APP_ERR_OK) {
+        LogError << "ReadImage failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    cv::Mat dstImage(MODEL_WIDTH, MODEL_HEIGHT, CV_32FC3);
+    Resize_Affine(imageMat, dstImage, imageShape);
+
+    std::vector<float> dst_data;
+    std::vector<cv::Mat> bgrChannels(3);
+    cv::split(dstImage, bgrChannels);
+    for (std::size_t i = 0; i < bgrChannels.size(); i++) {
+        std::vector<float> data = std::vector<float>(bgrChannels[i].reshape(1, 1));
+        dst_data.insert(dst_data.end(), data.begin(), data.end());
+    }
+
+    std::vector<MxBase::TensorBase> inputs = {};
+    std::vector<MxBase::TensorBase> outputs = {};
+    MxBase::TensorBase tensorBase;
+    ret = CVMatToTensorBase(dst_data, tensorBase);
+    if (ret != APP_ERR_OK) {
+        LogError << "CVMatToTensorBase failed, ret=" << ret << ".";
+        return ret;
+    }
+    inputs.push_back(tensorBase);
+    ret = Inference(inputs, outputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret << ".";
+        return ret;
+    }
+    std::vector<std::vector<MxBase::ObjectInfo>> objectInfos = {};
+    std::vector<MxBase::ResizedImageInfo> resizedImageInfos = {};
+    MxBase::ResizedImageInfo imgInfo = {
+      MODEL_WIDTH, MODEL_HEIGHT, imageShape.width, imageShape.height, MxBase::RESIZER_STRETCHING, 0.0};
+    resizedImageInfos.push_back(imgInfo);
+    std::map<std::string, std::shared_ptr<void>> configParamMap = {};
+    ret = PostProcess(outputs, objectInfos, resizedImageInfos, configParamMap);
+    if (ret != APP_ERR_OK) {
+        LogError << "PostProcess failed, ret=" << ret << ".";
+        return ret;
+    }
+    std::vector<MxBase::ObjectInfo> objects = objectInfos.at(0);
+    SaveInferResult(objects, resultPath);
+    return APP_ERR_OK;
+}
diff --git a/research/cv/centernet_det/infer/mxbase/src/CenterNet.h b/research/cv/centernet_det/infer/mxbase/src/CenterNet.h
new file mode 100644
index 0000000000000000000000000000000000000000..59a5465ecfa9d9593df5c44e745ed6783c84918c
--- /dev/null
+++ b/research/cv/centernet_det/infer/mxbase/src/CenterNet.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CENTERNETPOST_CENTERNET_H
+#define CENTERNETPOST_CENTERNET_H
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include <opencv2/opencv.hpp>
+
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "MxBase/PostProcessBases/ObjectPostProcessBase.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+#include "PostProcess/CenterNetMindsporePost.h"
+#include "MxBase/DeviceManager/DeviceManager.h"
+
+struct InitParam {
+    uint32_t deviceId;
+    std::string labelPath;
+    uint32_t classNum;
+    uint32_t topk;
+    bool softmax;
+    bool checkTensor;
+    std::string modelPath;
+};
+
+struct ImageShape {
+    uint32_t width;
+    uint32_t height;
+};
+
+class CenterNet {
+ public:
+        APP_ERROR Init(const InitParam &initParam);
+        APP_ERROR DeInit();
+        APP_ERROR ReadImage(const std::string &imgPath, cv::Mat &imageMat, ImageShape &imgShape);
+        APP_ERROR Resize_Affine(const cv::Mat &srcImage, cv::Mat &dstImage, ImageShape &imgShape);
+        APP_ERROR CVMatToTensorBase(std::vector<float> &imageData, MxBase::TensorBase &tensorBase);
+        APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> &outputs);
+        APP_ERROR PostProcess(const std::vector<MxBase::TensorBase> &inputs,
+                          std::vector<std::vector<MxBase::ObjectInfo>> &objectInfos,
+                          const std::vector<MxBase::ResizedImageInfo> &resizedImageInfos,
+                          const std::map<std::string, std::shared_ptr<void>> &configParamMap);
+        APP_ERROR Process(const std::string &imgPath, const std::string &resultPath);
+        // get infer time
+        double GetInferCostMilliSec() const {return inferCostTimeMilliSec;}
+
+ private:
+        std::shared_ptr<MxBase::DvppWrapper> dvppWrapper_;
+        std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
+        std::shared_ptr<MxBase::CenterNetMindsporePost> post_;
+        MxBase::ModelDesc modelDesc_;
+        uint32_t deviceId_ = 0;
+        // infer time
+        double inferCostTimeMilliSec = 0.0;
+};
+
+#endif
diff --git a/research/cv/centernet_det/infer/mxbase/src/PostProcess/CenterNetMindsporePost.cpp b/research/cv/centernet_det/infer/mxbase/src/PostProcess/CenterNetMindsporePost.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ac1a4fbc7eee1fc1696e980a20eb5b6e539f9701
--- /dev/null
+++ b/research/cv/centernet_det/infer/mxbase/src/PostProcess/CenterNetMindsporePost.cpp
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <boost/property_tree/json_parser.hpp>
+#include <opencv4/opencv2/core.hpp>
+#include <opencv4/opencv2/opencv.hpp>
+#include "acl/acl.h"
+#include "CenterNetMindsporePost.h"
+#include "MxBase/CV/ObjectDetection/Nms/Nms.h"
+
+namespace {
+// Output Tensor
+const int OUTPUT_TENSOR_SIZE = 1;
+const int OUTPUT_BBOX_SIZE = 3;
+const int OUTPUT_BBOX_TWO_INDEX_SHAPE = 6;
+const int OUTPUT_BBOX_INDEX = 0;
+// index
+const int YUV_DE = 2;
+const int YUV_NU = 4;
+const int BBOX_INDEX_LX = 0;
+const int BBOX_INDEX_LY = 1;
+const int BBOX_INDEX_RX = 2;
+const int BBOX_INDEX_RY = 3;
+const int BBOX_INDEX_SCORE = 4;
+const int BBOX_INDEX_CLASS = 5;
+const int BBOX_INDEX_SCALE_NUM = 6;
+float coco_class_nameid[80] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+                       22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+                       46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+                       67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90};
+}  // namespace
+
+namespace MxBase {
+
+CenterNetMindsporePost &CenterNetMindsporePost::operator=(const CenterNetMindsporePost &other) {
+    if (this == &other) {
+        return *this;
+    }
+    ObjectPostProcessBase::operator=(other);
+    return *this;
+}
+
+APP_ERROR CenterNetMindsporePost::ReadConfigParams() {
+    APP_ERROR ret = configData_.GetFileValue<uint32_t>("CLASS_NUM", classNum_);
+    if (ret != APP_ERR_OK) {
+        LogWarn << GetError(ret) << "No CLASS_NUM in config file, default value(" << classNum_ << ").";
+    }
+
+    ret = configData_.GetFileValue<uint32_t>("RPN_MAX_NUM", rpnMaxNum_);
+    if (ret != APP_ERR_OK) {
+        LogWarn << GetError(ret) << "No RPN_MAX_NUM in config file, default value(" << rpnMaxNum_ << ").";
+    }
+
+
+    LogInfo << "The config parameters of post process are as follows: \n";
+    LogInfo << " CLASS_NUM: " << classNum_;
+    LogInfo << " RPN_MAX_NUM: " << rpnMaxNum_;
+    return APP_ERR_OK;
+}
+
+APP_ERROR CenterNetMindsporePost::Init(const std::map<std::string, std::shared_ptr<void>> &postConfig) {
+    LogInfo << "Begin to initialize CenterNetMindsporePost.";
+    APP_ERROR ret = ObjectPostProcessBase::Init(postConfig);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Failed to init the base class ObjectPostProcessBase.";
+        return ret;
+    }
+
+    ReadConfigParams();
+    LogInfo << "End to initialize CenterNetMindsporePost.";
+    return APP_ERR_OK;
+}
+
+APP_ERROR CenterNetMindsporePost::DeInit() {
+    LogInfo << "Begin to deinitialize CenterNetMindsporePost.";
+    LogInfo << "End to deinitialize CenterNetMindsporePost.";
+    return APP_ERR_OK;
+}
+
+bool CenterNetMindsporePost::IsValidTensors(const std::vector<TensorBase> &tensors) const {
+    if (tensors.size() < OUTPUT_TENSOR_SIZE) {
+        LogError << "The number of tensor (" << tensors.size() << ") is less than required (" << OUTPUT_TENSOR_SIZE
+                 << ")";
+        return false;
+    }
+
+    auto bboxShape = tensors[OUTPUT_BBOX_INDEX].GetShape();
+    if (bboxShape.size() != OUTPUT_BBOX_SIZE) {
+        LogError << "The number of tensor[" << OUTPUT_BBOX_INDEX << "] dimensions (" << bboxShape.size()
+                 << ") is not equal to (" << OUTPUT_BBOX_SIZE << ")";
+        return false;
+    }
+
+    if (bboxShape[VECTOR_SECOND_INDEX] != rpnMaxNum_) {
+        LogError << "The output tensor is mismatched: " << rpnMaxNum_ << "/" << bboxShape[VECTOR_SECOND_INDEX] << ").";
+        return false;
+    }
+
+    if (bboxShape[VECTOR_THIRD_INDEX] != OUTPUT_BBOX_TWO_INDEX_SHAPE) {
+        LogError << "The number of bbox[" << VECTOR_THIRD_INDEX << "] dimensions (" << bboxShape[VECTOR_THIRD_INDEX]
+                 << ") is not equal to (" << OUTPUT_BBOX_TWO_INDEX_SHAPE << ")";
+        return false;
+    }
+    return true;
+}
+
+void CenterNetMindsporePost::Resize_Affine(const cv::Mat &srcDet, cv::Mat &dstDet,
+                                            const ResizedImageInfo &resizedImageInfos) {
+    int new_width, new_height, width, height;
+    float ss = static_cast<float>(YUV_DE);
+    new_height = static_cast<int>(floor(resizedImageInfos.heightResize / YUV_NU));
+    new_width = static_cast<int>(floor(resizedImageInfos.widthResize / YUV_NU));
+    width = static_cast<int>(resizedImageInfos.widthOriginal);
+    height = static_cast<int>(resizedImageInfos.heightOriginal);
+
+    cv::Point2f srcPoint2f[3], dstPoint2f[3];
+    int max_h_w = std::max(static_cast<int>(resizedImageInfos.widthOriginal),
+                           static_cast<int>(resizedImageInfos.heightOriginal));
+    srcPoint2f[0] = cv::Point2f(static_cast<float>(width / ss), static_cast<float>(height / ss));
+    srcPoint2f[1] = cv::Point2f(static_cast<float>(width / ss),
+                                static_cast<float>((height - max_h_w) / ss));
+    srcPoint2f[2] = cv::Point2f(static_cast<float>((width - max_h_w) / ss),
+                                static_cast<float>((height - max_h_w) / ss));
+    dstPoint2f[0] = cv::Point2f(static_cast<float>(new_width) / ss, static_cast<float>(new_height) / ss);
+    dstPoint2f[1] = cv::Point2f(static_cast<float>(new_width) / ss, 0.0);
+    dstPoint2f[2] = cv::Point2f(0.0, 0.0);
+
+    cv::Mat warp_mat(2, 3, CV_32FC1);
+    warp_mat = cv::getAffineTransform(dstPoint2f, srcPoint2f);
+    dstDet = warp_mat;
+}
+
+void CenterNetMindsporePost::affine_transform(const cv::Mat &A, const cv::Mat &B, cv::Mat &dst) {
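+    // Naive matrix multiplication dst = A * B (A stores doubles, B stores floats),
+    // used to apply the 2x3 affine matrix to a homogeneous point.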
+    float sum = 0;
+    for (int i = 0; i < A.rows; ++i) {
+        for (int j = 0; j < B.cols; ++j) {
+            for (int k = 0; k < A.cols; ++k) {
+                double s, l;
+                s = A.at<double>(i, k);
+                l = B.at<float>(k, j);
+                sum += s * l;
+                dst.at<float>(i, j) = sum;
+            }
+            sum = 0;
+        }
+    }
+}
+
+void CenterNetMindsporePost::soft_nms(cv::Mat &src, int s, const float sigma,
+                                      const float Nt, const float threshold) {
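+    // Gaussian soft-NMS: rather than discarding overlapping boxes, their scores are decayed
+    // by exp(-iou^2 / sigma); boxes whose score drops below threshold are swapped out.
+    // Nt is kept for the linear soft-NMS variant and is unused by the gaussian weighting.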
+    for (int i = 0; i < s; i++) {
+        float tx1, tx2, ty1, ty2, ts, maxscore;
+        int pos, maxpos;
+        maxscore = src.at<float>(i, YUV_NU);
+        maxpos = i;
+        tx1 = src.at<float>(i, BBOX_INDEX_LX);
+        ty1 = src.at<float>(i, BBOX_INDEX_LY);
+        tx2 = src.at<float>(i, BBOX_INDEX_RX);
+        ty2 = src.at<float>(i, BBOX_INDEX_RY);
+        ts = src.at<float>(i, BBOX_INDEX_SCORE);
+        pos = i + 1;
+        // get max box
+        while (pos < s) {
+            float ss = src.at<float>(pos, YUV_NU);
+            if (maxscore < ss) {
+                maxscore = ss;
+                maxpos = pos;
+            }
+            pos = pos + 1;
+        }
+        // add max box as a detection
+        src.at<float>(i, BBOX_INDEX_LX) = src.at<float>(maxpos, BBOX_INDEX_LX);
+        src.at<float>(i, BBOX_INDEX_LY) = src.at<float>(maxpos, BBOX_INDEX_LY);
+        src.at<float>(i, BBOX_INDEX_RX) = src.at<float>(maxpos, BBOX_INDEX_RX);
+        src.at<float>(i, BBOX_INDEX_RY) = src.at<float>(maxpos, BBOX_INDEX_RY);
+        src.at<float>(i, BBOX_INDEX_SCORE) = src.at<float>(maxpos, BBOX_INDEX_SCORE);
+
+        // swap ith box with position of max box
+        src.at<float>(maxpos, BBOX_INDEX_LX) = tx1;
+        src.at<float>(maxpos, BBOX_INDEX_LY) = ty1;
+        src.at<float>(maxpos, BBOX_INDEX_RX) = tx2;
+        src.at<float>(maxpos, BBOX_INDEX_RY) = ty2;
+        src.at<float>(maxpos, BBOX_INDEX_SCORE) = ts;
+
+        tx1 = src.at<float>(i, BBOX_INDEX_LX);
+        ty1 = src.at<float>(i, BBOX_INDEX_LY);
+        tx2 = src.at<float>(i, BBOX_INDEX_RX);
+        ty2 = src.at<float>(i, BBOX_INDEX_RY);
+        ts = src.at<float>(i, BBOX_INDEX_SCORE);
+        pos = i +1;
+        // NMS iterations, note that N changes if detection boxes fall below threshold
+        while (pos < s) {
+            float x1, x2, y1, y2, area, iw;
+            x1 = src.at<float>(pos, BBOX_INDEX_LX);
+            y1 = src.at<float>(pos, BBOX_INDEX_LY);
+            x2 = src.at<float>(pos, BBOX_INDEX_RX);
+            y2 = src.at<float>(pos, BBOX_INDEX_RY);
+
+            area = (x2 - x1 + 1) * (y2 - y1 + 1);
+            iw = (std::min(tx2, x2) - std::max(tx1, x1) + 1);
+            if (iw > 0) {
+                float ih;
+                ih = (std::min(ty2, y2) - std::max(ty1, y1) + 1);
+                if (ih > 0) {
+                    float weight, ov, ua;
+                    ua = static_cast<float>((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih);
+                    ov = iw * ih / ua;  // iou between max box and detection box
+                    // gaussian
+                    weight = std::exp(-(ov * ov)/sigma);
+                    src.at<float>(pos, YUV_NU) = weight * (src.at<float>(pos, YUV_NU));
+                    // if box score falls below threshold, discard the box by swapping with last box
+                    // update s
+                    if ((src.at<float>(pos, YUV_NU)) < threshold) {
+                        int ss1 = s - 1;
+                        src.at<float>(pos, BBOX_INDEX_LX) = src.at<float>(ss1, BBOX_INDEX_LX);
+                        src.at<float>(pos, BBOX_INDEX_LY) = src.at<float>(ss1, BBOX_INDEX_LY);
+                        src.at<float>(pos, BBOX_INDEX_RX) = src.at<float>(ss1, BBOX_INDEX_RX);
+                        src.at<float>(pos, BBOX_INDEX_RY) = src.at<float>(ss1, BBOX_INDEX_RY);
+                        src.at<float>(pos, BBOX_INDEX_SCORE) = src.at<float>(ss1, BBOX_INDEX_SCORE);
+                        s = s - 1;
+                        pos = pos - 1;
+                    }
+                }
+            }
+            pos = pos + 1;
+        }
+    }
+}
+
+void CenterNetMindsporePost::sort_id(float src[][6], const int sum) {
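+    // Bubble sort the detections by class id so that boxes of the same class are contiguous.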
+    for (int k = sum ; k > 0; k--) {
+        for (int m = 0; m < k - 1; m++) {
+            if (src[m][BBOX_INDEX_CLASS] > src[m+1][BBOX_INDEX_CLASS]) {
+                float t0 = src[m][BBOX_INDEX_LX];
+                float t1 = src[m][BBOX_INDEX_LY];
+                float t2 = src[m][BBOX_INDEX_RX];
+                float t3 = src[m][BBOX_INDEX_RY];
+                float t4 = src[m][BBOX_INDEX_SCORE];
+                float t5 = src[m][BBOX_INDEX_CLASS];
+                src[m][BBOX_INDEX_LX] = src[m+1][BBOX_INDEX_LX];
+                src[m][BBOX_INDEX_LY] = src[m+1][BBOX_INDEX_LY];
+                src[m][BBOX_INDEX_RX] = src[m+1][BBOX_INDEX_RX];
+                src[m][BBOX_INDEX_RY] = src[m+1][BBOX_INDEX_RY];
+                src[m][BBOX_INDEX_SCORE] = src[m+1][BBOX_INDEX_SCORE];
+                src[m][BBOX_INDEX_CLASS] = src[m+1][BBOX_INDEX_CLASS];
+                src[m+1][BBOX_INDEX_LX] = t0;
+                src[m+1][BBOX_INDEX_LY] = t1;
+                src[m+1][BBOX_INDEX_RX] = t2;
+                src[m+1][BBOX_INDEX_RY] = t3;
+                src[m+1][BBOX_INDEX_SCORE] = t4;
+                src[m+1][BBOX_INDEX_CLASS] = t5;
+            }
+        }
+    }
+}
+
+void CenterNetMindsporePost::set_nms(float data[][6], int (*p)[2], const int num) {
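+    // Apply soft-NMS independently per class; p[s][1] holds the number of boxes of class s.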
+    int s1 = 0;
+    int s2 = 0;
+    float sigma = 0.5;
+    float Nt = 0.5;
+    float threshold = 0.001;
+    for (int s = 0; s < num; s++) {
+        int r = *(*(p + s) + 1);
+        if (r !=0) {
+            float class0[r][5];
+            for (int t = 0; t < r; t++) {
+                class0[t][BBOX_INDEX_LX] = data[s1][BBOX_INDEX_LX];
+                class0[t][BBOX_INDEX_LY] = data[s1][BBOX_INDEX_LY];
+                class0[t][BBOX_INDEX_RX] = data[s1][BBOX_INDEX_RX];
+                class0[t][BBOX_INDEX_RY] = data[s1][BBOX_INDEX_RY];
+                class0[t][BBOX_INDEX_SCORE] = data[s1][BBOX_INDEX_SCORE];
+                s1++;
+            }
+            cv::Mat class1(r, 5, CV_32FC1, (reinterpret_cast<float*>(class0)));
+            soft_nms(class1, r, sigma, Nt, threshold);
+            // output and transfer data after soft_nms
+            for (int u = 0; u < r; u++) {
+                data[s2][BBOX_INDEX_LX] = class1.at<float>(u, BBOX_INDEX_LX);
+                data[s2][BBOX_INDEX_LY] = class1.at<float>(u, BBOX_INDEX_LY);
+                data[s2][BBOX_INDEX_RX] = class1.at<float>(u, BBOX_INDEX_RX);
+                data[s2][BBOX_INDEX_RY] = class1.at<float>(u, BBOX_INDEX_RY);
+                data[s2][BBOX_INDEX_SCORE] = class1.at<float>(u, BBOX_INDEX_SCORE);
+                s2++;
+            }
+        }
+    }
+}
+
+void CenterNetMindsporePost::GetValidDetBoxes(const std::vector<TensorBase> &tensors, std::vector<DetectBox> &detBoxes,
+                                               const ResizedImageInfo &resizedImageInfos, uint32_t batchNum) {
+    LogInfo << "Begin to GetValidDetBoxes.";
+    auto *bboxPtr = reinterpret_cast<float *>(GetBuffer(tensors[OUTPUT_BBOX_INDEX], batchNum));  // 1 * 100 *6
+    size_t total = rpnMaxNum_;
+    int tol = rpnMaxNum_;
+    int cnum = classNum_;
+    float first[100][6] = {};
+    float det0[100][2] = {};
+    float det1[100][2] = {};
+    std::string cName[100] = {};
+    int i = 0;
+    for (size_t index = 0; index < total; ++index) {
+        size_t startIndex = index * BBOX_INDEX_SCALE_NUM;
+        first[i][BBOX_INDEX_LX]  = bboxPtr[startIndex + BBOX_INDEX_LX];
+        first[i][BBOX_INDEX_LY]  = bboxPtr[startIndex + BBOX_INDEX_LY];
+        first[i][BBOX_INDEX_RX]  = bboxPtr[startIndex + BBOX_INDEX_RX];
+        first[i][BBOX_INDEX_RY]  = bboxPtr[startIndex + BBOX_INDEX_RY];
+        first[i][BBOX_INDEX_SCORE]  = bboxPtr[startIndex + BBOX_INDEX_SCORE];
+        first[i][BBOX_INDEX_CLASS]  = bboxPtr[startIndex + BBOX_INDEX_CLASS];
+        det0[i][0] = bboxPtr[startIndex + BBOX_INDEX_LX];
+        det0[i][1] = bboxPtr[startIndex + BBOX_INDEX_LY];
+        det1[i][0] = bboxPtr[startIndex + BBOX_INDEX_RX];
+        det1[i][1] = bboxPtr[startIndex + BBOX_INDEX_RY];
+        i += 1;
+    }
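+    // Build the affine matrix that maps network output coordinates back to the original
+    // image, then apply it to both corners of every box.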
+    cv::Mat Det0(100, 2, CV_32FC1, (reinterpret_cast<float*>(det0)));
+    cv::Mat Det1(100, 2, CV_32FC1, (reinterpret_cast<float*>(det1)));
+    cv::Mat Dst0(2, 3, CV_32FC1);
+    Resize_Affine(Det0, Dst0, resizedImageInfos);
+    // bbox affine
+    cv::Mat D0 = cv::Mat::ones(3, 1, CV_32FC1);
+    cv::Mat D1 = cv::Mat::ones(3, 1, CV_32FC1);
+    cv::Mat Dst1(2, 1, CV_32FC1);
+    cv::Mat Dst2(2, 1, CV_32FC1);
+    for (int a = 0; a < tol; a++) {
+        D0.at<float>(0, 0) = first[a][BBOX_INDEX_LX];
+        D0.at<float>(1, 0) = first[a][BBOX_INDEX_LY];
+        D1.at<float>(0, 0) = first[a][BBOX_INDEX_RX];
+        D1.at<float>(1, 0) = first[a][BBOX_INDEX_RY];
+        affine_transform(Dst0, D0, Dst1);
+        affine_transform(Dst0, D1, Dst2);
+        float X1 = Dst1.at<float>(0, 0);
+        float Y1 = Dst1.at<float>(1, 0);
+        float X2 = Dst2.at<float>(0, 0);
+        float Y2 = Dst2.at<float>(1, 0);
+        first[a][BBOX_INDEX_LX] = X1;
+        first[a][BBOX_INDEX_LY] = Y1;
+        first[a][BBOX_INDEX_RX] = X2;
+        first[a][BBOX_INDEX_RY] = Y2;
+    }
+    sort_id(first, tol);
+    int class_id[cnum][2];  // save class_id and number
+    for (int i0 = 0; i0 < cnum; i0++) {
+        class_id[i0][0] = i0;
+        class_id[i0][1] = 0;
+    }
+    for (int a0 = 0; a0 < tol; a0++) {
+        int c0 = 0;
+        int id1 = static_cast<int>(first[a0][BBOX_INDEX_CLASS]);
+        while (c0 < cnum) {
+            if (id1 == c0) {
+                class_id[c0][1]++;
+            }
+            c0++;
+        }
+    }
+    int (*p)[2];
+    p = class_id;
+    set_nms(first, p, cnum);
+    // use new class_names replace old class_names
+    for (int d = 0; d < tol; d++) {
+        int id0 = static_cast<int>(first[d][5]);
+        first[d][5] = coco_class_nameid[id0];
+        cName[d] = configData_.GetClassName(id0);
+    }
+    for (int f = 0; f < tol; f++) {
+        float XX1 = first[f][BBOX_INDEX_LX];
+        float YY1 = first[f][BBOX_INDEX_LY];
+        float XX2 = first[f][BBOX_INDEX_RX];
+        float YY2 = first[f][BBOX_INDEX_RY];
+        XX2 -= XX1;
+        YY2 -= YY1;
+        MxBase::DetectBox detBox;
+        detBox.x = (XX1 + XX2) / COORDINATE_PARAM;
+        detBox.y = (YY1 + YY2) / COORDINATE_PARAM;  // COORDINATE_PARAM = 2
+        detBox.width = XX2 - XX1;
+        detBox.height = YY2 - YY1;
+        detBox.prob = first[f][BBOX_INDEX_SCORE];
+        detBox.classID = first[f][BBOX_INDEX_CLASS];
+        detBox.className = cName[f];
+        detBoxes.push_back(detBox);
+    }
+}
+
+void CenterNetMindsporePost::ConvertObjInfoFromDetectBox(std::vector<DetectBox> &detBoxes,
+                                                          std::vector<ObjectInfo> &objectInfos,
+                                                          const ResizedImageInfo &resizedImageInfo) {
+     for (auto &detBoxe : detBoxes) {
+        if (detBoxe.classID < 0) {
+            continue;
+        }
+        ObjectInfo objInfo = {};
+        objInfo.classId = static_cast<float>(detBoxe.classID);
+        objInfo.className = detBoxe.className;
+        objInfo.confidence = detBoxe.prob;
+
+        objInfo.x0 = static_cast<float>(detBoxe.x - detBoxe.width / COORDINATE_PARAM);
+        objInfo.y0 = static_cast<float>(detBoxe.y - detBoxe.height / COORDINATE_PARAM);
+        objInfo.x1 = static_cast<float>(detBoxe.x + detBoxe.width / COORDINATE_PARAM);
+        objInfo.y1 = static_cast<float>(detBoxe.y + detBoxe.height / COORDINATE_PARAM);
+        objectInfos.push_back(objInfo);
+    }
+}
+
+void CenterNetMindsporePost::ObjectDetectionOutput(const std::vector<TensorBase> &tensors,
+                                                    std::vector<std::vector<ObjectInfo>> &objectInfos,
+                                                    const std::vector<ResizedImageInfo> &resizedImageInfos) {
+    LogDebug << "CenterNetMindsporePost start to write results.";
+    auto shape = tensors[OUTPUT_BBOX_INDEX].GetShape();
+    uint32_t batchSize = shape[0];
+    for (uint32_t i = 0; i < batchSize; ++i) {
+        std::vector<MxBase::DetectBox> detBoxes;
+        std::vector<ObjectInfo> objectInfo;
+        GetValidDetBoxes(tensors, detBoxes, resizedImageInfos[i], i);
+        ConvertObjInfoFromDetectBox(detBoxes, objectInfo, resizedImageInfos[i]);
+        objectInfos.push_back(objectInfo);
+    }
+    LogDebug << "CenterNetMindsporePost write results succeeded.";
+}
+
+APP_ERROR CenterNetMindsporePost::Process(const std::vector<TensorBase> &tensors,
+                                           std::vector<std::vector<ObjectInfo>> &objectInfos,
+                                           const std::vector<ResizedImageInfo> &resizedImageInfos,
+                                           const std::map<std::string, std::shared_ptr<void>> &configParamMap) {
+    LogDebug << "Begin to process CenterNetMindsporePost.";
+    auto inputs = tensors;
+    APP_ERROR ret = CheckAndMoveTensors(inputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "CheckAndMoveTensors failed, ret=" << ret;
+        return ret;
+    }
+    ObjectDetectionOutput(inputs, objectInfos, resizedImageInfos);
+    LogInfo << "End to process CenterNetMindsporePost.";
+    return APP_ERR_OK;
+}
+
+extern "C" {
+std::shared_ptr<MxBase::CenterNetMindsporePost> GetObjectInstance() {
+    LogInfo << "Begin to get CenterNetMindsporePost instance.";
+    auto instance = std::make_shared<CenterNetMindsporePost>();
+    LogInfo << "End to get CenterNetMindsporePost Instance";
+    return instance;
+}
+}
+
+}  // namespace MxBase
diff --git a/research/cv/centernet_det/infer/mxbase/src/PostProcess/CenterNetMindsporePost.h b/research/cv/centernet_det/infer/mxbase/src/PostProcess/CenterNetMindsporePost.h
new file mode 100644
index 0000000000000000000000000000000000000000..90072ce6f33c9907c96252d00b6370ea52033b93
--- /dev/null
+++ b/research/cv/centernet_det/infer/mxbase/src/PostProcess/CenterNetMindsporePost.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CENTERNET_MINDSPORE_POST_H
+#define CENTERNET_MINDSPORE_POST_H
+#include <algorithm>
+#include <vector>
+#include <map>
+#include <string>
+#include <memory>
+#include <opencv4/opencv2/opencv.hpp>
+#include "MxBase/ErrorCode/ErrorCode.h"
+#include "MxBase/CV/Core/DataType.h"
+#include "MxBase/PostProcessBases/ObjectPostProcessBase.h"
+
+namespace MxBase {
+
+class CenterNetMindsporePost : public ObjectPostProcessBase {
+ public:
+    CenterNetMindsporePost() = default;
+
+    ~CenterNetMindsporePost() = default;
+
+    CenterNetMindsporePost(const CenterNetMindsporePost &other) = default;
+
+    CenterNetMindsporePost &operator=(const CenterNetMindsporePost &other);
+
+    APP_ERROR Init(const std::map<std::string, std::shared_ptr<void>> &postConfig) override;
+
+    APP_ERROR DeInit() override;
+
+    APP_ERROR Process(const std::vector<TensorBase> &tensors, std::vector<std::vector<ObjectInfo>> &objectInfos,
+                      const std::vector<ResizedImageInfo> &resizedImageInfos = {},
+                      const std::map<std::string, std::shared_ptr<void>> &configParamMap = {}) override;
+    bool IsValidTensors(const std::vector<TensorBase> &tensors) const override;
+
+ private:
+    void Resize_Affine(const cv::Mat &srcDet, cv::Mat &dstDet,
+                       const ResizedImageInfo &resizedImageInfos);
+    void affine_transform(const cv::Mat &A, const cv::Mat &B, cv::Mat &dst);
+    void soft_nms(cv::Mat &src, int s, const float sigma, const float Nt, const float threshold);
+    void sort_id(float src[][6], const int sum);
+    void set_nms(float data[][6], int (*p)[2], const int num);
+    void ObjectDetectionOutput(const std::vector<TensorBase> &tensors,
+                               std::vector<std::vector<ObjectInfo>> &objectInfos,
+                               const std::vector<ResizedImageInfo> &resizedImageInfos);
+
+    void GetValidDetBoxes(const std::vector<TensorBase> &tensors, std::vector<DetectBox> &detBoxes,
+                          const ResizedImageInfo &resizedImageInfos, uint32_t batchNum);
+
+    void ConvertObjInfoFromDetectBox(std::vector<DetectBox> &detBoxes, std::vector<ObjectInfo> &objectInfos,
+                                     const ResizedImageInfo &resizedImageInfo);
+
+    APP_ERROR ReadConfigParams();
+
+ private:
+    const uint32_t DEFAULT_CLASS_NUM_MS = 80;
+    const uint32_t DEFAULT_RPN_MAX_NUM_MS = 100;
+
+    uint32_t classNum_ = DEFAULT_CLASS_NUM_MS;
+    uint32_t rpnMaxNum_ = DEFAULT_RPN_MAX_NUM_MS;
+};
+
+extern "C" {
+std::shared_ptr<MxBase::CenterNetMindsporePost> GetObjectInstance();
+}
+}  // namespace MxBase
+#endif  // CENTERNET_MINDSPORE_POST_H
diff --git a/research/cv/centernet_det/infer/mxbase/src/main.cpp b/research/cv/centernet_det/infer/mxbase/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..88fd18dd5456f844221f8b2afbdffca342c1f62b
--- /dev/null
+++ b/research/cv/centernet_det/infer/mxbase/src/main.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "CenterNet.h"
+#include "MxBase/Log/Log.h"
+
+namespace {
+const uint32_t DEVICE_ID = 0;
+const char RESULT_PATH[] = "../data/predict_result.json";
+
+// parameters of post process
+const uint32_t CLASS_NUM = 80;
+const char LABEL_PATH[] = "../data/config/coco2017.names";
+
+}  // namespace
+
+int main(int argc, char *argv[]) {
+    if (argc <= 2) {
+        LogWarn << "Please input image path, such as './centernet_mindspore [om_file_path] [img_path]'.";
+        return APP_ERR_OK;
+    }
+
+    InitParam initParam = {};
+    initParam.deviceId = DEVICE_ID;
+    initParam.classNum = CLASS_NUM;
+    initParam.labelPath = LABEL_PATH;
+    initParam.checkTensor = true;
+    initParam.modelPath = argv[1];
+    auto inferCenterNet = std::make_shared<CenterNet>();
+    APP_ERROR ret = inferCenterNet->Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "CenterNet init failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    std::string imgPath = argv[2];
+    ret = inferCenterNet->Process(imgPath, RESULT_PATH);
+    if (ret != APP_ERR_OK) {
+        LogError << "CenterNet process failed, ret=" << ret << ".";
+        inferCenterNet->DeInit();
+        return ret;
+    }
+    inferCenterNet->DeInit();
+    return APP_ERR_OK;
+}
diff --git a/research/cv/centernet_det/infer/requirements.txt b/research/cv/centernet_det/infer/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..90910aba9ded96f45f351f5bf01dfddca0c0ad99
--- /dev/null
+++ b/research/cv/centernet_det/infer/requirements.txt
@@ -0,0 +1,3 @@
+opencv-python==4.5.1.48
+matplotlib==3.4.1
+protobuf==3.16.0
diff --git a/research/cv/centernet_det/infer/sdk/api/__init__.py b/research/cv/centernet_det/infer/sdk/api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/research/cv/centernet_det/infer/sdk/api/image.py b/research/cv/centernet_det/infer/sdk/api/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1ef6b00aad6f110ed5fdeb37b261013a653f4bc
--- /dev/null
+++ b/research/cv/centernet_det/infer/sdk/api/image.py
@@ -0,0 +1,86 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+Image pre-process functions
+"""
+
+import numpy as np
+import cv2
+
+def transform_preds(coords, center, scale, output_size):
+    """transform prediction to new coords"""
+    target_coords = np.zeros(coords.shape)
+    trans = get_affine_transform(center, scale, 0, output_size, inv=1)
+    for p in range(coords.shape[0]):
+        target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
+    return target_coords
+
+def get_affine_transform(center,
+                         scale,
+                         rot,
+                         output_size,
+                         shift=np.array([0, 0], dtype=np.float32),
+                         inv=0):
+    """get affine matrix"""
+    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
+        scale = np.array([scale, scale], dtype=np.float32)
+
+    scale_tmp = scale
+    src_w = scale_tmp[0]
+    dst_w = output_size[0]
+    dst_h = output_size[1]
+
+    rot_rad = np.pi * rot / 180
+    src_dir = get_dir([0, src_w * -0.5], rot_rad)
+    dst_dir = np.array([0, dst_w * -0.5], np.float32)
+
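+    # Build matching source/destination triangles: the box center, a point shifted along
+    # the (possibly rotated) vertical axis, and a perpendicular third point; three point
+    # pairs are enough for cv2.getAffineTransform to solve the 2x3 affine matrix.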
+    src = np.zeros((3, 2), dtype=np.float32)
+    dst = np.zeros((3, 2), dtype=np.float32)
+    src[0, :] = center + scale_tmp * shift
+    src[1, :] = center + src_dir + scale_tmp * shift
+    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
+    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
+
+    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
+    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
+
+    if inv:
+        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
+    else:
+        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
+
+    return trans
+
+def affine_transform(pt, t):
+    """get new position after affine"""
+    new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
+    new_pt = np.dot(t, new_pt)
+    return new_pt[:2]
+
+
+def get_3rd_point(a, b):
+    """get the third point to calculate affine matrix"""
+    direct = a - b
+    return b + np.array([-direct[1], direct[0]], dtype=np.float32)
+
+def get_dir(src_point, rot_rad):
+    """get new pos after rotate"""
+    sn, cs = np.sin(rot_rad), np.cos(rot_rad)
+
+    src_result = [0, 0]
+    src_result[0] = src_point[0] * cs - src_point[1] * sn
+    src_result[1] = src_point[0] * sn + src_point[1] * cs
+
+    return src_result
diff --git a/research/cv/centernet_det/infer/sdk/api/infer.py b/research/cv/centernet_det/infer/sdk/api/infer.py
new file mode 100644
index 0000000000000000000000000000000000000000..795da4a33bd651f5a6fd775fda608485c1a0f20b
--- /dev/null
+++ b/research/cv/centernet_det/infer/sdk/api/infer.py
@@ -0,0 +1,150 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the BSD 3-Clause License  (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+"""
+Inference Api
+"""
+import json
+import logging
+from config import config as cfg
+import MxpiDataType_pb2 as MxpiDataType
+from StreamManagerApi import StreamManagerApi, MxDataInput, InProtobufVector, MxProtobufIn
+
+
+class SdkApi:
+    """
+    Manage the pipeline stream
+    """
+    INFER_TIMEOUT = cfg.INFER_TIMEOUT
+    STREAM_NAME = cfg.STREAM_NAME
+
+    def __init__(self, pipeline_cfg):
+        """
+        Parameter initialization
+        """
+        self.pipeline_cfg = pipeline_cfg
+        self._stream_api = None
+        self._data_input = None
+        self._device_id = None
+
+    def init(self):
+        """
+        Stream initialization
+        """
+        with open(self.pipeline_cfg, 'r') as fp:
+            self._device_id = int(json.loads(fp.read())[self.STREAM_NAME]["stream_config"]["deviceId"])
+
+            print(f"The device id: {self._device_id}.")
+
+        # create api
+        self._stream_api = StreamManagerApi()
+
+        # init stream mgr
+        ret = self._stream_api.InitManager()
+        if ret != 0:
+            print(f"Failed to init stream manager, ret={ret}.")
+            return False
+
+        # create streams
+        with open(self.pipeline_cfg, 'rb') as fp:
+            pipe_line = fp.read()
+
+        ret = self._stream_api.CreateMultipleStreams(pipe_line)
+        if ret != 0:
+            print(f"Failed to create stream, ret={ret}.")
+            return False
+
+        self._data_input = MxDataInput()
+        return True
+
+    def __del__(self):
+        if not self._stream_api:
+            return
+
+        self._stream_api.DestroyAllStreams()
+
+    def send_data_input(self, stream_name, plugin_id, input_data):
+        data_input = MxDataInput()
+        data_input.data = input_data
+        unique_id = self._stream_api.SendData(stream_name, plugin_id,
+                                              data_input)
+        if unique_id < 0:
+            logging.error("Fail to send data to stream.")
+            return False
+        return True
+
+    def get_protobuf(self, stream_name, plugin_id, keyVec):
+        result = self._stream_api.GetProtobuf(stream_name, plugin_id, keyVec)
+        return result
+
+    def _send_protobuf(self, stream_name, plugin_id, element_name, buf_type,
+                       pkg_list):
+        """
+        Send a protobuf package to the given stream plugin
+        """
+        protobuf = MxProtobufIn()
+        protobuf.key = element_name.encode("utf-8")
+        protobuf.type = buf_type
+        protobuf.protobuf = pkg_list.SerializeToString()
+        protobuf_vec = InProtobufVector()
+        protobuf_vec.push_back(protobuf)
+        err_code = self._stream_api.SendProtobuf(stream_name, plugin_id,
+                                                 protobuf_vec)
+        if err_code != 0:
+            logging.error(
+                "Failed to send data to stream, stream_name(%s), plugin_id(%s), element_name(%s), "
+                "buf_type(%s), err_code(%s).", stream_name, plugin_id,
+                element_name, buf_type, err_code)
+            return False
+        return True
+
+    def send_img_input(self, stream_name, plugin_id, element_name, input_data,
+                       img_size):
+        """
+        Send preprocessed image data to the stream as an MxpiVisionList
+        """
+        vision_list = MxpiDataType.MxpiVisionList()
+        vision_vec = vision_list.visionVec.add()
+        vision_vec.visionInfo.format = 1
+        vision_vec.visionInfo.width = img_size[1]
+        vision_vec.visionInfo.height = img_size[0]
+        vision_vec.visionInfo.widthAligned = img_size[1]
+        vision_vec.visionInfo.heightAligned = img_size[0]
+        vision_vec.visionData.memType = 0
+        vision_vec.visionData.dataStr = input_data
+        vision_vec.visionData.dataSize = len(input_data)
+
+        buf_type = b"MxTools.MxpiVisionList"
+        return self._send_protobuf(stream_name, plugin_id, element_name,
+                                   buf_type, vision_list)
+
+    def send_tensor_input(self, stream_name, plugin_id, element_name,
+                          input_data, input_shape, data_type):
+        """
+        Send tensor data to the stream as an MxpiTensorPackageList
+        """
+        tensor_list = MxpiDataType.MxpiTensorPackageList()
+        tensor_pkg = tensor_list.tensorPackageVec.add()
+        # init tensor vector
+        tensor_vec = tensor_pkg.tensorVec.add()
+        tensor_vec.deviceId = self._device_id
+        tensor_vec.memType = 0
+        tensor_vec.tensorShape.extend(input_shape)
+        tensor_vec.tensorDataType = data_type
+        tensor_vec.dataStr = input_data
+        tensor_vec.tensorDataSize = len(input_data)
+
+        buf_type = b"MxTools.MxpiTensorPackageList"
+        return self._send_protobuf(stream_name, plugin_id, element_name,
+                                   buf_type, tensor_list)
diff --git a/research/cv/centernet_det/infer/sdk/api/postprocess.py b/research/cv/centernet_det/infer/sdk/api/postprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a8814548f45326242906926e5160de1fb244711
--- /dev/null
+++ b/research/cv/centernet_det/infer/sdk/api/postprocess.py
@@ -0,0 +1,156 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+Post-process functions after decoding
+"""
+
+import numpy as np
+from .image import get_affine_transform, affine_transform, transform_preds
+
+
+valid_ids = [
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
+        14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+        24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
+        37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
+        48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+        58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
+        72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+        82, 84, 85, 86, 87, 88, 89, 90]
+
+
+def coco_box_to_bbox(box):
+    """convert height/width to position coordinates"""
+    bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)
+    return bbox
+
+
+def to_float(x):
+    """format float data"""
+    return float("{:.2f}".format(x))
+
+
+def post_process(dets, meta, num_classes):
+    """rescale detection to original scale"""
+    c, s, h, w = meta['c'], meta['s'], meta['out_height'], meta['out_width']
+    ret = []
+    for i in range(dets.shape[0]):
+        top_preds = {}
+        dets[i, :, :2] = transform_preds(
+            dets[i, :, 0:2], c, s, (w, h))
+        dets[i, :, 2:4] = transform_preds(
+            dets[i, :, 2:4], c, s, (w, h))
+        classes = dets[i, :, -1]
+        for j in range(num_classes):
+            inds = (classes == j)
+            top_preds[j + 1] = np.concatenate([
+                dets[i, inds, :4].astype(np.float32),
+                dets[i, inds, 4:5].astype(np.float32)], axis=1).tolist()
+        ret.append(top_preds)
+
+    for j in range(1, num_classes + 1):
+        ret[0][j] = np.array(ret[0][j], dtype=np.float32).reshape(-1, 5)
+        ret[0][j][:, :4] /= 1
+    return ret[0]
+
+
+def merge_outputs(detections, num_classes):
+    """merge detections together by nms"""
+    results = {}
+    max_per_image = 100
+    # import the compiled Cython soft-NMS once, before the per-class loop
+    try:
+        from nms import soft_nms
+    except ImportError as err:
+        raise ImportError('NMS not installed! Build it via the Makefile in infer/sdk/external '
+                          '(see infer_sdk.sh for details).') from err
+    for j in range(1, num_classes + 1):
+        results[j] = np.concatenate(
+            [detection[j] for detection in detections], axis=0).astype(np.float32)
+        soft_nms(results[j], Nt=0.5, threshold=0.001, method=2)
+
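+    # keep only the top-scoring max_per_image detections across all classes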
+    scores = np.hstack(
+        [results[j][:, 4] for j in range(1, num_classes + 1)])
+    if len(scores) > max_per_image:
+        kth = len(scores) - max_per_image
+        thresh = np.partition(scores, kth)[kth]
+        for j in range(1, num_classes + 1):
+            keep_inds = (results[j][:, 4] >= thresh)
+            results[j] = results[j][keep_inds]
+    return results
+
+
+def convert_eval_format(detections, img_id, _valid_ids):
+    """convert detection to annotation json format"""
+    pred_anno = {"images": [], "annotations": []}
+    for cls_ind in detections:
+        class_id = _valid_ids[cls_ind - 1]
+        for det in detections[cls_ind]:
+            score = det[4]
+            bbox = det[0:4]
+            bbox[2:4] = det[2:4] - det[0:2]
+            bbox = list(map(to_float, bbox))
+
+            pred = {
+                "image_id": int(img_id),
+                "category_id": int(class_id),
+                "bbox": bbox,
+                "score": to_float(score),
+            }
+            pred_anno["annotations"].append(pred)
+    if pred_anno["annotations"]:
+        pred_anno["images"].append({"id": int(img_id)})
+    return pred_anno
+
+
+def resize_detection(detection, pred, gt):
+    """resize object annotation info"""
+    height, width = gt[0], gt[1]
+    c = np.array([pred[1] / 2., pred[0] / 2.], dtype=np.float32)
+    s = max(pred[0], pred[1]) * 1.0
+    trans_output = get_affine_transform(c, s, 0, [width, height])
+
+    anns = detection["annotations"]
+    num_objects = len(anns)
+    resized_detection = {"images": detection["images"], "annotations": []}
+    for i in range(num_objects):
+        ann = anns[i]
+        bbox = coco_box_to_bbox(ann['bbox'])
+        bbox[:2] = affine_transform(bbox[:2], trans_output)
+        bbox[2:] = affine_transform(bbox[2:], trans_output)
+        bbox[0::2] = np.clip(bbox[0::2], 0, width - 1)
+        bbox[1::2] = np.clip(bbox[1::2], 0, height - 1)
+        h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
+        bbox = [bbox[0], bbox[1], w, h]
+        ann["bbox"] = list(map(to_float, bbox))
+        resized_detection["annotations"].append(ann)
+    return resized_detection
+
+
+def data_process(result, meta, image_id, num_classes):
+    """process inference data"""
+    pred_annos = {"images": [], "annotations": []}
+    detections = []
+    dets = post_process(np.array(result), meta, num_classes)
+    detections.append(dets)
+    detections = merge_outputs(detections, num_classes)
+    pred_json = convert_eval_format(detections, image_id, valid_ids)
+
+    for image_info in pred_json["images"]:
+        pred_annos["images"].append(image_info)
+    for image_anno in pred_json["annotations"]:
+        pred_annos["annotations"].append(image_anno)
+
+    return pred_annos
diff --git a/research/cv/centernet_det/infer/sdk/api/visual.py b/research/cv/centernet_det/infer/sdk/api/visual.py
new file mode 100644
index 0000000000000000000000000000000000000000..0545949b8ecd9954d5298d0c6acee72b32f758e0
--- /dev/null
+++ b/research/cv/centernet_det/infer/sdk/api/visual.py
@@ -0,0 +1,207 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+Visualization of inference results
+"""
+import random
+
+import cv2
+import numpy as np
+from .image import get_affine_transform, affine_transform
+
+
+coco_class_name2id = {'person': 1, 'bicycle': 2, 'car': 3, 'motorcycle': 4, 'airplane': 5,
+                      'bus': 6, 'train': 7, 'truck': 8, 'boat': 9, 'traffic light': 10,
+                      'fire hydrant': 11, 'stop sign': 13, 'parking meter': 14, 'bench': 15,
+                      'bird': 16, 'cat': 17, 'dog': 18, 'horse': 19, 'sheep': 20, 'cow': 21,
+                      'elephant': 22, 'bear': 23, 'zebra': 24, 'giraffe': 25, 'backpack': 27,
+                      'umbrella': 28, 'handbag': 31, 'tie': 32, 'suitcase': 33, 'frisbee': 34,
+                      'skis': 35, 'snowboard': 36, 'sports ball': 37, 'kite': 38, 'baseball bat': 39,
+                      'baseball glove': 40, 'skateboard': 41, 'surfboard': 42, 'tennis racket': 43,
+                      'bottle': 44, 'wine glass': 46, 'cup': 47, 'fork': 48, 'knife': 49, 'spoon': 50,
+                      'bowl': 51, 'banana': 52, 'apple': 53, 'sandwich': 54, 'orange': 55, 'broccoli': 56,
+                      'carrot': 57, 'hot dog': 58, 'pizza': 59, 'donut': 60, 'cake': 61, 'chair': 62,
+                      'couch': 63, 'potted plant': 64, 'bed': 65, 'dining table': 67, 'toilet': 70,
+                      'tv': 72, 'laptop': 73, 'mouse': 74, 'remote': 75, 'keyboard': 76, 'cell phone': 77,
+                      'microwave': 78, 'oven': 79, 'toaster': 80, 'sink': 81, 'refrigerator': 82,
+                      'book': 84, 'clock': 85, 'vase': 86, 'scissors': 87, 'teddy bear': 88,
+                      'hair drier': 89, 'toothbrush': 90}
+
+
+color_list = [
+        0.000, 0.800, 1.000,
+        0.850, 0.325, 0.098,
+        0.929, 0.694, 0.125,
+        0.494, 0.184, 0.556,
+        0.466, 0.674, 0.188,
+        0.301, 0.745, 0.933,
+        0.635, 0.078, 0.184,
+        0.300, 0.300, 0.300,
+        0.600, 0.600, 0.600,
+        1.000, 0.000, 0.000,
+        1.000, 0.500, 0.000,
+        0.749, 0.749, 0.000,
+        0.000, 1.000, 0.000,
+        0.000, 0.000, 1.000,
+        0.667, 0.000, 1.000,
+        0.333, 0.333, 0.000,
+        0.333, 0.667, 0.333,
+        0.333, 1.000, 0.000,
+        0.667, 0.333, 0.000,
+        0.667, 0.667, 0.000,
+        0.667, 1.000, 0.000,
+        1.000, 0.333, 0.000,
+        1.000, 0.667, 0.000,
+        1.000, 1.000, 0.000,
+        0.000, 0.333, 0.500,
+        0.000, 0.667, 0.500,
+        0.000, 1.000, 0.500,
+        0.333, 0.000, 0.500,
+        0.333, 0.333, 0.500,
+        0.333, 0.667, 0.500,
+        0.333, 1.000, 0.500,
+        0.667, 0.000, 0.500,
+        0.667, 0.333, 0.500,
+        0.667, 0.667, 0.500,
+        0.667, 1.000, 0.500,
+        1.000, 0.000, 0.500,
+        1.000, 0.333, 0.500,
+        1.000, 0.667, 0.500,
+        1.000, 1.000, 0.500,
+        0.000, 0.333, 1.000,
+        0.000, 0.667, 1.000,
+        0.000, 1.000, 1.000,
+        0.333, 0.000, 1.000,
+        0.333, 0.333, 1.000,
+        0.333, 0.667, 1.000,
+        0.333, 1.000, 1.000,
+        0.667, 0.000, 1.000,
+        0.667, 0.333, 1.000,
+        0.667, 0.667, 1.000,
+        0.667, 1.000, 1.000,
+        1.000, 0.000, 1.000,
+        1.000, 0.333, 1.000,
+        1.000, 0.667, 1.000,
+        0.167, 0.800, 0.000,
+        0.333, 0.000, 0.000,
+        0.500, 0.000, 0.000,
+        0.667, 0.000, 0.000,
+        0.833, 0.000, 0.000,
+        1.000, 0.000, 0.000,
+        0.000, 0.667, 0.400,
+        0.000, 0.333, 0.000,
+        0.000, 0.500, 0.000,
+        0.000, 0.667, 0.000,
+        0.000, 0.833, 0.000,
+        0.000, 1.000, 0.000,
+        0.000, 0.000, 0.167,
+        0.000, 0.000, 0.333,
+        0.000, 0.000, 0.500,
+        0.000, 0.000, 0.667,
+        0.000, 0.000, 0.833,
+        0.000, 0.000, 1.000,
+        0.000, 0.200, 0.800,
+        0.143, 0.143, 0.543,
+        0.286, 0.286, 0.286,
+        0.429, 0.429, 0.429,
+        0.571, 0.571, 0.571,
+        0.714, 0.714, 0.714,
+        0.857, 0.857, 0.857,
+        0.000, 0.447, 0.741,
+        0.50, 0.5, 0]
+
+
+def coco_box_to_bbox(box):
+    """convert height/width to position coordinates"""
+    bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)
+    return bbox
+
+
+def resize_image(image, anns, width, height):
+    """resize image to specified scale"""
+    h, w = image.shape[0], image.shape[1]
+    c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32)
+    s = max(image.shape[0], image.shape[1]) * 1.0
+    trans_output = get_affine_transform(c, s, 0, [width, height])
+    out_img = cv2.warpAffine(image, trans_output, (width, height), flags=cv2.INTER_LINEAR)
+
+    num_objects = len(anns)
+    resize_anno = []
+    for i in range(num_objects):
+        ann = anns[i]
+        bbox = coco_box_to_bbox(ann['bbox'])
+        bbox[:2] = affine_transform(bbox[:2], trans_output)
+        bbox[2:] = affine_transform(bbox[2:], trans_output)
+        bbox[0::2] = np.clip(bbox[0::2], 0, width - 1)
+        bbox[1::2] = np.clip(bbox[1::2], 0, height - 1)
+        h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
+        if (h > 0 and w > 0):
+            ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
+            bbox = [ct[0] - w / 2, ct[1] - h / 2, w, h, 1]
+            ann["bbox"] = bbox
+            gt = ann
+            resize_anno.append(gt)
+    return out_img, resize_anno
+
+
+def visual_image(img, annos, save_path, ratio=None, height=None, width=None, name=None, score_threshold=0.01):
+    """visualize image and annotations info"""
+    h, w = img.shape[0], img.shape[1]
+    if height is not None and width is not None and (height != h or width != w):
+        img, annos = resize_image(img, annos, width, height)
+    elif ratio not in (None, 1):
+        img, annos = resize_image(img, annos, w * ratio, h * ratio)
+
+    c_l = np.array(color_list).astype(np.float32)
+    c_l = c_l.reshape((-1, 3)) * 255
+    colors = [(c_l[_]).astype(np.uint8) for _ in range(len(c_l))]
+    colors = np.array(colors, dtype=np.uint8).reshape(len(colors), 3)
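+    # colors is now an (N, 3) uint8 palette indexed by class position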
+
+    h, w = img.shape[0], img.shape[1]
+    num_objects = len(annos)
+    name_list = []
+    id_list = []
+    for class_name, class_id in coco_class_name2id.items():
+        name_list.append(class_name)
+        id_list.append(class_id)
+
+    for i in range(num_objects):
+        ann = annos[i]
+        bbox = coco_box_to_bbox(ann['bbox'])
+        cat_id = ann['category_id']
+        if cat_id in id_list:
+            get_id = id_list.index(cat_id)
+            name = name_list[get_id]
+            c = colors[get_id].tolist()
+        if "score" in ann:
+            score = ann["score"]
+            if score < score_threshold:
+                continue
+            txt = '{}{:.2f}'.format(name, ann["score"])
+            cat_size = cv2.getTextSize(txt, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)[0]
+            cv2.rectangle(img, (int(bbox[0]), int(bbox[1] - cat_size[1] - 5)),
+                          (int(bbox[0] + cat_size[0]), int(bbox[1] - 2)), c, -1)
+            cv2.putText(img, txt, (int(bbox[0]), int(bbox[1] - 5)),
+                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, lineType=cv2.LINE_AA)
+
+        ct = (int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2))
+        cv2.circle(img, ct, 2, c, thickness=-1, lineType=cv2.FILLED)
+        bbox = np.array(bbox, dtype=np.int32).tolist()
+        cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), c, 2)
+
+    if annos and "image_id" in annos[0]:
+        img_id = annos[0]["image_id"]
+    else:
+        img_id = random.randint(0, 9999999)
+    image_name = "cv_image_" + str(img_id) + ".png"
+    cv2.imwrite("{}/{}".format(save_path, image_name), img)
diff --git a/research/cv/centernet_det/infer/sdk/config/centernet.pipeline b/research/cv/centernet_det/infer/sdk/config/centernet.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..c03b753626a9dafa44ee570eebed816b5bc1918c
--- /dev/null
+++ b/research/cv/centernet_det/infer/sdk/config/centernet.pipeline
@@ -0,0 +1,32 @@
+{
+    "im_centernet": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "appsrc0",
+                "modelPath": "../data/models/centernet.om",
+                "waitingTime": "2000",
+                "outputDeviceId": "-1"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "props": {
+                "blocksize": "4096000"
+            },
+            "factory": "appsink"
+        }
+    }
+}
+
+
diff --git a/research/cv/centernet_det/infer/sdk/config/config.py b/research/cv/centernet_det/infer/sdk/config/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..73140fbe914628e3abcda869ff9bdc990079c30b
--- /dev/null
+++ b/research/cv/centernet_det/infer/sdk/config/config.py
@@ -0,0 +1,27 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the BSD 3-Clause License  (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+"""
+Inference parameter configuration
+"""
+MODEL_WIDTH = 512
+MODEL_HEIGHT = 512
+NUM_CLASSES = 80
+SCORE_THRESH = 0.3
+STREAM_NAME = "im_centernet"
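+# STREAM_NAME must match the top-level stream key defined in config/centernet.pipeline ("im_centernet")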
+
+INFER_TIMEOUT = 100000
+
+TENSOR_DTYPE_FLOAT32 = 0
+TENSOR_DTYPE_FLOAT16 = 1
+TENSOR_DTYPE_INT8 = 2
diff --git a/research/cv/centernet_det/infer/sdk/eval/__init__.py b/research/cv/centernet_det/infer/sdk/eval/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/research/cv/centernet_det/infer/sdk/eval/eval_by_sdk.py b/research/cv/centernet_det/infer/sdk/eval/eval_by_sdk.py
new file mode 100644
index 0000000000000000000000000000000000000000..786565a904f95bbb8e57770f4cbc2ea0fd5eed42
--- /dev/null
+++ b/research/cv/centernet_det/infer/sdk/eval/eval_by_sdk.py
@@ -0,0 +1,61 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""accuracy calculation"""
+import os
+import json
+import argparse
+import pycocotools.coco as coco
+from pycocotools.cocoeval import COCOeval
+
+
+def get_eval_result(ann_file, result_path):
+    '''get evaluation results'''
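+    # collect the per-image detection lists that main.py saved as infer_<image_id>_result.json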
+    outputs = []
+    coco_anno = coco.COCO(ann_file)
+    img_ids = coco_anno.getImgIds()
+    for img_id in img_ids:
+        file_id = str(img_id).zfill(12)
+        result_json = os.path.join(result_path, f"infer_{file_id}_result.json")
+        with open(result_json, 'r') as fp:
+            ann = json.loads(fp.read())
+        for i in range(len(ann)):
+            outputs.append(ann[i])
+
+    return outputs
+
+
+def cal_acc(ann_file, result_path):
+    '''calculate inference accuracy'''
+    outputs = get_eval_result(ann_file, result_path)
+    coco_anno = coco.COCO(ann_file)
+    coco_dets = coco_anno.loadRes(outputs)
+    coco_eval = COCOeval(coco_anno, coco_dets, "bbox")
+    coco_eval.evaluate()
+    coco_eval.accumulate()
+    coco_eval.summarize()
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description="centernet inference")
+    parser.add_argument("--ann_file",
+                        type=str,
+                        required=True,
+                        help="ann file.")
+    parser.add_argument("--result_path",
+                        type=str,
+                        required=True,
+                        help="inference result save path.")
+    args = parser.parse_args()
+    cal_acc(args.ann_file, args.result_path)
diff --git a/research/cv/centernet_det/infer/sdk/external/Makefile b/research/cv/centernet_det/infer/sdk/external/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..39bb6a5eb5323a50ed55da8f7d56079f2d636667
--- /dev/null
+++ b/research/cv/centernet_det/infer/sdk/external/Makefile
@@ -0,0 +1,3 @@
+all:
+	python setup.py build_ext --inplace
+	rm -rf build
diff --git a/research/cv/centernet_det/infer/sdk/external/__init__.py b/research/cv/centernet_det/infer/sdk/external/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/research/cv/centernet_det/infer/sdk/external/nms.pyx b/research/cv/centernet_det/infer/sdk/external/nms.pyx
new file mode 100644
index 0000000000000000000000000000000000000000..6499102354b61aff0366459cf7544f59c287a779
--- /dev/null
+++ b/research/cv/centernet_det/infer/sdk/external/nms.pyx
@@ -0,0 +1,391 @@
+# --------------------------------------------------------
+# Fast R-CNN
+# Copyright (c) 2015 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ross Girshick
+# --------------------------------------------------------
+
+# ----------------------------------------------------------
+# Soft-NMS: Improving Object Detection With One Line of Code
+# Copyright (c) University of Maryland, College Park
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Navaneeth Bodla and Bharat Singh
+# ----------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+
+cdef inline np.float32_t max(np.float32_t a, np.float32_t b):
+    return a if a >= b else b
+
+cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
+    return a if a <= b else b
+
+def nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh):
+    cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0]
+    cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1]
+    cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2]
+    cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3]
+    cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4]
+
+    cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+    cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1]
+
+    cdef int ndets = dets.shape[0]
+    cdef np.ndarray[np.int_t, ndim=1] suppressed = \
+            np.zeros((ndets), dtype=np.int)
+
+    # nominal indices
+    cdef int _i, _j
+    # sorted indices
+    cdef int i, j
+    # temp variables for box i's (the box currently under consideration)
+    cdef np.float32_t ix1, iy1, ix2, iy2, iarea
+    # variables for computing overlap with box j (lower scoring box)
+    cdef np.float32_t xx1, yy1, xx2, yy2
+    cdef np.float32_t w, h
+    cdef np.float32_t inter, ovr
+
+    keep = []
+    for _i in range(ndets):
+        i = order[_i]
+        if suppressed[i] == 1:
+            continue
+        keep.append(i)
+        ix1 = x1[i]
+        iy1 = y1[i]
+        ix2 = x2[i]
+        iy2 = y2[i]
+        iarea = areas[i]
+        for _j in range(_i + 1, ndets):
+            j = order[_j]
+            if suppressed[j] == 1:
+                continue
+            xx1 = max(ix1, x1[j])
+            yy1 = max(iy1, y1[j])
+            xx2 = min(ix2, x2[j])
+            yy2 = min(iy2, y2[j])
+            w = max(0.0, xx2 - xx1 + 1)
+            h = max(0.0, yy2 - yy1 + 1)
+            inter = w * h
+            ovr = inter / (iarea + areas[j] - inter)
+            if ovr >= thresh:
+                suppressed[j] = 1
+
+    return keep
+
+def soft_nms(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0):
+    cdef unsigned int N = boxes.shape[0]
+    cdef float iw, ih, box_area
+    cdef float ua
+    cdef int pos = 0
+    cdef float maxscore = 0
+    cdef int maxpos = 0
+    cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov
+
+    for i in range(N):
+        maxscore = boxes[i, 4]
+        maxpos = i
+
+        tx1 = boxes[i,0]
+        ty1 = boxes[i,1]
+        tx2 = boxes[i,2]
+        ty2 = boxes[i,3]
+        ts = boxes[i,4]
+
+        pos = i + 1
+        # get max box
+        while pos < N:
+            if maxscore < boxes[pos, 4]:
+                maxscore = boxes[pos, 4]
+                maxpos = pos
+            pos = pos + 1
+
+        # add max box as a detection 
+        boxes[i,0] = boxes[maxpos,0]
+        boxes[i,1] = boxes[maxpos,1]
+        boxes[i,2] = boxes[maxpos,2]
+        boxes[i,3] = boxes[maxpos,3]
+        boxes[i,4] = boxes[maxpos,4]
+
+        # swap ith box with position of max box
+        boxes[maxpos,0] = tx1
+        boxes[maxpos,1] = ty1
+        boxes[maxpos,2] = tx2
+        boxes[maxpos,3] = ty2
+        boxes[maxpos,4] = ts
+
+        tx1 = boxes[i,0]
+        ty1 = boxes[i,1]
+        tx2 = boxes[i,2]
+        ty2 = boxes[i,3]
+        ts = boxes[i,4]
+
+        pos = i + 1
+        # NMS iterations, note that N changes if detection boxes fall below threshold
+        while pos < N:
+            x1 = boxes[pos, 0]
+            y1 = boxes[pos, 1]
+            x2 = boxes[pos, 2]
+            y2 = boxes[pos, 3]
+            s = boxes[pos, 4]
+
+            area = (x2 - x1 + 1) * (y2 - y1 + 1)
+            iw = (min(tx2, x2) - max(tx1, x1) + 1)
+            if iw > 0:
+                ih = (min(ty2, y2) - max(ty1, y1) + 1)
+                if ih > 0:
+                    ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
+                    ov = iw * ih / ua #iou between max box and detection box
+
+                    if method == 1: # linear
+                        if ov > Nt: 
+                            weight = 1 - ov
+                        else:
+                            weight = 1
+                    elif method == 2: # gaussian
+                        weight = np.exp(-(ov * ov)/sigma)
+                    else: # original NMS
+                        if ov > Nt: 
+                            weight = 0
+                        else:
+                            weight = 1
+
+                    boxes[pos, 4] = weight*boxes[pos, 4]
+                                
+                    # if box score falls below threshold, discard the box by swapping with last box
+                    # update N
+                    if boxes[pos, 4] < threshold:
+                        boxes[pos,0] = boxes[N-1, 0]
+                        boxes[pos,1] = boxes[N-1, 1]
+                        boxes[pos,2] = boxes[N-1, 2]
+                        boxes[pos,3] = boxes[N-1, 3]
+                        boxes[pos,4] = boxes[N-1, 4]
+                        N = N - 1
+                        pos = pos - 1
+
+            pos = pos + 1
+
+    keep = [i for i in range(N)]
+    return keep
+
+def soft_nms_39(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0):
+    cdef unsigned int N = boxes.shape[0]
+    cdef float iw, ih, box_area
+    cdef float ua
+    cdef int pos = 0
+    cdef float maxscore = 0
+    cdef int maxpos = 0
+    cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov
+    cdef float tmp
+
+    for i in range(N):
+        maxscore = boxes[i, 4]
+        maxpos = i
+
+        tx1 = boxes[i,0]
+        ty1 = boxes[i,1]
+        tx2 = boxes[i,2]
+        ty2 = boxes[i,3]
+        ts = boxes[i,4]
+
+        pos = i + 1
+        # get max box
+        while pos < N:
+            if maxscore < boxes[pos, 4]:
+                maxscore = boxes[pos, 4]
+                maxpos = pos
+            pos = pos + 1
+
+        # add max box as a detection 
+        boxes[i,0] = boxes[maxpos,0]
+        boxes[i,1] = boxes[maxpos,1]
+        boxes[i,2] = boxes[maxpos,2]
+        boxes[i,3] = boxes[maxpos,3]
+        boxes[i,4] = boxes[maxpos,4]
+
+        # swap ith box with position of max box
+        boxes[maxpos,0] = tx1
+        boxes[maxpos,1] = ty1
+        boxes[maxpos,2] = tx2
+        boxes[maxpos,3] = ty2
+        boxes[maxpos,4] = ts
+
+        for j in range(5, 39):
+            tmp = boxes[i, j]
+            boxes[i, j] = boxes[maxpos, j]
+            boxes[maxpos, j] = tmp
+
+        tx1 = boxes[i,0]
+        ty1 = boxes[i,1]
+        tx2 = boxes[i,2]
+        ty2 = boxes[i,3]
+        ts = boxes[i,4]
+
+        pos = i + 1
+        # NMS iterations, note that N changes if detection boxes fall below threshold
+        while pos < N:
+            x1 = boxes[pos, 0]
+            y1 = boxes[pos, 1]
+            x2 = boxes[pos, 2]
+            y2 = boxes[pos, 3]
+            s = boxes[pos, 4]
+
+            area = (x2 - x1 + 1) * (y2 - y1 + 1)
+            iw = (min(tx2, x2) - max(tx1, x1) + 1)
+            if iw > 0:
+                ih = (min(ty2, y2) - max(ty1, y1) + 1)
+                if ih > 0:
+                    ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
+                    ov = iw * ih / ua #iou between max box and detection box
+
+                    if method == 1: # linear
+                        if ov > Nt: 
+                            weight = 1 - ov
+                        else:
+                            weight = 1
+                    elif method == 2: # gaussian
+                        weight = np.exp(-(ov * ov)/sigma)
+                    else: # original NMS
+                        if ov > Nt: 
+                            weight = 0
+                        else:
+                            weight = 1
+
+                    boxes[pos, 4] = weight*boxes[pos, 4]
+                                
+                    # if box score falls below threshold, discard the box by swapping with last box
+                    # update N
+                    if boxes[pos, 4] < threshold:
+                        boxes[pos,0] = boxes[N-1, 0]
+                        boxes[pos,1] = boxes[N-1, 1]
+                        boxes[pos,2] = boxes[N-1, 2]
+                        boxes[pos,3] = boxes[N-1, 3]
+                        boxes[pos,4] = boxes[N-1, 4]
+                        for j in range(5, 39):
+                            tmp = boxes[pos, j]
+                            boxes[pos, j] = boxes[N - 1, j]
+                            boxes[N - 1, j] = tmp
+                        N = N - 1
+                        pos = pos - 1
+
+            pos = pos + 1
+
+    keep = [i for i in range(N)]
+    return keep
+
+def soft_nms_merge(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0, float weight_exp=6):
+    cdef unsigned int N = boxes.shape[0]
+    cdef float iw, ih, box_area
+    cdef float ua
+    cdef int pos = 0
+    cdef float maxscore = 0
+    cdef int maxpos = 0
+    cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov
+    cdef float mx1,mx2,my1,my2,mts,mbs,mw
+
+    for i in range(N):
+        maxscore = boxes[i, 4]
+        maxpos = i
+
+        tx1 = boxes[i,0]
+        ty1 = boxes[i,1]
+        tx2 = boxes[i,2]
+        ty2 = boxes[i,3]
+        ts = boxes[i,4]
+
+        pos = i + 1
+        # get max box
+        while pos < N:
+            if maxscore < boxes[pos, 4]:
+                maxscore = boxes[pos, 4]
+                maxpos = pos
+            pos = pos + 1
+
+        # add max box as a detection 
+        boxes[i,0] = boxes[maxpos,0]
+        boxes[i,1] = boxes[maxpos,1]
+        boxes[i,2] = boxes[maxpos,2]
+        boxes[i,3] = boxes[maxpos,3]
+        boxes[i,4] = boxes[maxpos,4]
+
+        mx1 = boxes[i, 0] * boxes[i, 5]
+        my1 = boxes[i, 1] * boxes[i, 5]
+        mx2 = boxes[i, 2] * boxes[i, 6]
+        my2 = boxes[i, 3] * boxes[i, 6]
+        mts = boxes[i, 5]
+        mbs = boxes[i, 6]
+
+        # swap ith box with position of max box
+        boxes[maxpos,0] = tx1
+        boxes[maxpos,1] = ty1
+        boxes[maxpos,2] = tx2
+        boxes[maxpos,3] = ty2
+        boxes[maxpos,4] = ts
+
+        tx1 = boxes[i,0]
+        ty1 = boxes[i,1]
+        tx2 = boxes[i,2]
+        ty2 = boxes[i,3]
+        ts = boxes[i,4]
+
+        pos = i + 1
+        # NMS iterations, note that N changes if detection boxes fall below threshold
+        while pos < N:
+            x1 = boxes[pos, 0]
+            y1 = boxes[pos, 1]
+            x2 = boxes[pos, 2]
+            y2 = boxes[pos, 3]
+            s = boxes[pos, 4]
+
+            area = (x2 - x1 + 1) * (y2 - y1 + 1)
+            iw = (min(tx2, x2) - max(tx1, x1) + 1)
+            if iw > 0:
+                ih = (min(ty2, y2) - max(ty1, y1) + 1)
+                if ih > 0:
+                    ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
+                    ov = iw * ih / ua #iou between max box and detection box
+
+                    if method == 1: # linear
+                        if ov > Nt: 
+                            weight = 1 - ov
+                        else:
+                            weight = 1
+                    elif method == 2: # gaussian
+                        weight = np.exp(-(ov * ov)/sigma)
+                    else: # original NMS
+                        if ov > Nt: 
+                            weight = 0
+                        else:
+                            weight = 1
+
+                    mw  = (1 - weight) ** weight_exp
+                    mx1 = mx1 + boxes[pos, 0] * boxes[pos, 5] * mw
+                    my1 = my1 + boxes[pos, 1] * boxes[pos, 5] * mw
+                    mx2 = mx2 + boxes[pos, 2] * boxes[pos, 6] * mw
+                    my2 = my2 + boxes[pos, 3] * boxes[pos, 6] * mw
+                    mts = mts + boxes[pos, 5] * mw
+                    mbs = mbs + boxes[pos, 6] * mw
+
+                    boxes[pos, 4] = weight*boxes[pos, 4]
+                                
+                    # if box score falls below threshold, discard the box by swapping with last box
+                    # update N
+                    if boxes[pos, 4] < threshold:
+                        boxes[pos,0] = boxes[N-1, 0]
+                        boxes[pos,1] = boxes[N-1, 1]
+                        boxes[pos,2] = boxes[N-1, 2]
+                        boxes[pos,3] = boxes[N-1, 3]
+                        boxes[pos,4] = boxes[N-1, 4]
+                        N = N - 1
+                        pos = pos - 1
+
+            pos = pos + 1
+
+        boxes[i, 0] = mx1 / mts
+        boxes[i, 1] = my1 / mts
+        boxes[i, 2] = mx2 / mbs
+        boxes[i, 3] = my2 / mbs
+
+    keep = [i for i in range(N)]
+    return keep
diff --git a/research/cv/centernet_det/infer/sdk/external/setup.py b/research/cv/centernet_det/infer/sdk/external/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4d25719a8ab10b9b906cf20e6f91a7a9c4d7bf3
--- /dev/null
+++ b/research/cv/centernet_det/infer/sdk/external/setup.py
@@ -0,0 +1,18 @@
+import numpy
+from distutils.core import setup
+from distutils.extension import Extension
+from Cython.Build import cythonize
+
+extensions = [
+    Extension(
+        "nms", 
+        ["nms.pyx"],
+        extra_compile_args=["-Wno-cpp", "-Wno-unused-function"]
+    )
+]
+
+setup(
+    name="coco",
+    ext_modules=cythonize(extensions),
+    include_dirs=[numpy.get_include()]
+)
diff --git a/research/cv/centernet_det/infer/sdk/infer_sdk.sh b/research/cv/centernet_det/infer/sdk/infer_sdk.sh
new file mode 100644
index 0000000000000000000000000000000000000000..52e75f22551cc99f96a53d97361cd6c6fb28fde7
--- /dev/null
+++ b/research/cv/centernet_det/infer/sdk/infer_sdk.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+echo "=============================================================================================================="
+echo "Please run the script as: "
+echo "bash infer_sdk.sh IMG_PATH INFER_MODE INFER_RESULT_DIR ANN_FILE"
+echo "for example of inference: bash infer_sdk.sh /path/image_path infer /path/infer_result /path/annotations_path"
+echo "for example of validation: bash infer_sdk.sh /path/COCO2017/val2017 eval /path/infer_result /path/COCO2017/annotations/instances_val2017.json"
+echo "=============================================================================================================="
+IMG_PATH=$1
+INFER_MODE=$2
+INFER_RESULT_DIR=$3
+ANN_FILE=$4
+
+# install nms module from third party
+if python3.7 -c "import nms" > /dev/null 2>&1
+then
+    echo "NMS module already exits, no need reinstall."
+else
+    cd external || exit
+    make
+    python3.7 setup.py install
+    cd - || exit
+fi
+
+python3.7 main.py  \
+   --img_path=$IMG_PATH \
+   --infer_mode=$INFER_MODE \
+   --infer_result_dir=$INFER_RESULT_DIR \
+   --ann_file=$ANN_FILE > infer_sdk.log 2>&1 &
diff --git a/research/cv/centernet_det/infer/sdk/main.py b/research/cv/centernet_det/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..84aa7a485b23bade222cd7a7bb91c2a1c86b90b1
--- /dev/null
+++ b/research/cv/centernet_det/infer/sdk/main.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the BSD 3-Clause License  (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+"""
+SDK inference
+"""
+import argparse
+import json
+import os
+import time
+
+import copy
+import cv2
+import numpy as np
+
+from api.infer import SdkApi
+from api.visual import visual_image
+from api.postprocess import data_process
+from api.image import get_affine_transform
+import MxpiDataType_pb2 as MxpiDataType
+from StreamManagerApi import StringVector
+from config import config as cfg
+from eval.eval_by_sdk import cal_acc
+
+
+def parser_args():
+    """
+    configuration parameter, input from outside
+    """
+    parser = argparse.ArgumentParser(description="centernet inference")
+
+    parser.add_argument("--img_path",
+                        type=str,
+                        required=True,
+                        help="image file path.")
+    parser.add_argument(
+        "--pipeline_path",
+        type=str,
+        required=False,
+        default="config/centernet.pipeline",
+        help="pipeline file path. The default is 'config/centernet.pipeline'. ")
+    parser.add_argument(
+        "--infer_mode",
+        type=str,
+        required=False,
+        default="infer",
+        help=
+        "infer:only infer, eval: accuracy evaluation. The default is 'infer'.")
+    parser.add_argument(
+        "--infer_result_dir",
+        type=str,
+        required=False,
+        default="../data/infer_result",
+        help=
+        "cache dir of inference result. The default is '../data/infer_result'."
+    )
+
+    parser.add_argument("--ann_file",
+                        type=str,
+                        required=False,
+                        help="eval ann_file.")
+
+    arg = parser.parse_args()
+    return arg
+
+def process_img(img_file):
+    """
+    Preprocessing the images
+    """
+    mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
+    std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
+    input_size = [512, 512]
+    img = cv2.imread(img_file)
+    size = img.shape
+    inp_width = size[1]
+    inp_height = size[0]
+    down_ratio = 4
+    c = np.array([inp_width / 2., inp_height / 2.], dtype=np.float32)
+    s = max(inp_height, inp_width) * 1.0
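+    # img_metas keeps the original-image center/scale and the network output resolution
+    # (input_size / down_ratio); post_process uses it to map detections back to the
+    # original image coordinates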
+    img_metas = {'c': c, 's': s,
+                 'out_height': input_size[0] // down_ratio,
+                 'out_width': input_size[1] // down_ratio}
+    trans_input = get_affine_transform(c, s, 0, [input_size[0], input_size[1]])
+    inp_img = cv2.warpAffine(img, trans_input, (cfg.MODEL_WIDTH, cfg.MODEL_HEIGHT), flags=cv2.INTER_LINEAR)
+    inp_img = (inp_img.astype(np.float32) / 255. - mean) / std
+    eval_image = inp_img.reshape((1,) + inp_img.shape)
+    model_img = eval_image.transpose(0, 3, 1, 2)
+
+    return model_img, img_metas
+
+def image_inference(pipeline_path, stream_name, img_dir, result_dir):
+    """
+    image inference: get inference for images
+    """
+    sdk_api = SdkApi(pipeline_path)
+    if not sdk_api.init():
+        exit(-1)
+
+    if not os.path.exists(result_dir):
+        os.makedirs(result_dir)
+
+    img_data_plugin_id = 0
+    print(f"\nBegin to inference for {img_dir}.\n")
+
+    file_list = os.listdir(img_dir)
+    total_len = len(file_list)
+    for img_id, file_name in enumerate(file_list):
+        if not file_name.lower().endswith((".jpg", ".jpeg")):
+            continue
+        image_name, _ = os.path.splitext(file_name)
+        file_path = os.path.join(img_dir, file_name)
+
+        img_np, meta = process_img(file_path)
+        sdk_api.send_tensor_input(stream_name,
+                                  img_data_plugin_id, "appsrc0",
+                                  img_np.tobytes(), img_np.shape, cfg.TENSOR_DTYPE_FLOAT32)
+
+        keys = [b"mxpi_tensorinfer0"]
+        keyVec = StringVector()
+        for key in keys:
+            keyVec.push_back(key)
+        start_time = time.time()
+        infer_result = sdk_api.get_protobuf(stream_name, 0, keyVec)
+        end_time = time.time() - start_time
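+        # the model output is a (1, 100, 6) tensor: 100 detections of [x1, y1, x2, y2, score, class]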
+        result = MxpiDataType.MxpiTensorPackageList()
+        result.ParseFromString(infer_result[0].messageBuf)
+        result = np.frombuffer(result.tensorPackageVec[0].tensorVec[0].dataStr,
+                               dtype='float32').reshape((1, 100, 6))
+        img_id += 1
+        output = data_process(result, meta, image_name, cfg.NUM_CLASSES)
+        print(
+            f"End-2end inference, file_name: {file_path}, {img_id}/{total_len}, elapsed_time: {end_time}.\n"
+        )
+
+        save_pred_image_path = os.path.join(result_dir, "pred_image")
+        if not os.path.exists(save_pred_image_path):
+            os.makedirs(save_pred_image_path)
+        gt_image = cv2.imread(file_path)
+        anno = copy.deepcopy(output["annotations"])
+        visual_image(gt_image, anno, save_pred_image_path, score_threshold=cfg.SCORE_THRESH)
+        pred_res_file = os.path.join(result_dir, 'infer_{}_result.json'.format(image_name))
+        with open(pred_res_file, 'w+') as f:
+            json.dump(output["annotations"], f, indent=1)
+
+if __name__ == "__main__":
+    args = parser_args()
+    stream_name0 = cfg.STREAM_NAME.encode("utf-8")
+    print("stream_name0:")
+    print(stream_name0)
+    image_inference(args.pipeline_path, stream_name0, args.img_path,
+                    args.infer_result_dir)
+    if args.infer_mode == "eval":
+        print("Infer end.")
+        print("Begin to eval...")
+        cal_acc(args.ann_file, args.infer_result_dir)
diff --git a/research/cv/centernet_det/modelarts/train_start.py b/research/cv/centernet_det/modelarts/train_start.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9a480bd0caeac9ba09f9cbdb3ccd4fd5e068e30
--- /dev/null
+++ b/research/cv/centernet_det/modelarts/train_start.py
@@ -0,0 +1,241 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+Train CenterNet and get network model files(.ckpt)
+"""
+
+import os
+import sys
+import subprocess
+import mindspore.communication.management as D
+from mindspore.communication.management import get_rank
+from mindspore import context
+from mindspore.train.model import Model
+from mindspore.context import ParallelMode
+from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from mindspore.nn.optim import Adam
+from mindspore import log as logger
+from mindspore.common import set_seed
+from mindspore.profiler import Profiler
+
+try:
+    from src.dataset import COCOHP
+    from src.centernet_det import CenterNetLossCell, CenterNetWithLossScaleCell
+    from src.centernet_det import CenterNetWithoutLossScaleCell
+    from src.utils import LossCallBack, CenterNetPolynomialDecayLR, CenterNetMultiEpochsDecayLR
+    from src.model_utils.config import config, dataset_config, net_config, train_config, export_config
+    from src.model_utils.moxing_adapter import moxing_wrapper
+    from src.model_utils.device_adapter import get_device_id, get_rank_id, get_device_num
+
+except ImportError as import_error:
+    print('Import Error: {}, trying to append path centernet_det/src/../'.format(import_error))
+    sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
+    from src.dataset import COCOHP
+    from src.centernet_det import CenterNetLossCell, CenterNetWithLossScaleCell
+    from src.centernet_det import CenterNetWithoutLossScaleCell
+    from src.utils import LossCallBack, CenterNetPolynomialDecayLR, CenterNetMultiEpochsDecayLR
+    from src.model_utils.config import config, dataset_config, net_config, train_config, export_config
+    from src.model_utils.moxing_adapter import moxing_wrapper
+    from src.model_utils.device_adapter import get_device_id, get_rank_id, get_device_num
+
+
+def _set_parallel_all_reduce_split():
+    """set centernet all_reduce fusion split"""
+    context.set_auto_parallel_context(all_reduce_fusion_config=[18, 59, 100, 141, 182])
+
+
+def _get_params_groups(network, optimizer):
+    """get param groups"""
+    params = network.trainable_params()
+    decay_params = list(filter(lambda x: not optimizer.decay_filter(x), params))
+    other_params = list(filter(optimizer.decay_filter, params))
+    group_params = [{'params': decay_params, 'weight_decay': optimizer.weight_decay},
+                    {'params': other_params, 'weight_decay': 0.0},
+                    {'order_params': params}]
+    return group_params
+
+
+def _get_optimizer(network, dataset_size):
+    """get optimizer, only support Adam right now."""
+    if train_config.optimizer == 'Adam':
+        group_params = _get_params_groups(network, train_config.Adam)
+        if train_config.lr_schedule == "PolyDecay":
+            lr_schedule = CenterNetPolynomialDecayLR(learning_rate=train_config.PolyDecay.learning_rate,
+                                                     end_learning_rate=train_config.PolyDecay.end_learning_rate,
+                                                     warmup_steps=train_config.PolyDecay.warmup_steps,
+                                                     decay_steps=config.train_steps,
+                                                     power=train_config.PolyDecay.power)
+            optimizer = Adam(group_params, learning_rate=lr_schedule, eps=train_config.PolyDecay.eps, loss_scale=1.0)
+        elif train_config.lr_schedule == "MultiDecay":
+            multi_epochs = train_config.MultiDecay.multi_epochs
+            if not isinstance(multi_epochs, (list, tuple)):
+                raise TypeError("multi_epochs must be list or tuple.")
+            if not multi_epochs:
+                multi_epochs = [config.epoch_size]
+            lr_schedule = CenterNetMultiEpochsDecayLR(learning_rate=train_config.MultiDecay.learning_rate,
+                                                      warmup_steps=train_config.MultiDecay.warmup_steps,
+                                                      multi_epochs=multi_epochs,
+                                                      steps_per_epoch=dataset_size,
+                                                      factor=train_config.MultiDecay.factor)
+            optimizer = Adam(group_params, learning_rate=lr_schedule, eps=train_config.MultiDecay.eps, loss_scale=1.0)
+        else:
+            raise ValueError("Don't support lr_schedule {}, only support [PolynormialDecay, MultiEpochDecay]".
+                             format(train_config.optimizer))
+    else:
+        raise ValueError("Don't support optimizer {}, only support [Lamb, Momentum, Adam]".
+                         format(train_config.optimizer))
+    return optimizer
+
+
+def _get_last_ckpt(ckpt_dir):
+    """find the latest .ckpt file under the newest sub-directory of ckpt_dir"""
+    file_dict = {}
+    for name in os.listdir(ckpt_dir):
+        ctime = os.stat(os.path.join(ckpt_dir, name)).st_ctime
+        file_dict[ctime] = name
+    # descend into the most recently created entry (the checkpoint folder) before collecting .ckpt files
+    ckpt_dir = os.path.join(ckpt_dir, file_dict[max(file_dict.keys())])
+    ckpt_files = [ckpt_file for ckpt_file in os.listdir(ckpt_dir)
+                  if ckpt_file.endswith('.ckpt')]
+    if not ckpt_files:
+        print("No ckpt file found.")
+        return None, ckpt_dir
+    ckpt_file_last = os.path.join(ckpt_dir, sorted(ckpt_files)[-1])
+    print("ckpt file name:", sorted(ckpt_files)[-1])
+    return ckpt_file_last, ckpt_dir
+
+
+def _export_air(ckpt_dir):
+    """convert ckpt to air"""
+    ckpt_file, ckpt_dir = _get_last_ckpt(ckpt_dir)
+    if not ckpt_file:
+        print("Freezing model failed: no ckpt file found.")
+        return
+    file_name = os.path.join(ckpt_dir, export_config.export_name)
+    export_file = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)), os.path.pardir, "export.py")
+    cmd = ["python", export_file,
+           f"--device_id={config.device_id}",
+           f"--export_format={export_config.export_format}",
+           f"--export_load_ckpt={ckpt_file}",
+           f"--export_name={file_name}",
+           f"--num_classes={config.num_classes}"]
+    print(f"Start exporting AIR, cmd = {' '.join(cmd)}.")
+    process = subprocess.Popen(cmd, shell=False)
+    process.wait()
+
+
+def modelarts_pre_process():
+    """modelarts pre process function."""
+    config.mindrecord_dir = config.data_path
+    config.save_checkpoint_path = os.path.join(config.output_path, config.save_checkpoint_path)
+
+
+@moxing_wrapper(pre_process=modelarts_pre_process)
+def train():
+    """training CenterNet"""
+    context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
+    context.set_context(reserve_class_name_in_scope=False)
+    context.set_context(save_graphs=False)
+
+    ckpt_save_dir = config.save_checkpoint_path
+    rank = 0
+    device_num = 1
+    num_workers = 8
+
+    if config.device_target == "Ascend":
+        context.set_context(device_id=get_device_id())
+        if config.distribute == "true":
+            D.init()
+            device_num = get_device_num()
+            rank = get_rank_id()
+            ckpt_save_dir = config.save_checkpoint_path + 'ckpt_' + str(rank) + '/'
+
+            context.reset_auto_parallel_context()
+            context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
+                                              device_num=device_num)
+            _set_parallel_all_reduce_split()
+    else:
+        config.distribute = "false"
+        config.need_profiler = "false"
+        config.enable_data_sink = "false"
+
+    # Create the training dataset.
+    # MindRecord files will be generated at config.mindrecord_dir, such as centernet.mindrecord0, 1, ... file_num.
+    logger.info("Begin creating dataset for CenterNet")
+    coco = COCOHP(dataset_config, run_mode="train", net_opt=net_config, save_path=config.save_result_dir)
+    dataset = coco.create_train_dataset(config.mindrecord_dir, config.mindrecord_prefix,
+                                        batch_size=train_config.batch_size, device_num=device_num, rank=rank,
+                                        num_parallel_workers=num_workers, do_shuffle=config.do_shuffle == 'true')
+    dataset_size = dataset.get_dataset_size()
+    logger.info("Create dataset done!")
+
+    net_with_loss = CenterNetLossCell(net_config)
+
+    config.train_steps = config.epoch_size * dataset_size
+    logger.info("train steps: {}".format(config.train_steps))
+
+    optimizer = _get_optimizer(net_with_loss, dataset_size)
+
+    enable_static_time = config.device_target == "CPU"
+    callback = [TimeMonitor(config.data_sink_steps), LossCallBack(dataset_size, enable_static_time)]
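+    # write checkpoints from a single device per node so that parallel ranks do not save duplicate files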
+    if config.enable_save_ckpt == "true" and get_device_id() % min(8, device_num) == 0:
+        config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_steps,
+                                     keep_checkpoint_max=config.save_checkpoint_num)
+        ckpoint_cb = ModelCheckpoint(prefix='checkpoint_centernet',
+                                     directory=None if ckpt_save_dir == "" else ckpt_save_dir, config=config_ck)
+        callback.append(ckpoint_cb)
+
+    if config.load_checkpoint_path:
+        param_dict = load_checkpoint(config.load_checkpoint_path)
+        if config.filter_weight == 'true':
+            for key in list(param_dict.keys()):
+                if 'hm_fn' in key:
+                    print("Delete parameter from checkpoint: ", key)
+                    del param_dict[key]
+        load_param_into_net(net_with_loss, param_dict)
+
+    if config.device_target == "Ascend":
+        net_with_grads = CenterNetWithLossScaleCell(net_with_loss, optimizer=optimizer,
+                                                    sens=train_config.loss_scale_value)
+    else:
+        net_with_grads = CenterNetWithoutLossScaleCell(net_with_loss, optimizer=optimizer)
+
+    model = Model(net_with_grads)
+    model.train(config.epoch_size, dataset, callbacks=callback,
+                dataset_sink_mode=(config.enable_data_sink == "true"), sink_size=config.data_sink_steps)
+
+
+def main():
+    train()
+    print('CenterNet training success!')
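+    # moxing is only available inside the ModelArts runtime, so it is imported lazily after training finishes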
+    import moxing as mox
+    _export_air(config.output_path)
+    mox.file.copy_parallel(config.output_path, config.train_url)
+    print("Finish sync data from {} to {}.".format(config.output_path, config.train_url))
+    return 0
+
+
+if __name__ == '__main__':
+    if config.need_profiler == "true":
+        profiler = Profiler(output_path=config.profiler_path)
+    set_seed(317)
+    main()
+    if config.need_profiler == "true":
+        profiler.analyse()
diff --git a/research/cv/centernet_det/scripts/docker_start.sh b/research/cv/centernet_det/scripts/docker_start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7c1c17d85abfb5fdc2ae45ef6c8bde811541a93b
--- /dev/null
+++ b/research/cv/centernet_det/scripts/docker_start.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
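+
+# Usage: bash docker_start.sh [docker_image] [data_dir] [model_dir]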
+docker_image=$1
+data_dir=$2
+model_dir=$3
+
+docker run -it --ipc=host \
+               --device=/dev/davinci0 \
+               --device=/dev/davinci1 \
+               --device=/dev/davinci2 \
+               --device=/dev/davinci3 \
+               --device=/dev/davinci4 \
+               --device=/dev/davinci5 \
+               --device=/dev/davinci6 \
+               --device=/dev/davinci7 \
+               --device=/dev/davinci_manager \
+               --device=/dev/devmm_svm --device=/dev/hisi_hdc \
+               -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+               -v /usr/local/Ascend/add-ons/:/usr/local/Ascend/add-ons/ \
+               -v ${model_dir}:${model_dir} \
+               -v ${data_dir}:${data_dir}  \
+               -v ~/ascend/log/npu/conf/slog/slog.conf:/var/log/npu/conf/slog/slog.conf \
+               -v ~/ascend/log/npu/slog/:/var/log/npu/slog -v ~/ascend/log/npu/profiling/:/var/log/npu/profiling \
+               -v ~/ascend/log/npu/dump/:/var/log/npu/dump -v ~/ascend/log/npu/:/usr/slog ${docker_image} \
+               /bin/bash
diff --git a/research/cv/centernet_det/src/model_utils/config.py b/research/cv/centernet_det/src/model_utils/config.py
index cfb6b0b8e26709ea7ae3e230c2657ab248dc1fbf..9bbf3d64ea42a2c7ca6d292850b937fe0c82a943 100644
--- a/research/cv/centernet_det/src/model_utils/config.py
+++ b/research/cv/centernet_det/src/model_utils/config.py
@@ -121,6 +121,20 @@ def extra_operations(cfg):
         cfg: Object after instantiation of class 'Config'.
     """
     cfg.train_config.Adam.decay_filter = lambda x: x.name.endswith('.bias') or x.name.endswith('.beta') or x.name.endswith('.gamma')
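+    # The string-valued overrides below let ModelArts hyperparameters replace the typed values nested in the
+    # yaml config whenever they are set to non-empty strings.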
+    if cfg.num_classes:
+        cfg.dataset_config.num_classes = int(cfg.num_classes)
+        cfg.net_config.num_classes = int(cfg.num_classes)
+    if cfg.batch_size:
+        cfg.train_config.batch_size = int(cfg.batch_size)
+    if cfg.lr_schedule:
+        cfg.train_config.lr_schedule = cfg.lr_schedule
+    if cfg.learning_rate:
+        cfg.train_config.PolyDecay.learning_rate = float(cfg.learning_rate)
+        cfg.train_config.MultiDecay.learning_rate = float(cfg.learning_rate)
+    if cfg.end_learning_rate:
+        cfg.train_config.PolyDecay.end_learning_rate = float(cfg.end_learning_rate)
+    if cfg.multi_epochs:
+        # multi_epochs may arrive from ModelArts as a comma-separated (possibly bracketed) string; parse it into ints
+        epochs_str = str(cfg.multi_epochs).strip('[]() ')
+        cfg.train_config.MultiDecay.multi_epochs = [int(e) for e in epochs_str.split(',') if e.strip()]
     cfg.export_config.input_res = cfg.dataset_config.input_res
     if cfg.export_load_ckpt:
         cfg.export_config.ckpt_file = cfg.export_load_ckpt