diff --git a/research/cv/RefineDet/docker_start.sh b/research/cv/RefineDet/docker_start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c6d7efcbcd2be378d97bcc125601080804a6553b
--- /dev/null
+++ b/research/cv/RefineDet/docker_start.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# Copyright(C) 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+data_dir=$2
+model_dir=$3
+
+docker run -it -u root --ipc=host \
+               --device=/dev/davinci0 \
+               --device=/dev/davinci1 \
+               --device=/dev/davinci2 \
+               --device=/dev/davinci3 \
+               --device=/dev/davinci4 \
+               --device=/dev/davinci5 \
+               --device=/dev/davinci6 \
+               --device=/dev/davinci7 \
+               --device=/dev/davinci_manager \
+               --device=/dev/devmm_svm \
+               --device=/dev/hisi_hdc \
+               -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+               -v /usr/local/Ascend/add-ons/:/usr/local/Ascend/add-ons/ \
+               -v ${model_dir}:${model_dir} \
+               -v ${data_dir}:${data_dir}  \
+               -v /root/ascend/log:/root/ascend/log ${docker_image} \
+               /bin/bash
diff --git a/research/cv/RefineDet/infer/convert/aipp.config b/research/cv/RefineDet/infer/convert/aipp.config
new file mode 100644
index 0000000000000000000000000000000000000000..d3f15d58f1addbcd6ff14a7a68bc58e85ed3a7fa
--- /dev/null
+++ b/research/cv/RefineDet/infer/convert/aipp.config
@@ -0,0 +1,26 @@
+aipp_op {
+    aipp_mode : static
+    input_format : RGB888_U8
+    related_input_rank : 0
+    csc_switch : false
+    rbuv_swap_switch : true
+    matrix_r0c0 : 256
+    matrix_r0c1 : 0
+    matrix_r0c2 : 359
+    matrix_r1c0 : 256
+    matrix_r1c1 : -88
+    matrix_r1c2 : -183
+    matrix_r2c0 : 256
+    matrix_r2c1 : 454
+    matrix_r2c2 : 0
+    input_bias_0 : 0
+    input_bias_1 : 128
+    input_bias_2 : 128
+    
+    min_chn_0 : 123.675
+    min_chn_1 : 116.28
+    min_chn_2 : 103.53
+    var_reci_chn_0 : 0.0171247538316637
+    var_reci_chn_1 : 0.0175070028011204
+    var_reci_chn_2 : 0.0174291938997821
+}
\ No newline at end of file
diff --git a/research/cv/RefineDet/infer/convert/air2om.sh b/research/cv/RefineDet/infer/convert/air2om.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d6d614feae769c325162aaf3863e2de285ff54f1
--- /dev/null
+++ b/research/cv/RefineDet/infer/convert/air2om.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+input_air_path=$1
+output_om_path=$2
+aipp_cfg=$3
+
+export ASCEND_SLOG_PRINT_TO_STDOUT=0
+
+echo "Input AIR file path: ${input_air_path}"
+echo "Output OM file path: ${output_om_path}"
+echo "AIPP cfg file path: ${aipp_cfg}"
+
+atc  --input_format=NCHW --framework=1 \
+     --model=$input_air_path \
+     --output=$output_om_path \
+     --soc_version=Ascend310 \
+     --disable_reuse_memory=0 \
+     --insert_op_conf=$aipp_cfg \
+     --precision_mode=allow_fp32_to_fp16 \
+     --op_select_implmode=high_precision
diff --git a/research/cv/RefineDet/infer/data/config/refinedet.pipeline b/research/cv/RefineDet/infer/data/config/refinedet.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..6a81baf44c2cb9fbd8d8f1cadaa12c7d63dda91c
--- /dev/null
+++ b/research/cv/RefineDet/infer/data/config/refinedet.pipeline
@@ -0,0 +1,44 @@
+{
+    "refinedet": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+           "factory": "appsrc",
+           "next": "mxpi_imagedecoder0"
+        },
+        "mxpi_imagedecoder0": {
+            "props": {
+                "handleMethod": "opencv"
+            },
+            "factory": "mxpi_imagedecoder",
+            "next": "mxpi_imageresize0"
+        },
+        "mxpi_imageresize0": {
+            "props": {
+                "parentName": "mxpi_imagedecoder0",
+                "handleMethod": "opencv",
+                "resizeHeight": "320",
+                "resizeWidth": "320",
+                "resizeType": "Resizer_Stretch"
+            },
+            "factory": "mxpi_imageresize",
+            "next": "mxpi_tensorinfer0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "waitingTime": "3000",
+                "dataSource": "mxpi_imageresize0",
+                "modelPath": "../data/model/refinedet.om"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "factory": "appsink"
+        }
+    }
+}
\ No newline at end of file
diff --git a/research/cv/RefineDet/infer/docker_start_infer.sh b/research/cv/RefineDet/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..05c1f51b0965d290a8b5f33c7aa7d89b061a2013
--- /dev/null
+++ b/research/cv/RefineDet/infer/docker_start_infer.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# Copyright(C) 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+model_dir=$2
+
+if [ -z "${docker_image}" ]; then
+    echo "please input docker_image"
+    exit 1
+fi
+
+if [ ! -d "${model_dir}" ]; then
+    echo "model_dir does not exist: ${model_dir}"
+    exit 1
+fi
+
+docker run -it -u root \
+           --device=/dev/davinci0 \
+           --device=/dev/davinci_manager \
+           --device=/dev/devmm_svm \
+           --device=/dev/hisi_hdc \
+           -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+           -v ${model_dir}:${model_dir} \
+           ${docker_image} \
+           /bin/bash
\ No newline at end of file
diff --git a/research/cv/RefineDet/infer/mxbase/CMakeLists.txt b/research/cv/RefineDet/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b60617b167e72a39b2cd63e49737f1c44ef2be19
--- /dev/null
+++ b/research/cv/RefineDet/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,46 @@
+cmake_minimum_required(VERSION 3.14.0)
+project(refinedet)
+set(TARGET refinedet)
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_definitions(-Dgoogle=mindxsdk_private)
+add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall)
+add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -pie)
+
+# Check environment variable
+if(NOT DEFINED ENV{ASCEND_HOME})
+    message(FATAL_ERROR "please define environment variable:ASCEND_HOME")
+endif()
+# check env
+if(NOT DEFINED ENV{ARCH_PATTERN})
+    message(WARNING "please define environment variable:ARCH_PATTERN")
+endif()
+
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/ascend-toolkit/latest/acllib)
+
+set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
+set(MXBASE_INC ${MXBASE_ROOT_DIR}/include)
+set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib)
+set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors)
+set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/include/MxBase/postprocess/include)
+set(OPENSOURCE_DIR $ENV{MX_SDK_HOME}/opensource)
+
+include_directories(${ACL_LIB_PATH}/include)
+include_directories(${OPENSOURCE_DIR}/include)
+include_directories(${OPENSOURCE_DIR}/include/opencv4)
+include_directories(${MXBASE_INC})
+include_directories(${MXBASE_POST_PROCESS_DIR})
+include_directories(/usr/local/Ascend/ascend-toolkit/latest/include)
+
+link_directories(${ACL_LIB_PATH}/lib64)
+link_directories(${OPENSOURCE_DIR}/lib)
+link_directories(${MXBASE_LIB_DIR})
+link_directories(${MXBASE_POST_LIB_DIR})
+
+
+add_executable(${TARGET} ./main.cpp ./refinedetDetection/refinedetDetection.cpp)
+
+target_link_libraries(${TARGET} glog cpprest mxbase opencv_world)
+
+install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/)
diff --git a/research/cv/RefineDet/infer/mxbase/build.sh b/research/cv/RefineDet/infer/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..dc191685863f25d60819c6b7ee5c0d9e28c56682
--- /dev/null
+++ b/research/cv/RefineDet/infer/mxbase/build.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+path_cur="$(dirname "$0")"
+
+function check_env()
+{
+    # set ASCEND_VERSION to ascend-toolkit/latest when it was not specified by user
+    if [ ! "${ASCEND_VERSION}" ]; then
+        export ASCEND_VERSION=ascend-toolkit/latest
+        echo "Set ASCEND_VERSION to the default value: ${ASCEND_VERSION}"
+    else
+        echo "ASCEND_VERSION is set to ${ASCEND_VERSION} by user"
+    fi
+
+    if [ ! "${ARCH_PATTERN}" ]; then
+        # set ARCH_PATTERN to ./ when it was not specified by user
+        export ARCH_PATTERN=./
+        echo "ARCH_PATTERN is set to the default value: ${ARCH_PATTERN}"
+    else
+        echo "ARCH_PATTERN is set to ${ARCH_PATTERN} by user"
+    fi
+}
+
+function build_refinedet()
+{
+    cd "$path_cur" || exit
+    rm -rf build
+    mkdir -p build
+    cd build || exit
+    cmake ..
+    make
+    ret=$?
+    if [ ${ret} -ne 0 ]; then
+        echo "Failed to build refinedet."
+        exit ${ret}
+    fi
+    make install
+}
+
+check_env
+build_refinedet
\ No newline at end of file
diff --git a/research/cv/RefineDet/infer/mxbase/main.cpp b/research/cv/RefineDet/infer/mxbase/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e9d3e02b8a858cd2a5bb121f0cd19c3cf2450282
--- /dev/null
+++ b/research/cv/RefineDet/infer/mxbase/main.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <dirent.h>
+#include <iostream>
+#include <fstream>
+#include <vector>
+#include "refinedetDetection/refinedetDetection.h"
+#include "MxBase/Log/Log.h"
+
+std::vector<double> g_infer_cost;
+
+namespace {
+    const uint32_t CLASS_NU = 81;
+    const uint32_t BOX_DIM = 4;
+    const uint32_t RESIZE_WIDTH = 320;
+    const uint32_t RESIZE_HEIGHT = 320;
+
+    const uint32_t MAX_BOXES = 100;
+    const float NMS_THERSHOLD = 0.6;
+    const float MIN_SCORE = 0.1;
+    const uint32_t NUM_RETINANETBOXES = 6375;
+}   // namespace
+
+static APP_ERROR init_refinedet_param(InitParam *initParam) {
+    initParam->deviceId = 0;
+    initParam->labelPath = "../data/config/coco2017.names";
+    initParam->modelPath = "../data/model/refinedet.om";
+    initParam->resizeWidth = RESIZE_WIDTH;
+    initParam->resizeHeight = RESIZE_HEIGHT;
+    initParam->width = 0;
+    initParam->height = 0;
+    initParam->maxBoxes = MAX_BOXES;
+    initParam->nmsThershold = NMS_THERSHOLD;
+    initParam->minScore = MIN_SCORE;
+    initParam->numRetinanetBoxes = NUM_RETINANETBOXES;
+    initParam->classNum = CLASS_NU;
+    initParam->boxDim = BOX_DIM;
+
+    return APP_ERR_OK;
+}
+
+APP_ERROR ScanImages(const std::string &path, std::vector<std::string> &imgFiles) {
+    DIR *dirPtr = opendir(path.c_str());
+    if (dirPtr == nullptr) {
+        LogError << "opendir failed. dir:" << path;
+        return APP_ERR_INTERNAL_ERROR;
+    }
+    dirent *direntPtr = nullptr;
+    while ((direntPtr = readdir(dirPtr)) != nullptr) {
+        std::string fileName = direntPtr->d_name;
+        if (fileName == "." || fileName == "..") {
+            continue;
+        }
+
+        imgFiles.emplace_back(path + "/" + fileName);
+    }
+    closedir(dirPtr);
+    return APP_ERR_OK;
+}
+
+int main(int argc, char *argv[]) {
+    if (argc <= 1) {
+        LogWarn << "Please input image path, such as './refinedet img_dir'.";
+        return APP_ERR_OK;
+    }
+    InitParam initParam;
+    int ret = init_refinedet_param(&initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "InitrefinedetParam Init failed, ret=" << ret << ".";
+        return ret;
+    }
+    auto refinedet = std::make_shared<refinedetDetection>();
+    ret = refinedet->Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "refinedetDetection Init failed, ret=" << ret << ".";
+        return ret;
+    }
+    std::string imgPath = argv[1];
+    std::vector<std::string> imgFilePaths;
+    ret = ScanImages(imgPath, imgFilePaths);
+    if (ret != APP_ERR_OK) {
+        LogError << "Read files from path failed, ret=" << ret << ".";
+        return ret;
+    }
+    for (auto &imgName : imgFilePaths) {
+        ret = refinedet->process(imgName, initParam);
+        if (ret != APP_ERR_OK) {
+            LogError << "refinedetDetection process failed, ret=" << ret << ".";
+            refinedet->DeInit();
+            return ret;
+         }
+    }
+    refinedet->DeInit();
+    double costSum = 0;
+    for (uint32_t i = 0; i < g_infer_cost.size(); i++) {
+        costSum += g_infer_cost[i];
+    }
+    LogInfo << "Infer images sum " << g_infer_cost.size() << ", cost total time: " << costSum << " ms.";
+    LogInfo << "The throughput: " << g_infer_cost.size() * 1000 / costSum << " bin/sec.";
+    return APP_ERR_OK;
+}
diff --git a/research/cv/RefineDet/infer/mxbase/refinedetDetection/refinedetDetection.cpp b/research/cv/RefineDet/infer/mxbase/refinedetDetection/refinedetDetection.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ee2cf88e7280432a9ccb32d97dce051a778f396b
--- /dev/null
+++ b/research/cv/RefineDet/infer/mxbase/refinedetDetection/refinedetDetection.cpp
@@ -0,0 +1,492 @@
+/*
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "refinedetDetection.h"
+
+#include <unistd.h>
+#include <sys/stat.h>
+#include <map>
+#include <algorithm>
+#include <fstream>
+
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+
+APP_ERROR refinedetDetection::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+    model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor Init failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::DeInit() {
+    model_->DeInit();
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::read_image(const std::string &imgPath, cv::Mat &imageMat) {
+    imageMat = cv::imread(imgPath, cv::IMREAD_COLOR);
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::inference(const std::vector<MxBase::TensorBase> &inputs,
+                                        std::vector<MxBase::TensorBase> &outputs) {
+    auto dtypes = model_->GetOutputDataType();
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector<uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t) modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        MxBase::TensorBase tensor(shape, dtypes[i], MxBase::MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
+        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs.push_back(tensor);
+    }
+    MxBase::DynamicInfo dynamicInfo = {};
+    // Set the type to static batch
+    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+    auto startTime = std::chrono::high_resolution_clock::now();
+    APP_ERROR ret = model_->ModelInference(inputs, outputs, dynamicInfo);
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    g_infer_cost.push_back(costMs);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::get_tensor_output(size_t index, MxBase::TensorBase output,
+                                             InitParam &initParam) {
+    // get inference result from output
+    std::vector<std::vector<uint32_t>> dimList = {
+            {initParam.numRetinanetBoxes, initParam.boxDim},
+            {initParam.numRetinanetBoxes, initParam.classNum},
+    };
+
+    // check tensor is available
+    std::vector<uint32_t> outputShape = output.GetShape();
+    uint32_t len = outputShape.size();
+    for (uint32_t i = 0; i < len; ++i) {
+        LogInfo << "output" << index << " shape dim " << i << " is: " << outputShape[i] << std::endl;
+    }
+
+    LogInfo << "image height : " << initParam.height;
+    LogInfo << "image width : " << initParam.width;
+    float *outputPtr = reinterpret_cast<float *>(output.GetBuffer());
+
+    uint32_t C = dimList[index][0];  // row
+    uint32_t H = dimList[index][1];  // col
+    std::vector<float> outputVec;
+
+    for (size_t c = 0; c < C; c++) {
+        for (size_t h = 0; h < H; h++) {
+            float value = *(outputPtr + c * H + h);
+            outputVec.push_back(value);
+        }
+    }
+    cv::Mat outputs = cv::Mat(outputVec).reshape(0, C).clone();
+    std::vector<float> outputList;
+    if (index == 0) {
+        for (int i = 0; i < outputs.rows; i++) {
+            for (int j = 0; j < outputs.cols; j++) {
+                outputList.push_back(outputs.at<float>(i, j));
+            }
+            initParam.boxes.push_back(outputList);
+            outputList.clear();
+        }
+    }
+    if (index == 1) {
+        for (int i = 0; i < outputs.rows; i++) {
+            for (int j = 0; j < outputs.cols; j++) {
+                outputList.push_back(outputs.at<float>(i, j));
+            }
+            initParam.scores.push_back(outputList);
+            outputList.clear();
+        }
+    }
+    LogInfo << "initParam.boxes size  " << initParam.boxes.size();
+    std::string buffer;
+    std::ifstream inputFile(initParam.labelPath);
+    if (!inputFile) {
+        LogError << "coco.names file open error" << std::endl;
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    while (getline(inputFile, buffer)) {
+        initParam.label.push_back(buffer);
+    }
+
+    if (index == 1) {
+        int ret = get_anm_result(initParam);
+        if (ret != APP_ERR_OK) {
+            LogError << "get_anm_result Init failed, ret=" << ret << ".";
+            return ret;
+        }
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::get_anm_result(InitParam &initParam) {
+    // get anm result from output
+    std::vector<std::vector<std::vector<float>>> final_boxes;
+    std::vector<std::vector<std::string>> final_label;
+    std::vector<std::vector<float>> final_score;
+
+    for (unsigned int c = 1; c < initParam.classNum; ++c) {
+        int ret;
+        std::vector<float> class_box_scores;
+        std::vector<float> class_box_scores_new;
+        std::vector<int> score_mask;
+        std::vector<std::string> class_box_label;
+        std::vector<std::vector<float>> class_boxes;
+        std::vector<std::vector<float>> class_boxes_final;
+        ret = get_column_data(class_box_scores, initParam.scores, static_cast<int>(c));
+        if (ret != APP_ERR_OK) {
+            LogError << "get_column_data failed, ret=" << ret << ".";
+            return ret;
+        }
+        for (unsigned int k = 0; k < class_box_scores.size(); ++k) {
+            if (class_box_scores[k] > initParam.minScore) {
+                score_mask.push_back(k);
+            }
+        }
+        for (unsigned int k = 0; k < score_mask.size(); ++k) {
+            class_box_scores_new.push_back(class_box_scores[score_mask[k]]);
+            class_boxes.push_back(initParam.boxes[score_mask[k]]);
+        }
+        for (unsigned int l = 0; l < class_boxes.size(); ++l) {
+            for (unsigned int m = 0; m < class_boxes[l].size(); ++m) {
+                if (m % 2 == 0) class_boxes[l][m] *= initParam.height;
+                else
+                    class_boxes[l][m] *= initParam.width;
+            }
+        }
+        class_box_scores.clear();
+        if (score_mask.size() > 0) {
+            std::vector<int> nms_index;
+            ret = apply_nms(class_boxes, class_box_scores_new, nms_index, initParam);
+            if (ret != APP_ERR_OK) {
+                LogError << "apply_nms failed, ret=" << ret << ".";
+                return ret;
+            }
+            for (unsigned int j = 0; j < nms_index.size(); ++j) {
+                float x1 = 0, y1 = 0, x2 = 0, y2 = 0;
+                x1 = class_boxes[nms_index[j]][1];
+                y1 = class_boxes[nms_index[j]][0];
+                x2 = class_boxes[nms_index[j]][3] - class_boxes[nms_index[j]][1];
+                y2 = class_boxes[nms_index[j]][2] - class_boxes[nms_index[j]][0];
+                std::vector<float> box_end{x1, y1, x2, y2};
+                class_boxes_final.push_back(box_end);
+                LogInfo << "class_boxes : " << x1 << " " << y1 << " " << x2 << " " << y2;
+                class_box_scores.push_back(class_box_scores_new[nms_index[j]]);
+                LogInfo << "class_box_scores : " << class_box_scores_new[nms_index[j]];
+                class_box_label.push_back(initParam.label[c]);
+                LogInfo << "class_box_label : " << initParam.label[c];
+                box_end.clear();
+            }
+            final_boxes.push_back(class_boxes_final);
+            final_score.push_back(class_box_scores);
+            final_label.push_back(class_box_label);
+            nms_index.clear();
+        }
+        class_box_scores.clear();
+        class_box_scores_new.clear();
+        score_mask.clear();
+        class_box_label.clear();
+        class_boxes.clear();
+        class_boxes_final.clear();
+    }
+
+    int ret = write_result(final_boxes, final_score, final_label);
+    final_boxes.clear();
+    final_score.clear();
+    final_label.clear();
+    if (ret != APP_ERR_OK) {
+        LogError << "write_result failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::write_result(std::vector<std::vector<std::vector<float>>> final_boxes,
+                                           std::vector<std::vector<float>> final_score,
+                                           std::vector<std::vector<std::string>> final_label) {
+    // save the result to a file
+    std::string resultPathName = "./result";
+
+    // create result directory when it does not exit
+    if (access(resultPathName.c_str(), 0) != 0) {
+        int ret = mkdir(resultPathName.c_str(), S_IRUSR | S_IWUSR | S_IXUSR);
+        if (ret != 0) {
+            LogError << "Failed to create result directory: " << resultPathName << ", ret = " << ret;
+            return APP_ERR_COMM_OPEN_FAIL;
+        }
+    }
+    // create result file under result directory
+    resultPathName = resultPathName + "/output.txt";
+
+    std::ofstream tfile(resultPathName, std::ofstream::app);
+    if (tfile.fail()) {
+        LogError << "Failed to open result file: " << resultPathName;
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+
+    for (unsigned int j = 0; j < final_score.size(); j++) {
+        for (unsigned int k = 0; k < final_score[j].size(); k++) {
+            tfile << "bbox:" << final_boxes[j][k][0] << " " << final_boxes[j][k][1] << " " << final_boxes[j][k][2]
+                  << " " << final_boxes[j][k][3] << " " << "score:" << final_score[j][k]
+                  << " " << "category:" << final_label[j][k] << std::endl;
+        }
+    }
+    tfile.close();
+
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::apply_nms(std::vector<std::vector<float>> &class_boxes,
+                                        std::vector<float> &class_box_scores, std::vector<int> &keep,
+                                        const InitParam &initParam) {
+    // apply nms to get index
+    std::vector<float> y1;
+    std::vector<float> x1;
+    std::vector<float> y2;
+    std::vector<float> x2;
+    int ret = get_column_data(y1, class_boxes, 0);
+    if (ret != APP_ERR_OK) {
+        LogError << "get_column_data failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = get_column_data(x1, class_boxes, 1);
+    if (ret != APP_ERR_OK) {
+        LogError << "get_column_data failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = get_column_data(y2, class_boxes, 2);
+    if (ret != APP_ERR_OK) {
+        LogError << "get_column_data failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = get_column_data(x2, class_boxes, 3);
+    if (ret != APP_ERR_OK) {
+        LogError << "get_column_data failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    std::vector<float> areas;
+    for (unsigned int i = 0; i < class_boxes.size(); ++i) {
+        areas.push_back((x2[i] - x1[i] + 1) * (y2[i] - y1[i] + 1));
+    }
+    std::vector<int> order;
+    std::vector<float> class_box_scores_order = class_box_scores;
+    for (unsigned int i = 0; i < class_box_scores.size(); ++i) {
+        int maxPosition = max_element(class_box_scores_order.begin(),
+                                      class_box_scores_order.end()) - class_box_scores_order.begin();
+        order.push_back(maxPosition);
+        class_box_scores_order[maxPosition] = 0;
+    }
+    while (order.size() > 0) {
+        int i = order[0];
+        keep.push_back(i);
+        if (keep.size() >= initParam.maxBoxes) {
+            break;
+        }
+        std::vector<float> yy1, xx1, yy2, xx2;
+        ret = maxiMum(y1[i], y1, order, yy1);
+        if (ret != APP_ERR_OK) {
+            LogError << "maxiMum failed, ret=" << ret << ".";
+            return ret;
+        }
+        ret = maxiMum(x1[i], x1, order, xx1);
+        if (ret != APP_ERR_OK) {
+            LogError << "maxiMum failed, ret=" << ret << ".";
+            return ret;
+        }
+        ret = miniMum(y2[i], y2, order, yy2);
+        if (ret != APP_ERR_OK) {
+            LogError << "miniMum failed, ret=" << ret << ".";
+            return ret;
+        }
+        ret = miniMum(x2[i], x2, order, xx2);
+        if (ret != APP_ERR_OK) {
+            LogError << "miniMum failed, ret=" << ret << ".";
+            return ret;
+        }
+
+        std::vector<float> inter;
+        for (unsigned int j = 0; j < xx1.size(); ++j) {
+            float w, h;
+            w = (xx2[j] - xx1[j] + 1) > 0.0 ? xx2[j] - xx1[j] + 1: 0.0;
+            h = (yy2[j] - yy1[j] + 1) > 0.0 ? yy2[j] - yy1[j] + 1: 0.0;
+            inter.push_back(w * h);
+        }
+        ret = get_order_data(areas, inter, order, initParam);
+        if (ret != APP_ERR_OK) {
+            LogError << "get_order_data failed, ret=" << ret << ".";
+            return ret;
+        }
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::get_order_data(const std::vector<float> &areas, const std::vector<float> &inter,
+                                             std::vector<int> &order, const InitParam &initParam) {
+    int i = order[0];
+    std::vector<float> ovr;
+    for (unsigned int j = 1; j < order.size(); ++j) {
+        ovr.push_back(inter[j - 1] / (areas[i] + areas[order[j]] - inter[j - 1]));
+    }
+    std::vector<int> inds;
+    for (unsigned int j = 0; j < ovr.size(); ++j) {
+        if (ovr[j] <= initParam.nmsThershold) {
+            inds.push_back(j);
+        }
+    }
+    std::vector<int> order_new;
+    for (unsigned int j = 0; j < inds.size(); ++j) {
+        order_new.push_back(order[inds[j] + 1]);
+    }
+    order.swap(order_new);
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::get_column_data(std::vector<float> &get_vector,
+                                              std::vector<std::vector<float>> &input_vector, int index) {
+    for (unsigned int i = 0; i < input_vector.size(); ++i) {
+        get_vector.push_back(input_vector[i][index]);
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::maxiMum(float x, std::vector<float> &other_x,
+                                      std::vector<int> &order, std::vector<float> &get_x) {
+    for (unsigned int i = 1; i < order.size(); ++i) {
+        if (x > other_x[order[i]]) {
+            get_x.push_back(x);
+        } else {
+            get_x.push_back(other_x[order[i]]);
+        }
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::miniMum(float x, std::vector<float> &other_x,
+                                      std::vector<int> &order, std::vector<float> &get_x) {
+    for (unsigned int i = 1; i < order.size(); ++i) {
+        if (x < other_x[order[i]]) {
+            get_x.push_back(x);
+        } else {
+            get_x.push_back(other_x[order[i]]);
+        }
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::post_process(std::vector<MxBase::TensorBase> outputs, InitParam &initParam) {
+    // post process
+    for (size_t index = 0; index < outputs.size(); index++) {
+        APP_ERROR ret = outputs[index].ToHost();
+        if (ret != APP_ERR_OK) {
+            LogError << GetError(ret) << "tohost fail.";
+            return ret;
+        }
+        get_tensor_output(index, outputs[index], initParam);
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::cvmat_to_tensorbase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase) {
+    const uint32_t dataSize = imageMat.cols * imageMat.rows * MxBase::YUV444_RGB_WIDTH_NU;
+    MxBase::MemoryData memoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE, deviceId_);
+    MxBase::MemoryData memoryDataSrc(imageMat.data, dataSize, MxBase::MemoryData::MEMORY_HOST_MALLOC);
+
+    APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc failed.";
+        return ret;
+    }
+    std::vector<uint32_t> shape = {1, MxBase::YUV444_RGB_WIDTH_NU, static_cast<uint32_t>(imageMat.rows),
+                                   static_cast<uint32_t>(imageMat.cols)};
+    tensorBase = MxBase::TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_UINT8);
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::resize(cv::Mat &srcImageMat, cv::Mat &dstImageMat, const InitParam &initParam) {
+    static constexpr uint32_t resizeHeight = 320;
+    static constexpr uint32_t resizeWidth = 320;
+    cv::resize(srcImageMat, dstImageMat, cv::Size(resizeWidth, resizeHeight));
+    return APP_ERR_OK;
+}
+
+APP_ERROR refinedetDetection::process(const std::string &imgName, InitParam &initParam) {
+    cv::Mat imageMat;
+    APP_ERROR ret = read_image(imgName, imageMat);
+    if (ret != APP_ERR_OK) {
+        LogError << "read_image failed, ret=" << ret << ".";
+        return ret;
+    }
+    initParam.width = imageMat.cols;
+    initParam.height = imageMat.rows;
+    ret = resize(imageMat, imageMat, initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "resize failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    // preprocess a photo to tensor
+    std::vector<MxBase::TensorBase> inputs = {};
+    std::vector<MxBase::TensorBase> outputs = {};
+    MxBase::TensorBase tensorBase;
+    ret = cvmat_to_tensorbase(imageMat, tensorBase);
+    if (ret != APP_ERR_OK) {
+        LogError << "cvmat_to_tensorbase failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    inputs.push_back(tensorBase);
+    ret = inference(inputs, outputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "inference failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    ret = post_process(outputs, initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "post_process failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    imageMat.release();
+    return APP_ERR_OK;
+}
diff --git a/research/cv/RefineDet/infer/mxbase/refinedetDetection/refinedetDetection.h b/research/cv/RefineDet/infer/mxbase/refinedetDetection/refinedetDetection.h
new file mode 100644
index 0000000000000000000000000000000000000000..69633f21c7a555c41eb7a0081659a41ed26c64ba
--- /dev/null
+++ b/research/cv/RefineDet/infer/mxbase/refinedetDetection/refinedetDetection.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MXBASE_REFINEDETDETECTION_H
+#define MXBASE_REFINEDETDETECTION_H
+
+#include <memory>
+#include <utility>
+#include <vector>
+#include <string>
+#include <map>
+#include <opencv2/opencv.hpp>
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+
+extern std::vector<double> g_infer_cost;
+
// Aggregated configuration plus per-image working state for RefineDet inference.
struct InitParam {
    uint32_t deviceId;               // Ascend device id to run on
    std::string labelPath;           // path to the class-name file
    std::string modelPath;           // path to the .om model file
    uint32_t resizeHeight;           // network input height
    uint32_t resizeWidth;            // network input width
    std::vector<std::string> label;  // class names loaded from labelPath
    uint32_t width;                  // original image width (set per image by process())
    uint32_t height;                 // original image height (set per image by process())
    std::vector<std::vector<float>> boxes;   // per-image box buffer
    std::vector<std::vector<float>> scores;  // per-image per-class score buffer
    uint32_t maxBoxes;               // cap on boxes kept after NMS
    // Bug fix: the NMS IoU threshold and minimum score are fractional values
    // (the Python pipeline uses 0.6 and 0.1); declaring them uint32_t truncated
    // any such assignment to 0, disabling the filtering. Field names are kept
    // as-is (including the "Thershold" typo) for source compatibility.
    float nmsThershold;
    float minScore;
    uint32_t numRetinanetBoxes;      // total number of candidate boxes from the network
    uint32_t classNum;               // number of classes (including background)
    uint32_t boxDim;                 // values per box (coordinate count)
};
+
// RefineDet detector built on the MxBase inference stack: image loading,
// preprocessing, model inference, and NMS-based post-processing.
class refinedetDetection {
 public:
    // Load the model described by initParam and bind to the device.
    APP_ERROR Init(const InitParam &initParam);
    // Release resources acquired in Init().
    APP_ERROR DeInit();
    // Run the model on prepared input tensors; fills `outputs`.
    APP_ERROR inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> &outputs);
    // Full pipeline for one image file (read -> preprocess -> infer -> post-process).
    APP_ERROR process(const std::string &imgName, InitParam &initParam);

 protected:
    APP_ERROR read_image(const std::string &imgPath, cv::Mat &imageMat);
    // Resize to the network input resolution.
    APP_ERROR resize(cv::Mat &srcImageMat, cv::Mat &dstImageMat, const InitParam &initParam);
    // Convert an OpenCV image into a device TensorBase.
    APP_ERROR cvmat_to_tensorbase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase);
    APP_ERROR get_tensor_output(size_t index, MxBase::TensorBase output, InitParam &initParam);
    APP_ERROR get_anm_result(InitParam &initParam);
    APP_ERROR post_process(std::vector<MxBase::TensorBase> outputs, InitParam &initParam);
    // Persist final detections (boxes / scores / labels) for the processed image.
    APP_ERROR write_result(std::vector<std::vector<std::vector<float>>> final_boxes,
                           std::vector<std::vector<float>> final_score,
                           std::vector<std::vector<std::string>> final_label);
    // Per-class non-maximum suppression; indices of kept boxes go into `keep`.
    APP_ERROR apply_nms(std::vector<std::vector<float>> &class_boxes,
                        std::vector<float> &class_box_scores, std::vector<int> &keep, const InitParam &initParam);
    // Extract one column of a 2-D float table into `get_vector`.
    APP_ERROR get_column_data(std::vector<float> &get_vector,
                              std::vector<std::vector<float>> &input_vector, int index);
    // Element-wise max/min helpers over the boxes selected by `order`.
    APP_ERROR maxiMum(float x, std::vector<float> &other_x,
                      std::vector<int> &order, std::vector<float> &get_x);
    APP_ERROR miniMum(float x, std::vector<float> &other_x,
                      std::vector<int> &order, std::vector<float> &get_x);
    APP_ERROR get_order_data(const std::vector<float> &areas, const std::vector<float> &inter,
                             std::vector<int> &order, const InitParam &initParam);

 private:
    std::shared_ptr<MxBase::ModelInferenceProcessor> model_;  // MxBase model handle
    MxBase::ModelDesc modelDesc_ = {};                        // model I/O description
    uint32_t deviceId_ = 0;                                   // Ascend device id
};
+#endif
diff --git a/research/cv/RefineDet/infer/sdk/main.py b/research/cv/RefineDet/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..387a63a261eb38b7db990b1109ca809b70078296
--- /dev/null
+++ b/research/cv/RefineDet/infer/sdk/main.py
@@ -0,0 +1,217 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""infer_by_sdk"""
+import argparse
+import json
+import os
+
+import cv2
+import numpy as np
+from pycocotools.coco import COCO
+from StreamManagerApi import MxDataInput, StringVector
+from StreamManagerApi import StreamManagerApi
+import MxpiDataType_pb2 as MxpiDataType
+
# Command-line interface for the RefineDet SDK inference script.
parser = argparse.ArgumentParser(description='"Refinedet infer " "example."')

# Detection post-processing constants.
NUM_CLASSES = 81      # 80 COCO classes + background at index 0
MIN_SCORE = 0.1       # minimum confidence for a detection to be kept
MAX_BOXES = 100       # maximum detections kept per class after NMS
NMS_THERSHOLD = 0.6   # NMS IoU threshold (name kept as-is; "THRESHOLD" typo)

# Gets the Full Path to the Current Script
current_path = os.path.abspath(os.path.dirname(__file__))

# Default locations for the pipeline config, images, annotations, labels, results.
parser.add_argument("--pipeline_path", type=str, default=os.path.join(current_path, "../config/refinedet.pipeline"))
parser.add_argument("--stream_name", type=str, default="refinedet")
parser.add_argument("--img_path", type=str, default=os.path.join(current_path, "../data/coco2017/val2017/"))
parser.add_argument("--instances_path", type=str, default=os.path.join(current_path, "../instances_val2017.json"))
parser.add_argument("--label_path", type=str, default=os.path.join(current_path, "../data/config/coco2017.names"))
parser.add_argument("--res_path", type=str, default=os.path.join(current_path, "../sdk/result/"), required=False)

# Parse once at import time; the __main__ guard re-parses before running infer().
args = parser.parse_args()
+
def apply_nms(all_boxes, all_scores, thres, max_boxes):
    """Greedy non-maximum suppression.

    Args:
        all_boxes: (N, 4) array of boxes as (y1, x1, y2, x2).
        all_scores: (N,) array of confidence scores.
        thres: IoU threshold above which a lower-scored box is suppressed.
        max_boxes: maximum number of indices to keep.

    Returns:
        List of indices into all_boxes, ordered by descending score.
    """
    ys0, xs0, ys1, xs1 = (all_boxes[:, k] for k in range(4))
    box_area = (xs1 - xs0 + 1) * (ys1 - ys0 + 1)

    remaining = all_scores.argsort()[::-1]  # candidate indices, best first
    selected = []

    while remaining.size > 0:
        best = remaining[0]
        selected.append(best)
        if len(selected) >= max_boxes:
            break

        rest = remaining[1:]
        # Intersection rectangle of the best box with every remaining box.
        top = np.maximum(ys0[best], ys0[rest])
        left = np.maximum(xs0[best], xs0[rest])
        bottom = np.minimum(ys1[best], ys1[rest])
        right = np.minimum(xs1[best], xs1[rest])
        overlap = np.maximum(0.0, right - left + 1) * np.maximum(0.0, bottom - top + 1)

        iou = overlap / (box_area[best] + box_area[rest] - overlap)
        # Keep only boxes that do not overlap the chosen one too much.
        remaining = rest[np.where(iou <= thres)[0]]
    return selected
+
def send_data_get_output(stream_name, data_input, stream_manager):
    """Send one encoded image into the stream and fetch the raw model outputs.

    Returns:
        (box, score): box is a (6375, 4) float32 array of predicted boxes,
        score a (6375, 81) float32 array of per-class scores, both decoded
        little-endian from the mxpi_tensorinfer0 plugin output.
    """
    # plug-in id
    in_plugin_id = 0

    # Send data to the plug-in
    unique_id = stream_manager.SendData(stream_name, in_plugin_id, data_input)
    if unique_id < 0:
        print("Failed to send data to stream.")
        exit()
    # Fetch output from the tensor-inference plugin only.
    plugin_names = [b"mxpi_tensorinfer0"]
    name_vector = StringVector()
    for name in plugin_names:
        name_vector.push_back(name)

    infer_result = stream_manager.GetProtobuf(stream_name, 0, name_vector)
    if infer_result[0].errorCode != 0:
        error_message = "GetProtobuf error. errorCode=%d, errorMessage=%s" % (
            infer_result[0].errorCode, infer_result[0].messageName)
        raise AssertionError(error_message)

    # Deserialize the tensor package; '<f4' = little-endian float32.
    tensor_package = MxpiDataType.MxpiTensorPackageList()
    tensor_package.ParseFromString(infer_result[0].messageBuf)
    box = np.frombuffer(tensor_package.tensorPackageVec[0].tensorVec[0].dataStr, dtype='<f4').reshape(6375,
                                                                                                      4)
    score = np.frombuffer(tensor_package.tensorPackageVec[0].tensorVec[1].dataStr, dtype='<f4').reshape(6375,
                                                                                                        81)
    return box, score
+
def parse_img_infer_result(sample, predictions, val_cls_dict, classs_dict):
    """Convert one image's raw boxes/scores into COCO-format records.

    One dict per surviving detection is appended to `predictions`.
    `val_cls_dict` maps class index -> class name; `classs_dict` maps
    class name -> COCO category id.
    """
    raw_boxes = sample['boxes']
    raw_scores = sample['box_scores']
    image_id = sample['img_id']
    h, w = sample['image_shape'][0], sample['image_shape'][1]

    kept_boxes = []
    kept_labels = []
    kept_scores = []

    # Class 0 is background; threshold + NMS each foreground class separately.
    for cls_idx in range(1, NUM_CLASSES):
        cls_scores = raw_scores[:, cls_idx]
        mask = cls_scores > MIN_SCORE
        cls_scores = cls_scores[mask]
        # Scale normalized (y1, x1, y2, x2) boxes to pixel coordinates.
        cls_boxes = raw_boxes[mask] * [h, w, h, w]
        if mask.any():
            keep = apply_nms(cls_boxes, cls_scores, NMS_THERSHOLD, MAX_BOXES)
            cls_boxes = cls_boxes[keep]
            cls_scores = cls_scores[keep]
            kept_boxes += cls_boxes.tolist()
            kept_scores += cls_scores.tolist()
            kept_labels += [classs_dict[val_cls_dict[cls_idx]]] * len(cls_scores)

    for box, label, score in zip(kept_boxes, kept_labels, kept_scores):
        # COCO bbox format is [x, y, width, height].
        predictions.append({
            'image_id': image_id,
            'bbox': [box[1], box[0], box[3] - box[1], box[2] - box[0]],
            'score': score,
            'category_id': label,
        })
    print("Parse the box success")
+
# Images reasoning
def infer():
    """Run SDK inference over every image in args.img_path.

    Builds the stream from the pipeline file, sends each image through it,
    converts the raw outputs to COCO-format records, and dumps them as JSON
    under args.res_path.
    """

    # Create StreamManagerApi object
    stream_manager_api = StreamManagerApi()
    # Use InitManager method init StreamManagerApi
    ret = stream_manager_api.InitManager()
    if ret != 0:
        print("Failed to init Stream manager, ret=%s" % str(ret))
        exit()

    # create streams by pipeline config file
    with open(args.pipeline_path, "rb") as f:
        pipeline_str = f.read()

    # Configuring a stream
    ret = stream_manager_api.CreateMultipleStreams(pipeline_str)
    if ret != 0:
        print("Failed to create Stream, ret=%s" % str(ret))
        exit()

    # Construct the input of the stream
    data_input = MxDataInput()
    # Stream_name encoded in UTF-8
    stream_name = args.stream_name.encode()
    print(stream_name)
    predictions = []
    # Class-index -> class-name mapping, one name per line in the label file.
    with open(args.label_path, 'rt') as f:
        val_cls = f.read().rstrip("\n").split("\n")
    val_cls_dict = {}
    for i, cls in enumerate(val_cls):
        val_cls_dict[i] = cls
    coco_gt = COCO(args.instances_path)
    # Class-name -> COCO category id, needed for the result JSON.
    classs_dict = {}
    cat_ids = coco_gt.loadCats(coco_gt.getCatIds())
    for cat in cat_ids:
        classs_dict[cat["name"]] = cat["id"]

    for file_name in os.listdir(args.img_path):
        pred_data = []
        # Gets the Address of each image
        # NOTE(review): assumes file names are numeric COCO ids like
        # "000000123456.jpg"; non-numeric names would raise ValueError.
        img_id = int(file_name.split('.')[0])
        file_path = args.img_path + file_name
        # Original (h, w, c) shape, used later to rescale boxes.
        size = (cv2.imread(file_path)).shape

        # Read each photo in turn
        with open(file_path, "rb") as f:
            img_data = f.read()
            if not img_data:
                print(f"read empty data from img:{file_name}")
                continue
        # The element value img_data
        data_input.data = img_data
        boxes_output, scores_output = send_data_get_output(stream_name, data_input, stream_manager_api)
        pred_data.append({"boxes": boxes_output,
                          "box_scores": scores_output,
                          "img_id": img_id,
                          "image_shape": size})

        parse_img_infer_result(pred_data[0], predictions, val_cls_dict, classs_dict)
        print(f"Inferred image:{file_name} success!")

    # Save the result in JSON format
    if not os.path.exists(args.res_path):
        os.makedirs(args.res_path)
    with open(args.res_path + 'predictions_test.json', 'w') as f:
        json.dump(predictions, f)
    stream_manager_api.DestroyAllStreams()
+
+
+if __name__ == "__main__":
+    args = parser.parse_args()
+    infer()
diff --git a/research/cv/RefineDet/infer/sdk/perf/eval_by_sdk.py b/research/cv/RefineDet/infer/sdk/perf/eval_by_sdk.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1ca7440afbb40719403664e27afcf05e13c830b
--- /dev/null
+++ b/research/cv/RefineDet/infer/sdk/perf/eval_by_sdk.py
@@ -0,0 +1,72 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""eval for coco map"""
+import argparse
+import os
+
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+
# Command-line interface for the COCO mAP evaluation script.
parser = argparse.ArgumentParser(description='"Refinedet eval " "example."')

# Base path used to resolve the default result/annotation locations.
current_path = os.path.abspath(os.path.join(os.getcwd(), ".."))

# --res_path: JSON produced by the inference script.
parser.add_argument(
    "--res_path",
    type=str,
    help="Get the JSON directory of inferred results",
    default=os.path.join(current_path, "../sdk/result/predictions_test.json"),
    required=False,
)
# --instances_path: COCO ground-truth annotation file.
parser.add_argument(
    "--instances_path",
    type=str,
    help="The annotation file directory for the COCO dataset",
    default=os.path.join(current_path, "../data/coco2017/annotations/instances_val2017.json"),
)
+
def get_eval_result():
    """Evaluate the predictions JSON against COCO ground truth and print mAP."""
    # Exclude every image that contains any crowd (iscrowd=1) annotation
    # from the evaluation set.
    coco_gt = COCO(args.instances_path)
    image_id_flag = coco_gt.getImgIds()
    need_img_ids = []
    print("first dataset is {}".format(len(image_id_flag)))
    for img_id in image_id_flag:
        iscrowd = False
        anno_ids = coco_gt.getAnnIds(imgIds=img_id, iscrowd=None)
        anno = coco_gt.loadAnns(anno_ids)
        for label in anno:
            iscrowd = iscrowd or label["iscrowd"]
        if iscrowd:
            continue
        need_img_ids.append(img_id)

    # Get the eval value
    coco_dt = coco_gt.loadRes(args.res_path)
    E = COCOeval(coco_gt, coco_dt, iouType='bbox')
    E.params.imgIds = need_img_ids
    E.evaluate()
    E.accumulate()
    E.summarize()
    # stats[0] is AP @ IoU=0.50:0.95, the primary COCO metric.
    mAP = E.stats[0]
    print("\n========================================\n")
    print(f"mAP: {mAP}")
+
+
if __name__ == '__main__':
    # Parse CLI args and run the COCO evaluation.
    args = parser.parse_args()
    get_eval_result()
diff --git a/research/cv/RefineDet/infer/sdk/perf/run_eval.sh b/research/cv/RefineDet/infer/sdk/perf/run_eval.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c0920fe3c417a3a5e6b16ad990efef7433923dc9
--- /dev/null
+++ b/research/cv/RefineDet/infer/sdk/perf/run_eval.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Simple log helper functions
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
+export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
+
+#to set PYTHONPATH, import the StreamManagerApi.py
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+python3 eval_by_sdk.py
diff --git a/research/cv/RefineDet/infer/sdk/run.sh b/research/cv/RefineDet/infer/sdk/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..092a8f9d5e86fb2ef10ab06ce2d805d5af739837
--- /dev/null
+++ b/research/cv/RefineDet/infer/sdk/run.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
set -e

# Simple log helper functions
info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }

# NOTE(review): requires MX_SDK_HOME to be exported by the caller.
# Fix: mirror run_eval.sh — main.py imports StreamManagerApi, so the SDK
# libraries and python bindings must be on LD_LIBRARY_PATH / PYTHONPATH;
# this script previously exported only the gstreamer variables.
export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH}
export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python

# Positional arguments: pipeline config, input image directory, result directory.
pipeline_path=$1
img_path=$2
res_path=$3

# Quote expansions so paths containing spaces are passed intact.
python3 main.py --pipeline_path "${pipeline_path}" --img_path "${img_path}" --res_path "${res_path}"
diff --git a/research/cv/RefineDet/modelart/refinedet.py b/research/cv/RefineDet/modelart/refinedet.py
new file mode 100644
index 0000000000000000000000000000000000000000..10a9b3f1f95e4fcebd1bdd7b1cd6e9892cc3b413
--- /dev/null
+++ b/research/cv/RefineDet/modelart/refinedet.py
@@ -0,0 +1,257 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Train RefineDet and get checkpoint files."""
+import argparse
+import ast
+import os
+import numpy as np
+import mindspore
+import mindspore.nn as nn
+from mindspore import context, Tensor
+from mindspore.communication.management import init, get_rank
+from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor
+from mindspore.train import Model
+from mindspore.context import ParallelMode
+from mindspore.train.serialization import load_checkpoint, load_param_into_net, export
+from mindspore.common import set_seed, dtype
+from src.config import get_config
+from src.dataset import create_refinedet_dataset, create_mindrecord
+from src.lr_schedule import get_lr
+from src.init_params import init_net_param
+from src.refinedet import refinedet_vgg16, refinedet_resnet101, RefineDetInferWithDecoder
+from src.refinedet_loss_cell import RefineDetLossCell, TrainingWrapper
+from src.box_utils import box_init
+
+set_seed(1)
+
+
def get_args():
    """Build and parse the command-line arguments for RefineDet training.

    Returns:
        argparse.Namespace holding all training / export options.
    """
    parser = argparse.ArgumentParser(description="RefineDet training script")
    parser.add_argument("--using_mode", type=str, default="refinedet_vgg16_320",
                        choices=("refinedet_vgg16_320", "refinedet_vgg16_512",
                                 "refinedet_resnet101_320", "refinedet_resnet101_512"),
                        help="which network you want to train, we present four networks: "
                             "using vgg16 as backbone with 320x320 image size"
                             "using vgg16 as backbone with 512x512 image size"
                             "using resnet101 as backbone with 320x320 image size"
                             "using resnet101 as backbone with 512x512 image size")
    # Help text fixed: the actual default is True, not False.
    parser.add_argument("--run_online", type=ast.literal_eval, default=True,
                        help="Run on Modelarts platform, need data_url, train_url if true, default is True.")
    parser.add_argument("--data_url", type=str, default="JinPeng/RefineDET/data/COCO2017",
                        help="using for OBS file system")
    parser.add_argument("--train_url", type=str, default="JinPeng/RefineDET/output",
                        help="using for OBS file system")
    parser.add_argument("--pre_trained_url", type=str, default=None, help="Pretrained Checkpoint file url for OBS.")
    parser.add_argument("--run_platform", type=str, default="Ascend", choices=("Ascend", "GPU", "CPU"),
                        help="run platform, support Ascend, GPU and CPU.")
    parser.add_argument("--only_create_dataset", type=ast.literal_eval, default=False,
                        help="If set it true, only create Mindrecord, default is False.")
    parser.add_argument("--distribute", type=ast.literal_eval, default=False,
                        help="Run distribute, default is False.")
    parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
    parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
    parser.add_argument("--lr", type=float, default=0.05, help="Learning rate, default is 0.05.")
    parser.add_argument("--mode", type=str, default="sink", help="Run sink mode or not, default is sink.")
    parser.add_argument("--dataset", type=str, default="coco",
                        help="Dataset, default is coco."
                             "Now we have coco, voc0712, voc0712plus")
    # Help text fixed: the actual default is 1, not 500.
    parser.add_argument("--epoch_size", type=int, default=1, help="Epoch size, default is 1.")
    parser.add_argument("--batch_size", type=int, default=32, help="Batch size, default is 32.")
    parser.add_argument("--pre_trained", type=str, default=None, help="Pretrained Checkpoint file path.")
    parser.add_argument("--pre_trained_epoch_size", type=int, default=0, help="Pretrained epoch size.")
    parser.add_argument("--save_checkpoint_epochs", type=int, default=10, help="Save checkpoint epochs, default is 10.")
    parser.add_argument("--loss_scale", type=int, default=1024, help="Loss scale, default is 1024.")
    parser.add_argument("--filter_weight", type=ast.literal_eval, default=False,
                        help="Filter head weight parameters, default is False.")
    parser.add_argument('--debug', type=str, default="0", choices=["0", "1", "2", "3"],
                        help="Active the debug mode. 0 for no debug mode,"
                             "Under debug mode 1, the network would be run in PyNative mode,"
                             "Under debug mode 2, all ascend log would be print on stdout,"
                             "Under debug mode 3, all ascend log would be print on stdout."
                             "And network will run in PyNative mode.")
    parser.add_argument("--check_point", type=str, default="./ckpt",
                        help="The directory path to save check point files")
    parser.add_argument("--file_name", type=str, default="refinedet", help="output file name.")
    parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR", "ONNX"], default='AIR',
                        help='output file format')
    parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend")
    args_opt = parser.parse_args()
    return args_opt
+
+
def refinedet_model_build(config, args_opt):
    """Instantiate the RefineDet backbone selected by config.model.

    args_opt is not consulted here; it is kept for interface compatibility.
    Raises ValueError for an unknown config.model.
    """
    builders = {
        "refinedet_vgg16": refinedet_vgg16,
        "refinedet_resnet101": refinedet_resnet101,
    }
    if config.model not in builders:
        raise ValueError(f'config.model: {config.model} is not supported')
    network = builders[config.model](config=config)
    init_net_param(network)
    return network
+
def export_to_air(args_opt, path, config):
    """Load the checkpoint at `path` into a fresh inference network and export it.

    NOTE(review): the export format is hard-coded to "AIR" here, ignoring
    args_opt.file_format; and the output name is built as
    os.path.join(args_opt.ckpt_file, args_opt.file_name) + 'refinedet'
    (no separator, e.g. "refinedetrefinedet") — confirm both are intended.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)
    if args_opt.device_target == "Ascend":
        context.set_context(device_id=args_opt.device_id)
    # Default anchor boxes are baked into the inference-with-decoder wrapper.
    default_boxes = box_init(config)
    if config.model == "refinedet_vgg16":
        net = refinedet_vgg16(config=config)
    elif config.model == "refinedet_resnet101":
        net = refinedet_resnet101(config=config)
    else:
        raise ValueError(f'config.model: {config.model} is not supported')
    net = RefineDetInferWithDecoder(net, Tensor(default_boxes), config)

    param_dict = load_checkpoint(path)
    net.init_parameters_data()
    load_param_into_net(net, param_dict)
    net.set_train(False)

    # Dummy NCHW input that fixes the exported graph's input shape.
    input_shp = [args_opt.batch_size, 3] + config.img_shape
    input_array = Tensor(np.random.uniform(-1.0, 1.0, size=input_shp), mindspore.float32)
    filename = os.path.join(args_opt.ckpt_file, args_opt.file_name)
    export(net, input_array, file_name=filename + 'refinedet', file_format="AIR")
+
+
def train_main(args_opt):
    """Build dataset, network and optimizer, train RefineDet, then export to AIR.

    Expects args_opt.ckpt_file to be set by the caller (see main()).
    """
    rank = 0
    device_num = 1
    # config with args
    config = get_config(args_opt.using_mode, args_opt.dataset)
    if args_opt.data_root:
        config.coco_root = args_opt.data_root

    # run mode config
    # Debug modes 1 and 3 force PyNative execution for easier debugging.
    if args_opt.debug == "1" or args_opt.debug == "3":
        network_mode = context.PYNATIVE_MODE
    else:
        network_mode = context.GRAPH_MODE

    # set run platform
    if args_opt.run_platform == "CPU":
        context.set_context(mode=network_mode, device_target="CPU")
    else:
        context.set_context(mode=network_mode, device_target=args_opt.run_platform, device_id=args_opt.device_id)
        if args_opt.distribute:
            # Data-parallel training across args_opt.device_num devices.
            device_num = args_opt.device_num
            context.reset_auto_parallel_context()
            context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                                              device_num=device_num)
            init()
            rank = get_rank()

    mindrecord_file = create_mindrecord(config, args_opt.dataset, "refinedet.mindrecord", True)

    if args_opt.only_create_dataset:
        return

    loss_scale = float(args_opt.loss_scale)
    if args_opt.run_platform == "CPU":
        loss_scale = 1.0

    # When create MindDataset, using the fitst mindrecord file, such as
    # refinedet.mindrecord0.
    use_multiprocessing = (args_opt.run_platform != "CPU")
    dataset = create_refinedet_dataset(config, mindrecord_file, repeat_num=1, batch_size=args_opt.batch_size,
                                       device_num=device_num, rank=rank, use_multiprocessing=use_multiprocessing)

    dataset_size = dataset.get_dataset_size()
    print(f"Create dataset done! dataset size is {dataset_size}")
    refinedet = refinedet_model_build(config, args_opt)
    # Cast to float16 where the config allows it (and always on GPU).
    if ("use_float16" in config and config.use_float16) or args_opt.run_platform == "GPU":
        refinedet.to_float(dtype.float16)
    net = RefineDetLossCell(refinedet, config)

    # checkpoint
    ckpt_config = CheckpointConfig(save_checkpoint_steps=dataset_size * args_opt.save_checkpoint_epochs)
    ckpt_prefix = args_opt.check_point + '/ckpt_'
    save_ckpt_path = ckpt_prefix + str(rank) + '/'
    strrank = str(rank)
    print("strrank:", strrank)
    ckpoint_cb = ModelCheckpoint(prefix="refinedet", directory=save_ckpt_path, config=ckpt_config)


    if args_opt.pre_trained:
        param_dict = load_checkpoint(args_opt.pre_trained)
        load_param_into_net(net, param_dict, True)

    # Learning-rate schedule; global_step resumes from the pretrained offset.
    lr = Tensor(get_lr(global_step=args_opt.pre_trained_epoch_size * dataset_size,
                       lr_init=config.lr_init, lr_end=config.lr_end_rate * args_opt.lr, lr_max=args_opt.lr,
                       warmup_epochs=config.warmup_epochs,
                       total_epochs=args_opt.epoch_size,
                       steps_per_epoch=dataset_size))

    if "use_global_norm" in config and config.use_global_norm:
        # With global-norm clipping the optimizer's loss scale stays 1.0;
        # the wrapper handles scaling instead.
        opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
                          config.momentum, config.weight_decay, 1.0)
        net = TrainingWrapper(net, opt, loss_scale, True)
    else:
        opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
                          config.momentum, config.weight_decay, loss_scale)
        net = TrainingWrapper(net, opt, loss_scale)

    callback = [TimeMonitor(data_size=dataset_size), LossMonitor(), ckpoint_cb]
    model = Model(net)
    dataset_sink_mode = False
    if args_opt.mode == "sink" and args_opt.run_platform != "CPU":
        print("In sink mode, one epoch return a loss.")
        dataset_sink_mode = True
    print("Start train RefineDet, the first epoch will be slower because of the graph compilation.")
    model.train(args_opt.epoch_size, dataset, callbacks=callback, dataset_sink_mode=dataset_sink_mode)

    # Locate the checkpoint written by ModelCheckpoint and export it to AIR.
    # NOTE(review): reads args_opt.ckpt_file unconditionally — confirm it is
    # always set by the caller before train_main() runs.
    filepath = os.path.join(args_opt.ckpt_file, "ckpt_" + strrank + "/")
    path = os.path.join(filepath, "refinedet" + "-" + str(args_opt.epoch_size) + "_" + str(dataset_size) + ".ckpt")
    print("Path:", path)
    export_to_air(args_opt, path, config)
+
+
def main():
    """Entry point: resolve paths (ModelArts/OBS or local), configure debug
    logging, run training, and copy checkpoints back to OBS when online."""
    args_opt = get_args()
    args_opt.data_root = 0  # falsy -> train_main() keeps config.coco_root default
    # copy files if online
    if args_opt.run_online:
        import moxing as mox
        # DEVICE_ID / RANK_SIZE are injected by the ModelArts runtime.
        args_opt.device_id = int(os.getenv('DEVICE_ID'))
        args_opt.device_num = int(os.getenv('RANK_SIZE'))
        dir_root = os.getcwd()
        data_root = os.path.join(dir_root, "data")
        ckpt_root = os.path.join(dir_root, args_opt.check_point)
        mox.file.copy_parallel(args_opt.data_url, data_root)
        args_opt.data_root = data_root
        args_opt.ckpt_file = ckpt_root

        if args_opt.pre_trained:
            mox.file.copy_parallel(args_opt.pre_trained_url, args_opt.pre_trained)
    else:
        # Bug fix: ckpt_file was only assigned on the online path, but
        # train_main() and export_to_air() read it unconditionally, so
        # offline runs crashed with AttributeError. Fall back to the local
        # checkpoint directory.
        args_opt.ckpt_file = args_opt.check_point
    # print log to stdout
    if args_opt.debug == "2" or args_opt.debug == "3":
        os.environ["SLOG_PRINT_TO_STDOUT"] = "1"
        os.environ["ASCEND_SLOG_PRINT_TO_STDOUT"] = "1"
        os.environ["ASCEND_GLOBAL_LOG_LEVEL"] = "1"
    train_main(args_opt)
    context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)
    if args_opt.device_target == "Ascend":
        context.set_context(device_id=args_opt.device_id)
    if args_opt.run_online:
        # ckpt_root is always defined here: both blocks are guarded by run_online.
        mox.file.copy_parallel(ckpt_root, args_opt.train_url)
+
+
# Script entry point.
if __name__ == '__main__':
    main()