diff --git a/.jenkins/check/config/filter_cpplint.txt b/.jenkins/check/config/filter_cpplint.txt
index b7d06117087858230634bae29f841d0e60500f21..2be251ffb1be97b362b64c28bb3ac548ffa187e4 100644
--- a/.jenkins/check/config/filter_cpplint.txt
+++ b/.jenkins/check/config/filter_cpplint.txt
@@ -49,4 +49,7 @@
 "models/official/cv/posenet/infer/mxbase/src/Posenet.cpp" "runtime/references"
 "models/official/cv/posenet/infer/mxbase/src/main.cpp" "runtime/references"
 
-"models/research/cv/ibnnet/infer/mxbase/src/IbnnetOpencv.h" "runtime/references"
\ No newline at end of file
+"models/research/cv/ibnnet/infer/mxbase/src/IbnnetOpencv.h" "runtime/references"
+
+"models/research/cv/fairmot/infer/mxbase/src/Fairmot.h" "runtime/references"
+"models/research/cv/fairmot/infer/mxbase/src/PostProcess/FairmotMindsporePost.h" "runtime/references"
\ No newline at end of file
diff --git a/research/cv/fairmot/Dockerfile b/research/cv/fairmot/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..053bf80cd2309a41b6033b3e8d5ab4f87d41bd5e
--- /dev/null
+++ b/research/cv/fairmot/Dockerfile
@@ -0,0 +1,5 @@
+ARG FROM_IMAGE_NAME
+FROM ${FROM_IMAGE_NAME}
+
+COPY requirements.txt .
+RUN pip3.7 install -r requirements.txt
\ No newline at end of file
diff --git a/research/cv/fairmot/fairmot_train.py b/research/cv/fairmot/fairmot_train.py
index e35337d85eaf61dd6161dff803501ec42a554798..c9217ae13497df1d70e5c9fa477bb92696f86c3e 100644
--- a/research/cv/fairmot/fairmot_train.py
+++ b/research/cv/fairmot/fairmot_train.py
@@ -21,7 +21,6 @@ from mindspore import dtype as mstype
 from mindspore import Model
 import mindspore.nn as nn
 import mindspore.dataset as ds
-from mindspore.common import set_seed
 from mindspore.context import ParallelMode
 from mindspore.train.callback import TimeMonitor, ModelCheckpoint, CheckpointConfig
 from mindspore.communication.management import init
@@ -34,8 +33,7 @@ from src.utils.lr_schedule import dynamic_lr
 from src.utils.jde import JointDataset
 from src.utils.callback import LossCallback
 
-set_seed(1234)
-ds.config.set_seed(1234)
+
 def train(opt):
     """train fairmot."""
     local_data_path = '/cache/data'
@@ -71,7 +69,8 @@ def train(opt):
         context.set_context(device_id=device_id, mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
         context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
                                           gradients_mean=True,
-                                          device_num=device_num
+                                          device_num=device_num,
+                                          parameter_broadcast=True
                                           )
         init()
     else:
diff --git a/research/cv/fairmot/infer/Dockerfile b/research/cv/fairmot/infer/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..053bf80cd2309a41b6033b3e8d5ab4f87d41bd5e
--- /dev/null
+++ b/research/cv/fairmot/infer/Dockerfile
@@ -0,0 +1,5 @@
+ARG FROM_IMAGE_NAME
+FROM ${FROM_IMAGE_NAME}
+
+COPY requirements.txt .
+RUN pip3.7 install -r requirements.txt
\ No newline at end of file
diff --git a/research/cv/fairmot/infer/README_CN.md b/research/cv/fairmot/infer/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..96c9d5631c35d80ba766ec4ff9f59de9c256efe0
--- /dev/null
+++ b/research/cv/fairmot/infer/README_CN.md
@@ -0,0 +1,182 @@
+# Inference
+
+## Model Conversion
+
+   ```bash
+
+   cd infer/convert
+
+   ```
+
+1. Prepare the model file.
+
+   The AIR model is exported on an Ascend 910 server. For the detailed steps to export the AIR model, see "Model Training".
+
+2. Run the following command to convert the model.
+
+   For conversion details, see the conversion script and the corresponding AIPP configuration file. The conversion command is as follows.
+
+   **bash convert_om.sh** *air_path* *aipp_cfg_path* *om_path*
+
+   | Parameter     | Description                                                        |
+   | ------------- | ------------------------------------------------------------------ |
+   | air_path      | Path of the input AIR file.                                        |
+   | aipp_cfg_path | Path of the AIPP configuration file.                               |
+   | om_path       | Name of the generated OM file; the script appends the .om suffix. |
+
+   A conversion example is shown below.
+
+   ```bash
+
+   # Convert the model
+   bash convert_om.sh fairmot.air aipp_rgb.cfg fairmot
+
+   ```
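+
+   As configured in `aipp_rgb.cfg`, AIPP performs part of the preprocessing on the device: it swaps the R and B channels of the RGB888 input and scales each channel by 1/255 (`var_reci_chn_* : 0.003921568627451`).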
+
+## Downloading the Inference Dataset
+
+1. Download the inference dataset [MOT20](https://motchallenge.net/data/MOT20/).
+
+2. Place the dataset in the `infer/data/MOT20` directory.
+
+3. The directory structure is as follows:
+
+```text
+   └─fairmot
+      └─infer
+         ├─data
+         │ ├─MOT20
+         │ │ └─train
+         │ │    ├─MOT20-01
+         │ │    ├─MOT20-02
+         │ │    ├─MOT20-03
+         │ │    └─MOT20-05
+         │ └─data.json
+         ├─convert                  // model conversion script
+         ├─mxbase                   // mxBase inference code
+         └─sdk                      // SDK inference code
+```
+
+## mxBase Inference
+
+   ```bash
+
+   cd infer/mxbase
+
+   ```
+
+1. Build the project.
+
+   Currently, mxBase inference only supports DVPP-based inference.
+
+   ```bash
+
+   bash build.sh
+
+   ```
+
+2. (Optional) Modify the configuration file.
+
+   Modify it as needed. The configuration is in `mxbase/src/main.cpp`; the modifiable parameters are as follows.
+
+   ```c++
+
+   namespace {
+   const uint32_t DEVICE_ID = 0;
+   } // namespace
+   ...
+
+   ```
+
+3. Run the inference service.
+
+   Run the inference service:
+   **./build/fairmot_mindspore**  *om_path* *img_path*
+   | Parameter | Description                                                  |
+   | --------- | ------------------------------------------------------------ |
+   | om_path   | Path of the OM file. Example: `../convert/fairmot.om`.      |
+   | img_path  | Path of the inference images. Example: `../../data/MOT20/`. |
+
+   ```bash
+
+   ./build/fairmot_mindspore ../convert/fairmot.om ../../data/MOT20/
+
+   ```
+
+4. Check the results.
+   The inference results are saved in txt format under `../../data/MOT20/result_Files`.
+
+5. Visualize the results and compute the accuracy.
+
+   Run the accuracy test and visualization:
+   **python3.7 mx_base_eval.py**  *result_path*
+   | Parameter   | Description                                                  |
+   | ----------- | ------------------------------------------------------------ |
+   | result_path | Path of the inference results. Example: `../../data/MOT20`. |
+
+   ```bash
+
+   cd ../../
+   python3.7 mx_base_eval.py --data_dir data/MOT20
+
+   ```
+
+6. View the accuracy results and the visualization.
+
+   Images are saved in `data/MOT20/result`. After `mx_base_eval.py` finishes, the accuracy results are printed to the terminal and saved as an xlsx file in `data/MOT20`.
+
+## MindX SDK Inference
+
+   ```bash
+
+   cd infer/sdk
+
+   ```
+
+1. (Optional) Modify the configuration file.
+
+   1. Modify the pipeline file as needed.
+
+      ```text
+
+      ├── config
+      │   ├── config.py
+      │   └── fairmot.pipeline      # pipeline file
+
+      ```
+
+2. Run model inference.
+
+   1. Run inference.
+
+      Switch to the sdk directory and run the inference script.
+      **python main.py**  *img_path* *pipeline_path* *infer_result_path* *infer_mode*
+      | Parameter         | Description                                                       |
+      | ----------------- | ----------------------------------------------------------------- |
+      | img_path          | Path of the inference images. Example: `../data/MOT20`.          |
+      | pipeline_path     | Path of the pipeline file. Example: `./config/fairmot.pipeline`. |
+      | infer_result_path | Path for storing the inference results. Example: `../data/infer_result`. |
+      | infer_mode        | Inference mode. Defaults to `infer`; use `eval` to obtain the accuracy comparison directly. |
+
+      ```bash
+
+      python3.7 main.py --img_path ../data/MOT20 --pipeline_path ./config/fairmot.pipeline --infer_result_path ../data/infer_result
+
+      ```
+
+   2. View the inference results.
+      The inference results are saved as bin files in the `../data/infer_result` directory.
+3. Run the accuracy test and visualization.
+
+   Switch to the fairmot directory and run the evaluation script:
+      **python3.7 sdk_eval.py**  *img_path*
+
+   ```bash
+   cd ../
+   python3.7 sdk_eval.py --data_dir ./data/MOT20
+   ```
+
+4. View the accuracy results and the visualization.
+
+   Images are saved in `data/MOT20/results`. After `sdk_eval.py` finishes, the accuracy results are printed to the terminal and saved as an xlsx file in `data/MOT20`.
diff --git a/research/cv/fairmot/infer/convert/aipp_rgb.cfg b/research/cv/fairmot/infer/convert/aipp_rgb.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..1d9e9285cbf5770920166365328441dcfcf15ad4
--- /dev/null
+++ b/research/cv/fairmot/infer/convert/aipp_rgb.cfg
@@ -0,0 +1,10 @@
+aipp_op {
+    aipp_mode : static
+    input_format : RGB888_U8
+    related_input_rank : 0
+    csc_switch : false
+    rbuv_swap_switch : true
+    var_reci_chn_0 : 0.003921568627451
+    var_reci_chn_1 : 0.003921568627451
+    var_reci_chn_2 : 0.003921568627451
+}
\ No newline at end of file
diff --git a/research/cv/fairmot/infer/convert/convert_om.sh b/research/cv/fairmot/infer/convert/convert_om.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a3ce8996bea503e670150c0d17d28fa88c760224
--- /dev/null
+++ b/research/cv/fairmot/infer/convert/convert_om.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [ $# -ne 3 ]; then
+  echo "Wrong parameter format."
+  echo "Usage:"
+  echo "         bash $0 [INPUT_AIR_PATH] [AIPP_PATH] [OUTPUT_OM_PATH_NAME]"
+  echo "Example: "
+  echo "         bash convert_om.sh  xxx.air ./aipp.cfg xx"
+
+  exit 1
+fi
+
+input_air_path=$1
+aipp_cfg_file=$2
+output_om_path=$3
+
+export install_path=/usr/local/Ascend/
+
+export ASCEND_ATC_PATH=${install_path}/atc
+export LD_LIBRARY_PATH=${install_path}/atc/lib64:$LD_LIBRARY_PATH
+export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
+export PYTHONPATH=${install_path}/atc/python/site-packages:${install_path}/latest/atc/python/site-packages/auto_tune.egg/auto_tune:${install_path}/atc/python/site-packages/schedule_search.egg
+export ASCEND_OPP_PATH=${install_path}/opp
+
+export ASCEND_SLOG_PRINT_TO_STDOUT=1
+
+echo "Input AIR file path: ${input_air_path}"
+echo "Output OM file path: ${output_om_path}"
+
+atc --input_format=NCHW \
+  --framework=1 \
+  --model="${input_air_path}" \
+  --input_shape="x:1, 3, 608, 1088" \
+  --output="${output_om_path}" \
+  --insert_op_conf="${aipp_cfg_file}" \
+  --soc_version=Ascend310 \
+  --op_select_implmode=high_precision \
+  --output_type=FP32
diff --git a/research/cv/fairmot/infer/docker_start_infer.sh b/research/cv/fairmot/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..71ecca70cb01657a7f6ea54b5fef3270bf36cdef
--- /dev/null
+++ b/research/cv/fairmot/infer/docker_start_infer.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+#coding = utf-8
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the BSD 3-Clause License  (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+data_dir=$2
+
+function show_help() {
+    echo "Usage: docker_start.sh docker_image data_dir"
+}
+
+function param_check() {
+    if [ -z "${docker_image}" ]; then
+        echo "please input docker_image"
+        show_help
+        exit 1
+    fi
+
+    if [ -z "${data_dir}" ]; then
+        echo "please input data_dir"
+        show_help
+        exit 1
+    fi
+}
+
+param_check
+
+docker run -it \
+  --device=/dev/davinci3 \
+  --device=/dev/davinci_manager \
+  --device=/dev/devmm_svm \
+  --device=/dev/hisi_hdc \
+  -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+  -v ${data_dir}:${data_dir} \
+  ${docker_image} \
+  /bin/bash
diff --git a/research/cv/fairmot/infer/mxbase/CMakeLists.txt b/research/cv/fairmot/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..44298b8f69424f6aef06511484373fb96275824e
--- /dev/null
+++ b/research/cv/fairmot/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,43 @@
+cmake_minimum_required(VERSION 3.5.2)
+project(fairmot)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+
+#set(PLUGIN_NAME "fairmot_mindspore_post")
+set(TARGET_LIBRARY fairmot_mindspore_post)
+set(TARGET_MAIN fairmot_mindspore)
+
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/nnrt/latest/acllib)
+
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+include_directories($ENV{MX_SDK_HOME}/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/opencv4)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/gstreamer-1.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/glib-2.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/lib/glib-2.0/include)
+message($ENV{MX_SDK_HOME})
+link_directories($ENV{MX_SDK_HOME}/lib)
+link_directories($ENV{MX_SDK_HOME}/opensource/lib/)
+
+
+add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations)
+add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}")
+add_compile_options("-Dgoogle=mindxsdk_private")
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+
+include_directories(${ACL_LIB_PATH}/include)
+link_directories(${ACL_LIB_PATH}/lib64/)
+
+add_library(${TARGET_LIBRARY} SHARED src/PostProcess/FairmotMindsporePost.cpp)
+
+target_link_libraries(${TARGET_LIBRARY} glib-2.0 gstreamer-1.0 gobject-2.0 gstbase-1.0 gmodule-2.0)
+target_link_libraries(${TARGET_LIBRARY} plugintoolkit mxpidatatype mxbase)
+target_link_libraries(${TARGET_LIBRARY} -Wl,-z,relro,-z,now,-z,noexecstack -s)
+
+message("TARGET_LIBRARY:${TARGET_LIBRARY}.")
+
+
+add_executable(${TARGET_MAIN} src/main.cpp src/Fairmot.cpp)
+target_link_libraries(${TARGET_MAIN} ${TARGET_LIBRARY} glog  cpprest mxbase libascendcl.so opencv_world)
diff --git a/research/cv/fairmot/infer/mxbase/build.sh b/research/cv/fairmot/infer/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..119b5594808c2f35f9d96feafee9690e78117058
--- /dev/null
+++ b/research/cv/fairmot/infer/mxbase/build.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+# env
+
+mkdir -p build
+cd build || exit
+
+function make_plugin() {
+  if ! cmake ..; then
+    echo "cmake failed."
+    return 1
+  fi
+
+  if ! (make); then
+    echo "make failed."
+    return 1
+  fi
+
+  return 0
+}
+
+if make_plugin; then
+  echo "INFO: Build successfully."
+else
+  echo "ERROR: Build failed."
+fi
+
+cd - || exit
diff --git a/research/cv/fairmot/infer/mxbase/src/Fairmot.cpp b/research/cv/fairmot/infer/mxbase/src/Fairmot.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..935d76ecb28d321f68597251d9bffbd9262f249f
--- /dev/null
+++ b/research/cv/fairmot/infer/mxbase/src/Fairmot.cpp
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Fairmot.h"
+
+#include <dirent.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#define BOOST_BIND_GLOBAL_PLACEHOLDERS
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+#include <boost/property_tree/json_parser.hpp>
+#include <opencv4/opencv2/core.hpp>
+#include <opencv4/opencv2/opencv.hpp>
+
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+#include "acl/acl.h"
+namespace {
+const uint32_t YUV_BYTE_NU = 3;
+const uint32_t YUV_BYTE_DE = 2;
+const uint32_t FRAME_RATE = 25;
+const uint32_t MODEL_HEIGHT = 768;
+const uint32_t MODEL_WIDTH = 1280;
+const float CONF_THRES = 0.4;
+}  // namespace
+
+void PrintTensorShape(const std::vector<MxBase::TensorDesc> &tensorDescVec,
+                      const std::string &tensorName) {
+  LogInfo << "The shape of " << tensorName << " is as follows:";
+  for (size_t i = 0; i < tensorDescVec.size(); ++i) {
+    LogInfo << "  Tensor " << i << ":";
+    for (size_t j = 0; j < tensorDescVec[i].tensorDims.size(); ++j) {
+      LogInfo << "   dim: " << j << ": " << tensorDescVec[i].tensorDims[j];
+    }
+  }
+}
+
+APP_ERROR Fairmot::Init(const InitParam &initParam) {
+  deviceId_ = initParam.deviceId;
+  APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+  if (ret != APP_ERR_OK) {
+    LogError << "Init devices failed, ret=" << ret << ".";
+    return ret;
+  }
+  ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+  if (ret != APP_ERR_OK) {
+    LogError << "Set context failed, ret=" << ret << ".";
+    return ret;
+  }
+  dvppWrapper_ = std::make_shared<MxBase::DvppWrapper>();
+  ret = dvppWrapper_->Init();
+  if (ret != APP_ERR_OK) {
+    LogError << "DvppWrapper init failed, ret=" << ret << ".";
+    return ret;
+  }
+  model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
+  ret = model_->Init(initParam.modelPath, modelDesc_);
+  if (ret != APP_ERR_OK) {
+    LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+    return ret;
+  }
+
+  PrintTensorShape(modelDesc_.inputTensors, "Model Input Tensors");
+  PrintTensorShape(modelDesc_.outputTensors, "Model Output Tensors");
+
+  MxBase::ConfigData configData;
+  const std::string checkTensor = initParam.checkTensor ? "true" : "false";
+
+  configData.SetJsonValue("CLASS_NUM", std::to_string(initParam.classNum));
+  configData.SetJsonValue("SCORE_THRESH",
+                          std::to_string(initParam.scoreThresh));
+  configData.SetJsonValue("IOU_THRESH", std::to_string(initParam.iouThresh));
+  configData.SetJsonValue("CHECK_MODEL", checkTensor);
+
+  auto jsonStr = configData.GetCfgJson().serialize();
+  std::map<std::string, std::shared_ptr<void>> config;
+  config["postProcessConfigContent"] = std::make_shared<std::string>(jsonStr);
+  post_ = std::make_shared<MxBase::FairmotMindsporePost>();
+  ret = post_->Init(config);
+  if (ret != APP_ERR_OK) {
+    LogError << "Fairmot init failed, ret=" << ret << ".";
+    return ret;
+  }
+  return APP_ERR_OK;
+}
+
+APP_ERROR Fairmot::DeInit() {
+  dvppWrapper_->DeInit();
+  model_->DeInit();
+  post_->DeInit();
+  MxBase::DeviceManager::GetInstance()->DestroyDevices();
+  return APP_ERR_OK;
+}
+
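+// Allocate device-side output tensors according to the model description,
+// run the model, and add the elapsed time to inferCostTimeMilliSec.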
+APP_ERROR Fairmot::Inference(const std::vector<MxBase::TensorBase> &inputs,
+                             std::vector<MxBase::TensorBase> &outputs) {
+  auto dtypes = model_->GetOutputDataType();
+  for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+    std::vector<uint32_t> shape = {};
+    for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+      shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
+    }
+    MxBase::TensorBase tensor(shape, dtypes[i],
+                              MxBase::MemoryData::MemoryType::MEMORY_DEVICE,
+                              deviceId_);
+    APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
+    if (ret != APP_ERR_OK) {
+      LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+      return ret;
+    }
+    outputs.push_back(tensor);
+  }
+  MxBase::DynamicInfo dynamicInfo = {};
+  dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+  auto startTime = std::chrono::high_resolution_clock::now();
+  APP_ERROR ret = model_->ModelInference(inputs, outputs, dynamicInfo);
+  auto endTime = std::chrono::high_resolution_clock::now();
+  double costMs = std::chrono::duration<double, std::milli>(endTime - startTime)
+                      .count();  // save time
+  inferCostTimeMilliSec += costMs;
+  if (ret != APP_ERR_OK) {
+    LogError << "ModelInference failed, ret=" << ret << ".";
+    return ret;
+  }
+  return APP_ERR_OK;
+}
+
+APP_ERROR Fairmot::PostProcess(const std::vector<MxBase::TensorBase> &inputs,
+                               MxBase::JDETracker &tracker,
+                               MxBase::Files &file) {
+  APP_ERROR ret = post_->Process(inputs, tracker, file);
+  if (ret != APP_ERR_OK) {
+    LogError << "Process failed, ret=" << ret << ".";
+    return ret;
+  }
+  return APP_ERR_OK;
+}
+
+void SaveInferResult(const std::vector<MxBase::ObjectInfo> &objInfos,
+                     const std::string &resultPath) {
+  if (objInfos.empty()) {
+    LogWarn << "The predict result is empty.";
+    return;
+  }
+
+  namespace pt = boost::property_tree;
+  pt::ptree root, data;
+  int index = 0;
+  for (auto &obj : objInfos) {
+    ++index;
+    LogInfo << "BBox[" << index << "]:[x0=" << obj.x0 << ", y0=" << obj.y0
+            << ", x1=" << obj.x1 << ", y1=" << obj.y1
+            << "], confidence=" << obj.confidence << ", classId=" << obj.classId
+            << ", className=" << obj.className << std::endl;
+    pt::ptree item;
+    item.put("classId", obj.classId);
+    item.put("className", obj.className);
+    item.put("confidence", obj.confidence);
+    item.put("x0", obj.x0);
+    item.put("y0", obj.y0);
+    item.put("x1", obj.x1);
+    item.put("y1", obj.y1);
+
+    data.push_back(std::make_pair("", item));
+  }
+  root.add_child("data", data);
+  pt::json_parser::write_json(resultPath, root, std::locale(), false);
+}
+std::string RealPath(std::string path) {
+  char realPathMem[PATH_MAX] = {0};
+  char *realPathRet = nullptr;
+  realPathRet = realpath(path.data(), realPathMem);
+  if (realPathRet == nullptr) {
+    std::cout << "File: " << path << " is not exist.";
+    return "";
+  }
+
+  std::string realPath(realPathMem);
+  std::cout << path << " realpath is: " << realPath << std::endl;
+  return realPath;
+}
+
+DIR *OpenDir(std::string dirName) {
+  if (dirName.empty()) {
+    std::cout << " dirName is null ! " << std::endl;
+    return nullptr;
+  }
+  std::string realPath = RealPath(dirName);
+  struct stat s;
+  lstat(realPath.c_str(), &s);
+  if (!S_ISDIR(s.st_mode)) {
+    std::cout << "dirName is not a valid directory !" << std::endl;
+    return nullptr;
+  }
+  DIR *dir = opendir(realPath.c_str());
+  if (dir == nullptr) {
+    std::cout << "Can not open dir " << dirName << std::endl;
+    return nullptr;
+  }
+  std::cout << "Successfully opened the dir " << dirName << std::endl;
+  return dir;
+}
+
+std::vector<std::string> GetAllFiles(std::string dirName) {
+  struct dirent *filename;
+  DIR *dir = OpenDir(dirName);
+  if (dir == nullptr) {
+    return {};
+  }
+  std::vector<std::string> res;
+  while ((filename = readdir(dir)) != nullptr) {
+    std::string dName = std::string(filename->d_name);
+    if (dName == "." || dName == ".." || filename->d_type != DT_REG) {
+      continue;
+    }
+    res.emplace_back(std::string(dirName) + "/" + filename->d_name);
+  }
+  std::sort(res.begin(), res.end());
+  return res;
+}
+
+APP_ERROR Fairmot::ReadImageCV(const std::string &imgPath, cv::Mat &imageMat,
+                               ImageShape &imgShape) {
+  imageMat = cv::imread(imgPath, cv::IMREAD_COLOR);
+  imgShape.width = imageMat.cols;
+  imgShape.height = imageMat.rows;
+  return APP_ERR_OK;
+}
+
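+// Letterbox resize: scale the image to fit the 1088x608 model input while
+// keeping the aspect ratio, then pad the borders with gray (127.5).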
+APP_ERROR Fairmot::ResizeImage(const cv::Mat &srcImageMat, cv::Mat &dstImageMat,
+                               ImageShape &imgShape) {
+  int height = 608;
+  int width = 1088;
+  float ratio = std::min(static_cast<float>(height) / srcImageMat.rows,
+                         static_cast<float>(width) / srcImageMat.cols);
+  std::vector<int> new_shape = {
+      static_cast<int>(round(srcImageMat.rows * ratio)),
+      static_cast<int>(round(srcImageMat.cols * ratio))};
+  int tmp = 2;
+  float dw = static_cast<float>((width - new_shape[1])) / tmp;
+  float dh = static_cast<float>((height - new_shape[0])) / tmp;
+  int top = round(dh - 0.1);
+  int bottom = round(dh + 0.1);
+  int left = round(dw - 0.1);
+  int right = round(dw + 0.1);
+  cv::Mat tmp_img;
+  cv::resize(srcImageMat, tmp_img, cv::Size(new_shape[1], new_shape[0]), 0, 0,
+             cv::INTER_AREA);
+  cv::Scalar value = cv::Scalar(127.5, 127.5, 127.5);
+  cv::copyMakeBorder(tmp_img, dstImageMat, top, bottom, left, right,
+                     cv::BORDER_CONSTANT, value);
+  imgShape.width = dstImageMat.cols;
+  imgShape.height = dstImageMat.rows;
+  return APP_ERR_OK;
+}
+
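+// Copy the packed HWC uint8 image from host memory to device memory and wrap
+// it as a TensorBase so it can be passed to the model as input.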
+APP_ERROR Fairmot::CVMatToTensorBase(const cv::Mat &imageMat,
+                                     MxBase::TensorBase &tensorBase) {
+  const uint32_t dataSize = imageMat.cols * imageMat.rows * imageMat.channels();
+  MxBase::MemoryData memoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE,
+                                   deviceId_);
+  MxBase::MemoryData memoryDataSrc(imageMat.data, dataSize,
+                                   MxBase::MemoryData::MEMORY_HOST_MALLOC);
+
+  APP_ERROR ret =
+      MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+  if (ret != APP_ERR_OK) {
+    LogError << GetError(ret) << "Memory malloc failed.";
+    return ret;
+  }
+  std::vector<uint32_t> shape = {static_cast<uint32_t>(imageMat.rows),
+                                 static_cast<uint32_t>(imageMat.cols),
+                                 static_cast<uint32_t>(imageMat.channels())};
+  tensorBase = MxBase::TensorBase(memoryDataDst, false, shape,
+                                  MxBase::TENSOR_DTYPE_UINT8);
+  return APP_ERR_OK;
+}
+
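+// Record the meta information used by the post-process: the original image
+// center (c), the scale (s), and the output feature-map size, i.e. the
+// resized input downsampled by a stride of 4.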
+APP_ERROR Fairmot::GetMetaMap(const ImageShape imgShape,
+                              const ImageShape resizeimgShape,
+                              MxBase::JDETracker &tracker) {
+  std::vector<float> c = {static_cast<float>(imgShape.width) / 2,
+                          static_cast<float>(imgShape.height) / 2};
+  float s = std::max(static_cast<float>(resizeimgShape.width) /
+                         static_cast<float>(resizeimgShape.height) *
+                         static_cast<float>(imgShape.height),
+                     static_cast<float>(imgShape.width)) *
+            1.0;
+  tracker.c = c;
+  tracker.s = s;
+  int tmp = 4;
+  tracker.out_height = resizeimgShape.height / tmp;
+  tracker.out_width = resizeimgShape.width / tmp;
+  return APP_ERR_OK;
+}
+
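+// Write the tracking results of one sequence in the MOT challenge text
+// format: frame,id,x,y,w,h,score,-1,-1,-1 (one line per tracked box).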
+void Fairmot::WriteResult(const std::string &result_filename,
+                          std::vector<MxBase::Results *> results) {
+  FILE *fp;
+  fp = std::fopen(result_filename.c_str(), "w");
+  for (size_t i = 0; i < results.size(); i++) {
+    std::vector<cv::Mat> online_tlwhs = (*results[i]).online_tlwhs;
+    std::vector<int> online_ids = (*results[i]).online_ids;
+    int frame_id = (*results[i]).frame_id;
+    for (size_t j = 0; j < online_tlwhs.size(); j++) {
+      if (online_ids[j] < 0) {
+        continue;
+      }
+      double x1, y1, w, h;
+      x1 = online_tlwhs[j].at<double>(0, 0);
+      y1 = online_tlwhs[j].at<double>(0, 1);
+      w = online_tlwhs[j].at<double>(0, 2);
+      h = online_tlwhs[j].at<double>(0, 3);
+      double x2, y2;
+      x2 = x1 + w;
+      y2 = y1 + h;
+      fprintf(fp, "%d,%d,%.13lf,%.13lf,%.13lf,%.13lf,%d,%d,%d,%d\n", frame_id,
+              (online_ids[j]), x1, y1, w, h, 1, -1, -1, -1);
+    }
+  }
+  fclose(fp);
+}
+
+APP_ERROR Fairmot::Process(const std::string &imgPath) {
+  ImageShape imageShape{};
+  ImageShape resizedImageShape{};
+  std::vector<std::string> seqs = {"/MOT20-01", "/MOT20-02", "/MOT20-03",
+                                   "/MOT20-05"};
+  std::string homePath = imgPath + "/result_Files";
+  if (access(homePath.c_str(), 0) != 0) {
+    std::string cmd = "mkdir " + homePath;
+    system(cmd.data());
+  }
+  for (const auto &seq : seqs) {
+    std::string result_filename = homePath + seq + ".txt";
+    std::string image_path = imgPath + "/train" + seq + "/img1";
+    std::vector<std::string> images = GetAllFiles(image_path);
+    int frame_rate = 25;
+    MxBase::JDETracker tracker(frame_rate);
+    MxBase::Files file;
+    for (const auto &image_file : images) {
+      LogInfo << image_file;
+      int tmp = 20;
+      if (file.frame_id % tmp == 0) {
+        LogInfo << "Processing frame " << file.frame_id;
+      }
+      cv::Mat imageMat;
+      APP_ERROR ret = ReadImageCV(image_file, imageMat, imageShape);
+      if (ret != APP_ERR_OK) {
+        LogError << "ReadImage failed, ret=" << ret << ".";
+        return ret;
+      }
+      ret = ResizeImage(imageMat, imageMat, resizedImageShape);
+      if (ret != APP_ERR_OK) {
+        LogError << "ResizeImage failed, ret=" << ret << ".";
+        return ret;
+      }
+      ret = GetMetaMap(imageShape, resizedImageShape, tracker);
+      if (ret != APP_ERR_OK) {
+        LogError << "GetMetaMap failed, ret=" << ret << ".";
+        return ret;
+      }
+      MxBase::TensorBase tensorBase;
+      ret = CVMatToTensorBase(imageMat, tensorBase);
+      if (ret != APP_ERR_OK) {
+        LogError << "CVMatToTensorBase failed, ret=" << ret << ".";
+        return ret;
+      }
+      std::vector<MxBase::TensorBase> inputs = {};
+      std::vector<MxBase::TensorBase> outputs = {};
+      inputs.push_back(tensorBase);
+      auto startTime = std::chrono::high_resolution_clock::now();
+      ret = Inference(inputs, outputs);
+      auto endTime = std::chrono::high_resolution_clock::now();
+      double costMs =
+          std::chrono::duration<double, std::milli>(endTime - startTime)
+              .count();  // save time
+      inferCostTimeMilliSec += costMs;
+      if (ret != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret << ".";
+        return ret;
+      }
+      tracker.seq = seq;
+      tracker.image_file = image_file;
+      ret = PostProcess(outputs, tracker, file);
+      if (ret != APP_ERR_OK) {
+        LogError << "PostProcess failed, ret=" << ret << ".";
+        return ret;
+      }
+    }
+    WriteResult(result_filename, file.results);
+  }
+  return APP_ERR_OK;
+}
diff --git a/research/cv/fairmot/infer/mxbase/src/Fairmot.h b/research/cv/fairmot/infer/mxbase/src/Fairmot.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f36ca10185631d34c15f8f3af5215cbfb466d6e
--- /dev/null
+++ b/research/cv/fairmot/infer/mxbase/src/Fairmot.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef FAIRMOT_INFER_MXBASE_SRC_FAIRMOT_H_
+#define FAIRMOT_INFER_MXBASE_SRC_FAIRMOT_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+#include <opencv2/opencv.hpp>
+
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "MxBase/PostProcessBases/ObjectPostProcessBase.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+#include "PostProcess/FairmotMindsporePost.h"
+
+struct InitParam {
+  uint32_t deviceId;
+  std::string labelPath;
+  uint32_t classNum;
+  float iouThresh;
+  float scoreThresh;
+
+  bool checkTensor;
+  std::string modelPath;
+};
+
+struct ImageShape {
+  uint32_t width;
+  uint32_t height;
+};
+
+class Fairmot {
+ public:
+  APP_ERROR Init(const InitParam &initParam);
+  APP_ERROR DeInit();
+  APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs,
+                      std::vector<MxBase::TensorBase> &outputs);
+  APP_ERROR PostProcess(const std::vector<MxBase::TensorBase> &inputs,
+                        MxBase::JDETracker &tracker, MxBase::Files &file);
+  APP_ERROR Process(const std::string &imgPath);
+  APP_ERROR ReadImageCV(const std::string &imgPath, cv::Mat &imageMat,
+                        ImageShape &imgShape);
+  APP_ERROR ResizeImage(const cv::Mat &srcImageMat, cv::Mat &dstImageMat,
+                        ImageShape &imgShape);
+  APP_ERROR CVMatToTensorBase(const cv::Mat &imageMat,
+                              MxBase::TensorBase &tensorBase);
+  APP_ERROR GetMetaMap(const ImageShape imgShape,
+                       const ImageShape resizeimgShape,
+                       MxBase::JDETracker &tracker);
+  void WriteResult(const std::string &result_filename,
+                   std::vector<MxBase::Results *> results);
+
+ private:
+  std::shared_ptr<MxBase::DvppWrapper> dvppWrapper_;
+  std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
+  std::shared_ptr<MxBase::FairmotMindsporePost> post_;
+  MxBase::ModelDesc modelDesc_;
+  uint32_t deviceId_ = 0;
+  double inferCostTimeMilliSec = 0.0;
+};
+
+#endif  // FAIRMOT_INFER_MXBASE_SRC_FAIRMOT_H_
diff --git a/research/cv/fairmot/infer/mxbase/src/PostProcess/FairmotMindsporePost.cpp b/research/cv/fairmot/infer/mxbase/src/PostProcess/FairmotMindsporePost.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3e948d0106a1ced0caaaa03145ee0f1a550e8f13
--- /dev/null
+++ b/research/cv/fairmot/infer/mxbase/src/PostProcess/FairmotMindsporePost.cpp
@@ -0,0 +1,1245 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define BOOST_BIND_GLOBAL_PLACEHOLDERS
+#include "FairmotMindsporePost.h"
+
+#include <dirent.h>
+#include <assert.h>
+#include <float.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <boost/property_tree/json_parser.hpp>
+#include <opencv2/core/hal/hal.hpp>
+#include <opencv2/opencv.hpp>
+
+#include "MxBase/CV/ObjectDetection/Nms/Nms.h"
+#include "acl/acl.h"
+
+namespace {
+// Output Tensor
+const int OUTPUT_TENSOR_SIZE = 2;
+const int OUTPUT_ID_FEATURE_INDEX = 0;
+const int OUTPUT_DETS_INDEX = 1;
+
+const int OUTPUT_ID_FEATURE_SIZE = 2;
+const int OUTPUT_DETS_SIZE = 3;
+const float CONF_THRES = 0.3;
+const int TRACK_BUFFER = 30;
+const int K = 500;
+const int TrackState_New = 0;
+const int TrackState_Tracked = 1;
+const int TrackState_Lost = 2;
+const int TrackState_Removed = 3;
+const int MIN_BOX_AREA = 100;
+}  // namespace
+
+namespace MxBase {
+BaseTrack basetrack;
+KalmanFilter kalmanfilter;
+void FairmotMindsporePost::get_result(int *collist, int cols, cost *d, cost min,
+                                      int rows, int &endofpath, row *colsol,
+                                      cost **assigncost, cost *v, int *pred) {
+  col low = 0, up = 0;  // columns in 0..low-1 are ready, now none.
+  int k, j, i, j1;
+  cost h, v2;
+  boolean unassignedfound = FALSE;
+  do {
+    if (up == low) {
+      min = d[collist[up++]];
+      for (k = up; k < cols; k++) {
+        j = collist[k];
+        h = d[j];
+        if (h <= min) {
+          if (h < min) {
+            up = low;  // restart list at index low.
+            min = h;
+          }
+          collist[k] = collist[up];
+          collist[up++] = j;
+        }
+      }
+      for (k = low; k < up; k++)
+        if (colsol[collist[k]] < 0) {
+          endofpath = collist[k];
+          unassignedfound = TRUE;
+          break;
+        }
+    }
+    if (!unassignedfound) {
+      j1 = collist[low];
+      low++;
+      i = colsol[j1];
+      if (i > rows) {
+        i = 0;
+      }
+      h = assigncost[i][j1] - v[j1] - min;
+      for (k = up; k < cols; k++) {
+        j = collist[k];
+        v2 = assigncost[i][j] - v[j] - h;
+        if (v2 < d[j]) {
+          pred[j] = i;
+          if (v2 == min) {
+            if (colsol[j] < 0) {
+              endofpath = j;
+              unassignedfound = TRUE;
+              break;
+            } else {
+              collist[k] = collist[up];
+              collist[up++] = j;
+            }
+          }
+          d[j] = v2;
+        }
+      }
+    }
+  } while (!unassignedfound);
+}
+void FairmotMindsporePost::func(int &numfree, int *free, cost **assigncost,
+                                row *colsol, cost *v, col *rowsol, int cols) {
+  int j2, loopcnt = 0;  // do-loop to be done twice.
+  do {
+    loopcnt++;
+    int k = 0;
+    row prvnumfree;
+    prvnumfree = numfree;
+    numfree = 0;
+    while (k < prvnumfree) {
+      int i = free[k++];
+      int umin = assigncost[i][0] - v[0];
+      int j1 = 0;
+      int usubmin = BIG;
+      for (int j = 1; j < cols; j++) {
+        int h = assigncost[i][j] - v[j];
+        if (h < usubmin) {
+          if (h >= umin) {
+            usubmin = h;
+            j2 = j;
+          } else {
+            usubmin = umin;
+            umin = h;
+            j2 = j1;
+            j1 = j;
+          }
+        }
+      }
+      int i0 = colsol[j1];
+      if (umin < usubmin) {
+        v[j1] = v[j1] - (usubmin - umin);
+      } else {
+        if (i0 > -1) {
+          j1 = j2;
+          i0 = colsol[j2];
+        }
+      }
+      rowsol[i] = j1;
+      colsol[j1] = i;
+      if (i0 > -1) {
+        if (umin < usubmin) {
+          free[--k] = i0;
+        } else {
+          free[numfree++] = i0;
+        }
+      }
+    }
+  } while (loopcnt < 2);  // repeat once.
+}
+
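+// Solve the linear assignment problem with a Jonker-Volgenant-style solver:
+// column reduction, augmenting row reduction (func), then augmentation along
+// shortest alternating paths (get_result).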
+void FairmotMindsporePost::lap(cost **assigncost, col *rowsol, row *colsol,
+                               cost *u, cost *v, int rows, int cols) {
+  int i, numfree = 0, f, *pred = new row[rows], *free = new row[rows], j, j1,
+         endofpath, *collist = new col[cols], *matches = new col[cols];
+  cost min, *d = new cost[rows];
+  for (i = 0; i < rows; i++) matches[i] = 0;
+  for (j = cols; j--;) {  // reverse order gives better results.
+    row imin = 0;
+    min = assigncost[0][j];
+    for (i = 1; i < rows; i++)
+      if (assigncost[i][j] < min) {
+        min = assigncost[i][j];
+        imin = i;
+      }
+    v[j] = min;
+    if (++matches[imin] == 1) {
+      rowsol[imin] = j;
+      colsol[j] = imin;
+    } else if (v[j] < v[rowsol[imin]]) {
+      int j_1 = rowsol[imin];
+      rowsol[imin] = j;
+      colsol[j] = imin;
+      colsol[j_1] = -1;
+    } else {
+      colsol[j] = -1;
+    }
+  }
+  for (i = 0; i < rows; i++)
+    if (matches[i] == 0) {
+      free[numfree++] = i;
+    } else if (matches[i] == 1) {
+      j1 = rowsol[i];
+      min = BIG;
+      for (j = 0; j < cols; j++)
+        if (j != j1)
+          if (assigncost[i][j] - v[j] < min) min = assigncost[i][j] - v[j];
+      v[j1] = v[j1] - min;
+    }
+  func(numfree, free, assigncost, colsol, v, rowsol, cols);
+  for (f = 0; f < numfree; f++) {
+    row freerow;
+    freerow = free[f];  // start row of augmenting path.
+    for (j = cols; j--;) {
+      d[j] = assigncost[freerow][j] - v[j];
+      pred[j] = freerow;
+      collist[j] = j;  // init column list.
+    }
+    get_result(collist, cols, d, min, rows, endofpath, colsol, assigncost, v,
+               pred);
+    do {
+      i = pred[endofpath];
+      colsol[endofpath] = i;
+      j1 = endofpath;
+      endofpath = rowsol[i];
+      rowsol[i] = j1;
+    } while (i != freerow);
+  }
+  delete[] pred;
+  delete[] free;
+  delete[] collist;
+  delete[] matches;
+  delete[] d;
+}
+
+int BaseTrack::next_id() {
+  this->count += 1;
+  return this->count;
+}
+
+Results::Results(uint32_t frame_id, const std::vector<cv::Mat> &online_tlwhs,
+                 const std::vector<int> &online_ids) {
+  this->frame_id = frame_id;
+  this->online_tlwhs = online_tlwhs;
+  this->online_ids = online_ids;
+}
+
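+// The track buffer scales with the frame rate: at 30 FPS a lost track is
+// kept for TRACK_BUFFER (30) frames before it is removed.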
+JDETracker::JDETracker(uint32_t frame_rate) {
+  this->det_thresh = CONF_THRES;
+  this->buffer_size = static_cast<int>(frame_rate / 30.0 * TRACK_BUFFER);
+  this->max_time_lost = this->buffer_size;
+  this->max_per_image = K;
+  KalmanFilter kalman_filter;
+  this->kalman_filter = kalman_filter;
+}
+STack::STack() {}
+STack::STack(cv::Mat tlwh, float score, cv::Mat temp_feat,
+             uint32_t buffer_size) {
+  tlwh.convertTo(this->tlwh, CV_64FC1);
+  this->is_activated = false;
+  this->score = score;
+  this->tracklet_len = 0;
+  this->update_features(temp_feat);
+  this->alpha = 0.9;
+}
+cv::Mat STack::gettlwh() {
+  if (this->mean.rows == 0) {
+    return this->tlwh.clone();
+  } else {
+    cv::Mat ret =
+        this->mean(cv::Range(0, this->mean.rows), cv::Range(0, 4)).clone();
+    ret.at<double>(0, 2) *= ret.at<double>(0, 3);
+    for (size_t i = 0; i < 2; i++) {
+      ret.at<double>(0, i) -= ret.at<double>(0, i + 2) / 2;
+    }
+    return ret;
+  }
+}
+cv::Mat STack::tlbr() {
+  cv::Mat ret = this->gettlwh();
+  for (size_t i = 0; i < 2; i++) {
+    ret.at<double>(0, i + 2) += ret.at<double>(0, i);
+  }
+  return ret;
+}
+cv::Mat STack::tlwh_to_xyah(cv::Mat tlwh) {
+  cv::Mat ret = tlwh;
+  for (size_t i = 0; i < 2; i++) {
+    ret.at<double>(0, i) += ret.at<double>(0, i + 2) / 2;
+  }
+  ret.at<double>(0, 2) /= ret.at<double>(0, 3);
+  return ret;
+}
+void STack::activate(const KalmanFilter &kalman_filter, uint32_t frame_id) {
+  this->kalman_filter = kalman_filter;
+  this->track_id = basetrack.next_id();
+  this->kalman_filter.initiate(this->tlwh_to_xyah(this->tlwh), this->mean,
+                               this->covariance);
+  this->tracklet_len = 0;
+  this->state = TrackState_Tracked;
+  this->is_activated = false;
+  if (frame_id == 1) {
+    this->is_activated = true;
+  }
+  this->frame_id = frame_id;
+  this->start_frame = frame_id;
+}
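+// Maintain the appearance embedding: keep the normalized current feature and
+// an exponential moving average (alpha = 0.9) as the smoothed feature.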
+void STack::update_features(cv::Mat temp_feat) {
+  cv::Mat feat;
+  cv::normalize(temp_feat, feat);
+  this->curr_feat = feat;
+  if (this->smooth_feat.empty()) {
+    this->smooth_feat = feat;
+  } else {
+    this->smooth_feat =
+        this->alpha * this->smooth_feat + (1 - this->alpha) * feat;
+  }
+  this->features.push_back(feat);
+  cv::normalize(this->smooth_feat, this->smooth_feat);
+}
+
+void STack::update(STack new_track, int frame_id, bool update_feature) {
+  this->frame_id = frame_id;
+  this->tracklet_len += 1;
+  cv::Mat new_tlwh = new_track.gettlwh();
+  this->kalman_filter.update(this->mean, this->covariance,
+                             this->tlwh_to_xyah(new_tlwh));
+  this->state = TrackState_Tracked;
+  this->is_activated = true;
+  this->score = new_track.score;
+  if (update_feature == true) {
+    this->update_features(new_track.curr_feat);
+  }
+}
+
+void STack::re_activate(STack new_track, int frame_id, bool new_id) {
+  this->kalman_filter.update(this->mean, this->covariance,
+                             this->tlwh_to_xyah(new_track.gettlwh()));
+  this->update_features(new_track.curr_feat);
+  this->tracklet_len = 0;
+  this->state = TrackState_Tracked;
+  this->is_activated = true;
+  this->frame_id = frame_id;
+  if (new_id) {
+    this->track_id = basetrack.next_id();
+  }
+}
+
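+// Create the initial state: the measured box (x, y, aspect, height) with zero
+// velocities; the initial uncertainty is proportional to the box height.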
+void KalmanFilter::initiate(cv::Mat measurement, cv::Mat &mean,
+                            cv::Mat &covariance) {
+  cv::Mat mean_pos = measurement;
+  cv::Mat mean_vel = cv::Mat::zeros(mean_pos.rows, mean_pos.cols, CV_64FC1);
+  hconcat(mean_pos, mean_vel, mean);
+  double tmp[1][8] = {
+      2 * this->std_weight_position * measurement.at<double>(0, 3),
+      2 * this->std_weight_position * measurement.at<double>(0, 3),
+      1e-2,
+      2 * this->std_weight_position * measurement.at<double>(0, 3),
+      10 * this->std_weight_velocity * measurement.at<double>(0, 3),
+      10 * this->std_weight_velocity * measurement.at<double>(0, 3),
+      1e-5,
+      10 * this->std_weight_velocity * measurement.at<double>(0, 3)};
+  cv::Mat std = cv::Mat(1, 8, CV_64FC1, tmp);
+  std = std.mul(std);
+  covariance = cv::Mat::eye(std.cols, std.cols, CV_64FC1);
+  for (size_t i = 0; i < std.cols; i++)
+    covariance.at<double>(i, i) = std.at<double>(0, i);
+}
+void KalmanFilter::multi_predict(cv::Mat &mean,
+                                 std::vector<cv::Mat> &covariance) {
+  cv::Mat std_pos(4, mean.rows, CV_64FC1);
+  cv::Mat std_vel(4, mean.rows, CV_64FC1);
+  for (size_t i = 0; i < 4; i++)
+    for (size_t j = 0; j < mean.rows; j++)
+      if (i == 2) {
+        std_pos.at<double>(i, j) = 1e-2;
+        std_vel.at<double>(i, j) = 1e-5;
+      } else {
+        std_pos.at<double>(i, j) =
+            this->std_weight_position * mean.at<double>(j, 3);
+        std_vel.at<double>(i, j) =
+            this->std_weight_velocity * mean.at<double>(j, 3);
+      }
+  cv::Mat sqr;
+  vconcat(std_pos, std_vel, sqr);
+  sqr = sqr.mul(sqr).t();
+  std::vector<cv::Mat> motion_cov;
+  for (size_t i = 0; i < mean.rows; i++) {
+    cv::Mat diag = cv::Mat::eye(sqr.cols, sqr.cols, CV_64FC1);
+    for (size_t j = 0; j < sqr.cols; j++) {
+      diag.at<double>(j, j) = sqr.at<double>(i, j);
+    }
+    motion_cov.push_back(diag);
+  }
+  mean = mean * this->motion_mat.t();
+  std::vector<cv::Mat> left;
+  for (size_t i = 0; i < covariance.size(); i++) {
+    left.push_back(this->motion_mat * covariance[i]);
+  }
+  for (size_t i = 0; i < covariance.size(); i++) {
+    covariance[i] = left[i] * this->motion_mat.t() + motion_cov[i];
+  }
+}
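+// Constant-velocity Kalman model over an 8-dimensional state (x, y, aspect
+// ratio, height and their velocities); motion_mat is the state transition
+// matrix and update_mat projects the state onto the measurement space.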
+KalmanFilter::KalmanFilter() {
+  this->ndim = 4;
+  this->dt = 1.0;
+  this->motion_mat = cv::Mat::eye(2 * (this->ndim), 2 * (this->ndim), CV_64FC1);
+  for (size_t i = 0; i < this->ndim; ++i) {
+    this->motion_mat.at<double>(i, this->ndim + i) = this->dt;
+  }
+  this->update_mat = cv::Mat::eye(this->ndim, 2 * (this->ndim), CV_64FC1);
+  this->std_weight_position = 1.0 / 20;
+  this->std_weight_velocity = 1.0 / 160;
+}
+cv::Mat KalmanFilter::GatingDistance(cv::Mat mean, cv::Mat covariance,
+                                     cv::Mat measurements, bool only_position,
+                                     const std::string &metric) {
+  this->project(mean, covariance);
+  cv::Mat d(measurements.rows, measurements.cols, CV_64FC1);
+  for (size_t i = 0; i < measurements.rows; i++) {
+    d.row(i) = measurements.row(i) - mean;
+  }
+  cv::Mat cholesky_factor = cv::Mat::zeros(covariance.size(), CV_64F);
+  for (int i = 0; i < covariance.rows; ++i) {
+    int j;
+    double sum;
+    for (j = 0; j < i; ++j) {
+      sum = 0;
+      for (int k = 0; k < j; ++k) {
+        sum +=
+            cholesky_factor.at<double>(i, k) * cholesky_factor.at<double>(j, k);
+      }
+      cholesky_factor.at<double>(i, j) = (covariance.at<double>(i, j) - sum) /
+                                         cholesky_factor.at<double>(j, j);
+    }
+    sum = 0;
+    assert(i == j);
+    for (int k = 0; k < j; ++k) {
+      sum +=
+          cholesky_factor.at<double>(j, k) * cholesky_factor.at<double>(j, k);
+    }
+    cholesky_factor.at<double>(j, j) = sqrt(covariance.at<double>(j, j) - sum);
+  }
+  cv::Mat z;
+  cv::solve(cholesky_factor, d.t(), z);
+  z = z.mul(z);
+  cv::Mat squared_maha(1, z.cols, CV_64FC1);
+  for (size_t i = 0; i < z.cols; i++) {
+    double sum = 0;
+    for (size_t t = 0; t < z.rows; t++) {
+      sum += z.at<double>(t, i);
+    }
+    squared_maha.at<double>(0, i) = sum;
+  }
+  return squared_maha;
+}
+
+void KalmanFilter::project(cv::Mat &mean, cv::Mat &covariance) {
+  cv::Mat std(1, 4, CV_64FC1);
+  for (size_t i = 0; i < 4; i++)
+    if (i == 2)
+      std.at<double>(0, i) = 1e-1;
+    else
+      std.at<double>(0, i) = this->std_weight_position * mean.at<double>(0, 3);
+  std = std.mul(std);
+  cv::Mat innovation_cov = cv::Mat::eye(std.cols, std.cols, CV_64FC1);
+  for (size_t j = 0; j < std.cols; j++) {
+    innovation_cov.at<double>(j, j) = std.at<double>(0, j);
+  }
+  cv::Mat tmp(mean.rows, this->update_mat.rows, CV_64FC1);
+  for (size_t i = 0; i < this->update_mat.rows; i++) {
+    tmp.at<double>(0, i) = this->update_mat.row(i).dot(mean);
+  }
+  mean = tmp;
+  covariance =
+      this->update_mat * covariance * this->update_mat.t() + innovation_cov;
+}
+
+void KalmanFilter::update(cv::Mat &mean, cv::Mat &covariance,
+                          cv::Mat measurement) {
+  cv::Mat projected_mean = mean.clone();
+  cv::Mat projected_cov = covariance.clone();
+  this->project(projected_mean, projected_cov);
+  cv::Mat chol_factor;
+  this->cholesky_decomposition(projected_cov, chol_factor);
+  cv::Mat b = covariance * this->update_mat.t();
+  b = b.t();
+  cv::Mat kalman_gain(chol_factor.rows, b.cols, CV_64FC1);
+  for (size_t i = 0; i < b.cols; i++) {
+    f64_vec_t x = {0, 0, 0, 0};
+    f64_vec_t *f_x = &x;
+    cv::Mat mat_b(b.rows, 1, CV_64FC1);
+    mat_b = b.col(i);
+    this->chol_subtitute(chol_factor, mat_b, f_x, chol_factor.rows);
+    for (size_t j = 0; j < chol_factor.rows; j++) {
+      if ((*f_x)[j] < 0.001)
+        kalman_gain.at<double>(j, i) = 0;
+      else
+        kalman_gain.at<double>(j, i) = (*f_x)[j];
+    }
+  }
+  kalman_gain = kalman_gain.t();
+  cv::Mat innovation = measurement - projected_mean;
+  mean += innovation * kalman_gain.t();
+  covariance -= kalman_gain * projected_cov * kalman_gain.t();
+}
+
+void KalmanFilter::chol_subtitute(cv::Mat chol_factor, cv::Mat mat_b,
+                                  f64_vec_t *f_x, int n) {
+  f64_mat_t L;
+  f64_vec_t b;
+  for (int i = 0; i < n; ++i)
+    for (int j = 0; j < n; ++j) L[i][j] = chol_factor.at<double>(i, j);
+  for (int i = 0; i < n; ++i) b[i] = mat_b.at<double>(i, 0);
+  f64_mat_t *f_L = &L;
+  f64_vec_t *f_b = &b;
+  int i, j;
+  double f_sum;
+
+  double *pX;
+  double *pXj;
+  const double *pL;
+  const double *pB;
+  /** @llr - This function shall solve the unknown vector f_x given a matrix f_L
+   * and a vector f_b with the relation f_L*fL'*f_x = f_b.*/
+
+  pX = &(*f_x)[0];
+  pB = &(*f_b)[0];
+  /* Copy f_b into f_x */
+  for (i = 0u; i < n; i++) {
+    (*pX) = (*pB);
+    pX++;
+    pB++;
+  }
+  /* Solve Ly = b  for y */
+  pXj = &(*f_x)[0];
+  for (i = 0u; i < n; i++) {
+    double *pXi;
+    double fLii;
+    f_sum = (*f_x)[i];
+    fLii = (*f_L)[i][i];
+    pXi = &(*f_x)[0];
+    pL = &(*f_L)[i][0];
+
+    for (j = 0u; j < i; j++) {
+      f_sum -= (*pL) * (*pXi);
+      pL++;
+      pXi++;
+    }
+    (*pXj) = f_sum / fLii;
+    pXj++;
+  }
+  /* Solve L'x = y for x */
+  for (i = 1u; i <= n; i++) {
+    f_sum = (*f_x)[n - i];
+    pXj = &(*f_x)[n - i + 1u];
+    pL = &(*f_L)[n - i + 1u][n - i];
+    for (j = n - i + 1u; j < n; j++) {
+      f_sum -= (*pL) * (*pXj);
+      pXj++;
+      pL += n; /* PRQA S 0488 */
+    }
+    (*f_x)[n - i] = f_sum / (*f_L)[n - i][n - i];
+  }
+}
+
+void KalmanFilter::cholesky_decomposition(const cv::Mat &A, cv::Mat &L) {
+  L = cv::Mat::zeros(A.size(), CV_64F);
+  int rows = A.rows;
+
+  for (int i = 0; i < rows; ++i) {
+    int j;
+    double sum;
+
+    for (j = 0; j < i; ++j) {
+      sum = 0;
+      for (int k = 0; k < j; ++k) {
+        sum += L.at<double>(i, k) * L.at<double>(j, k);
+      }
+      L.at<double>(i, j) = (A.at<double>(i, j) - sum) / L.at<double>(j, j);
+    }
+    sum = 0;
+    assert(i == j);
+    for (int k = 0; k < j; ++k) {
+      sum += L.at<double>(j, k) * L.at<double>(j, k);
+    }
+    L.at<double>(j, j) = sqrt(A.at<double>(j, j) - sum);
+  }
+}
+
+FairmotMindsporePost &FairmotMindsporePost::operator=(
+    const FairmotMindsporePost &other) {
+  if (this == &other) {
+    return *this;
+  }
+  PostProcessBase::operator=(other);
+  return *this;
+}
+
+APP_ERROR FairmotMindsporePost::Init(
+    const std::map<std::string, std::shared_ptr<void>> &postConfig) {
+  LogInfo << "Begin to initialize FairmotMindsporePost.";
+  APP_ERROR ret = PostProcessBase::Init(postConfig);
+  if (ret != APP_ERR_OK) {
+    LogError << GetError(ret) << "Failed to init the base class PostProcessBase.";
+    return ret;
+  }
+
+  LogInfo << "End to initialize FairmotMindsporePost.";
+  return APP_ERR_OK;
+}
+
+APP_ERROR FairmotMindsporePost::DeInit() {
+  LogInfo << "Begin to deinitialize FairmotMindsporePost.";
+  LogInfo << "End to deinitialize FairmotMindsporePost.";
+  return APP_ERR_OK;
+}
+
+bool FairmotMindsporePost::IsValidTensors(
+    const std::vector<TensorBase> &tensors) const {
+  if (tensors.size() < OUTPUT_TENSOR_SIZE) {
+    LogError << "The number of tensor (" << tensors.size()
+             << ") is less than required (" << OUTPUT_TENSOR_SIZE << ")";
+    return false;
+  }
+
+  auto idFeatureShape = tensors[OUTPUT_ID_FEATURE_INDEX].GetShape();
+  if (idFeatureShape.size() != OUTPUT_ID_FEATURE_SIZE) {
+    LogError << "The number of tensor[" << OUTPUT_ID_FEATURE_INDEX
+             << "] dimensions (" << idFeatureShape.size()
+             << ") is not equal to (" << OUTPUT_ID_FEATURE_SIZE << ")";
+    return false;
+  }
+
+  auto detsShape = tensors[OUTPUT_DETS_INDEX].GetShape();
+  if (detsShape.size() != OUTPUT_DETS_SIZE) {
+    LogError << "The number of tensor[" << OUTPUT_DETS_INDEX << "] dimensions ("
+             << detsShape.size() << ") is not equal to ("
+             << OUTPUT_DETS_SIZE << ")";
+    return false;
+  }
+
+  return true;
+}
+
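+// Map coordinates from the network output plane back to the original image
+// using the inverse affine transform built from the tracker meta data.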
+void FairmotMindsporePost::TransformPreds(const cv::Mat &coords,
+                                          MxBase::JDETracker tracker,
+                                          cv::Mat &target_coords) {
+  target_coords = cv::Mat::zeros(coords.rows, coords.cols, CV_32FC1);
+  float scale = tracker.s;
+  uint32_t h = tracker.out_height;
+  uint32_t w = tracker.out_width;
+  float scale_value[1][2] = {scale, scale};
+  cv::Mat scale_tmp = cv::Mat(1, 2, CV_32FC1, scale_value);
+  float src_w = scale_tmp.at<float>(0, 0);
+  uint32_t dst_w = w;
+  uint32_t dst_h = h;
+  float sn = 0.0;
+  float cs = 1.0;
+  uint32_t src_point_0 = 0;
+  float src_point_1 = src_w * (-0.5);
+  float src_dir_value[1][2] = {src_point_0 * cs - src_point_1 * sn,
+                               src_point_0 * sn + src_point_1 * cs};
+  cv::Mat src_dir = cv::Mat(1, 2, CV_32FC1, src_dir_value);
+  float dst_dir_value[1][2] = {0, static_cast<float>(dst_w * (-0.5))};
+  cv::Mat dst_dir = cv::Mat(1, 2, CV_32FC1, dst_dir_value);
+  cv::Mat src = cv::Mat::zeros(3, 2, CV_32FC1);
+  cv::Mat dst = cv::Mat::zeros(3, 2, CV_32FC1);
+  float center_value[1][2] = {tracker.c[0], tracker.c[1]};
+  cv::Mat center = cv::Mat(1, 2, CV_32FC1, center_value);
+  cv::Mat shift = cv::Mat::zeros(1, 2, CV_32FC1);
+  cv::Mat src_0 = scale_tmp.mul(shift) + center;
+  cv::Mat src_1 = scale_tmp.mul(shift) + center + src_dir;
+  cv::Mat direct = src_0 - src_1;
+  float direct_tmp_value[1][2] = {-direct.at<float>(0, 1),
+                                  direct.at<float>(0, 0)};
+  cv::Mat direct_tmp = cv::Mat(1, 2, CV_32FC1, direct_tmp_value);
+  cv::Mat src_2 = src_1 + direct_tmp;
+  float dst_0_value[1][2] = {static_cast<float>(dst_w * (0.5)),
+                             static_cast<float>(dst_h * (0.5))};
+  cv::Mat dst_0 = cv::Mat(1, 2, CV_32FC1, dst_0_value);
+  float dst_1_value[1][2] = {
+      static_cast<float>(dst_w * (0.5)) + dst_dir.at<float>(0, 0),
+      static_cast<float>(dst_h * (0.5)) + dst_dir.at<float>(0, 1)};
+  cv::Mat dst_1 = cv::Mat(1, 2, CV_32FC1, dst_1_value);
+  direct = dst_0 - dst_1;
+  float direct_value[1][2] = {-direct.at<float>(0, 1), direct.at<float>(0, 0)};
+  direct_tmp = cv::Mat(1, 2, CV_32FC1, direct_value);
+  cv::Mat dst_2 = dst_1 + direct_tmp;
+  for (size_t y = 0; y < src.cols; ++y) {
+    src.at<float>(0, y) = src_0.at<float>(0, y);
+    src.at<float>(1, y) = src_1.at<float>(0, y);
+    src.at<float>(2, y) = src_2.at<float>(0, y);
+    dst.at<float>(0, y) = dst_0.at<float>(0, y);
+    dst.at<float>(1, y) = dst_1.at<float>(0, y);
+    dst.at<float>(2, y) = dst_2.at<float>(0, y);
+  }
+  cv::Mat trans(2, 3, CV_32FC1);
+  trans = cv::getAffineTransform(dst, src);
+  trans.convertTo(trans, CV_32F);
+  for (size_t x = 0; x < coords.rows; ++x) {
+    float pt_value[3][1] = {coords.at<float>(x, 0), coords.at<float>(x, 1),
+                            1.0};
+    cv::Mat pt = cv::Mat(3, 1, CV_32FC1, pt_value);
+    for (size_t y = 0; y < 2; ++y) {
+      cv::Mat new_pt = trans * pt;
+      target_coords.at<float>(x, y) = new_pt.at<float>(y);
+    }
+  }
+}
+
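+// Converts raw detections back to original-image coordinates: columns 0..1
+// (top-left) and 2..3 (bottom-right) are transformed separately, then the
+// matrix is truncated to the first five columns [x1, y1, x2, y2, score].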
+void FairmotMindsporePost::PostProcess(cv::Mat &det,
+                                       const MxBase::JDETracker &tracker) {
+  cv::Mat coords_0 = det(cv::Range(0, det.rows), cv::Range(0, 2));
+  cv::Mat coords_1 = det(cv::Range(0, det.rows), cv::Range(2, 4));
+  cv::Mat target_coords_0;
+  cv::Mat target_coords_1;
+  TransformPreds(coords_0, tracker, target_coords_0);
+  TransformPreds(coords_1, tracker, target_coords_1);
+  for (size_t x = 0; x < det.rows; ++x) {
+    for (size_t y = 0; y < 4; ++y) {
+      if (y < 2) {
+        det.at<float>(x, y) = target_coords_0.at<float>(x, y);
+      } else {
+        det.at<float>(x, y) = target_coords_1.at<float>(x, y - 2);
+      }
+    }
+  }
+  det = det(cv::Range(0, det.rows), cv::Range(0, 5));
+}
+void FairmotMindsporePost::TensorBaseToCVMat(cv::Mat &imageMat,
+                                             const MxBase::TensorBase &tensor) {
+  TensorBase Data = tensor;
+  uint32_t outputModelWidth;
+  uint32_t outputModelHeight;
+  auto shape = Data.GetShape();
+  if (shape.size() == 2) {
+    outputModelWidth = shape[0];
+    outputModelHeight = shape[1];
+  } else {
+    outputModelWidth = shape[1];
+    outputModelHeight = shape[2];
+  }
+  auto *data = reinterpret_cast<float *>(GetBuffer(Data, 0));
+  cv::Mat dataMat(outputModelWidth, outputModelHeight, CV_32FC1);
+  for (size_t x = 0; x < outputModelWidth; ++x) {
+    for (size_t y = 0; y < outputModelHeight; ++y) {
+      dataMat.at<float>(x, y) = data[x * outputModelHeight + y];
+    }
+  }
+  imageMat = dataMat.clone();
+}
+
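+// Wraps the linear assignment (LAP) solver: rowsol/colsol hold the optimal
+// assignment on the padded square matrix; indices that land in the padding
+// (beyond the real row/column count) are reported as -1, i.e. unmatched.
+// Returns {matches, unmatched_rows, unmatched_cols}.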
+std::vector<cv::Mat> FairmotMindsporePost::get_lap(cost **assigncost,
+                                                   col *rowsol, row *colsol,
+                                                   cost *u, cost *v, int row,
+                                                   int col, int dim) {
+  std::vector<cv::Mat> results;
+  lap(assigncost, rowsol, colsol, u, v, dim, dim);
+  cv::Mat x(1, row, CV_32FC1), y(1, col, CV_32FC1);
+  for (int i = 0; i < row; i++)
+    x.at<float>(0, i) = rowsol[i] > (col - 1) ? (-1) : rowsol[i];
+  for (int j = 0; j < col; j++)
+    y.at<float>(0, j) = colsol[j] > (row - 1) ? (-1) : colsol[j];
+  cv::Mat matches(0, 2, CV_32FC1);
+  for (size_t i = 0; i < x.cols; i++) {
+    if (x.at<float>(0, i) >= 0) {
+      cv::Mat tmp(1, 2, CV_32FC1);
+      tmp.at<float>(0, 0) = i;
+      tmp.at<float>(0, 1) = x.at<float>(0, i);
+      matches.push_back(tmp);
+    }
+  }
+  std::vector<int> a, b;
+  for (size_t i = 0; i < x.cols; i++)
+    if (x.at<float>(0, i) < 0) a.push_back(i);
+  for (size_t i = 0; i < y.cols; i++)
+    if (y.at<float>(0, i) < 0) b.push_back(i);
+  cv::Mat unmatched_a(1, a.size(), CV_32FC1),
+      unmatched_b(1, b.size(), CV_32FC1);
+  for (size_t i = 0; i < a.size(); i++) unmatched_a.at<float>(0, i) = a[i];
+  for (size_t i = 0; i < b.size(); i++) unmatched_b.at<float>(0, i) = b[i];
+  results.push_back(matches);
+  results.push_back(unmatched_a);
+  results.push_back(unmatched_b);
+  return results;
+}
+
+std::vector<cv::Mat> FairmotMindsporePost::LinearAssignment(cv::Mat cost_matrix,
+                                                            float thresh) {
+  if (cost_matrix.rows == 0) {
+    std::vector<cv::Mat> results;
+    cv::Mat matches(0, 2, CV_32FC1), u_track,
+        u_detection(1, cost_matrix.cols, CV_32FC1);
+    for (size_t i = 0; i < cost_matrix.cols; ++i)
+      u_detection.at<float>(0, i) = i;
+    results.push_back(matches);
+    results.push_back(u_track);
+    results.push_back(u_detection);
+    return results;
+  } else {
+    int row = cost_matrix.rows, col = cost_matrix.cols;
+    int N = row > col ? row : col;
+    cv::Mat cost_c_extended = cv::Mat::ones(2 * N, 2 * N, CV_64FC1);
+    cost_c_extended *= thresh;
+    if (row != col) {
+      double min = 0, max = 0;
+      double *minp = &min, *maxp = &max;
+      cv::minMaxIdx(cost_matrix, minp, maxp);
+      for (size_t i = 0; i < N; i++)
+        for (size_t j = 0; j < N; j++)
+          cost_c_extended.at<double>(i, j) = (*maxp) + thresh + 1;
+    }
+    for (size_t i = 0; i < row; i++)
+      for (size_t j = 0; j < col; j++)
+        cost_c_extended.at<double>(i, j) = cost_matrix.at<double>(i, j);
+    cost_matrix = cost_c_extended;
+    int dim = 2 * N, *rowsol = new int[dim], *colsol = new int[dim];
+    double **costMatrix = new double *[dim], *u = new double[dim],
+           *v = new double[dim];
+    for (int i = 0; i < dim; i++) costMatrix[i] = new double[dim];
+    for (int i = 0; i < dim; ++i)
+      for (int j = 0; j < dim; ++j)
+        costMatrix[i][j] = cost_matrix.at<double>(i, j);
+    std::vector<cv::Mat> results =
+        get_lap(costMatrix, rowsol, colsol, u, v, row, col, dim);
+    // Free the solver buffers; the original code leaked them.
+    for (int i = 0; i < dim; i++) delete[] costMatrix[i];
+    delete[] costMatrix;
+    delete[] rowsol; delete[] colsol;
+    delete[] u; delete[] v;
+    return results;
+  }
+}
+
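+// Gates the appearance cost matrix with motion information: measurements
+// whose Mahalanobis distance to a track exceeds the chi-square 95% threshold
+// become infeasible (DBL_MAX); the rest are blended with the gating distance
+// using weight lambda_.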
+void FairmotMindsporePost::FuseMotion(MxBase::KalmanFilter &kf,
+                                      cv::Mat &cost_matrix,
+                                      std::vector<STack *> tracks,
+                                      std::vector<STack *> detections,
+                                      bool only_position, float lambda_) {
+  if (cost_matrix.rows != 0) {
+    // The reference FairMOT implementation gates on 2 dimensions (position
+    // only) when only_position is set and on 4 otherwise; the original
+    // `if (only_position = false)` was an accidental assignment.
+    int gating_dim = only_position ? 2 : 4;
+    float gating_threshold = kalmanfilter.chi2inv95[gating_dim];
+    cv::Mat measurements(detections.size(), 4, CV_64FC1);
+    for (size_t i = 0; i < detections.size(); i++)
+      measurements.row(i) =
+          (*detections[i]).tlwh_to_xyah((*detections[i]).gettlwh()) + 0;
+    for (size_t i = 0; i < tracks.size(); i++) {
+      cv::Mat gating_distance =
+          kf.GatingDistance((*tracks[i]).mean, (*tracks[i]).covariance,
+                            measurements, only_position);
+      for (size_t t = 0; t < gating_distance.cols; t++)
+        if (gating_distance.at<double>(0, t) > gating_threshold)
+          cost_matrix.at<double>(i, t) = DBL_MAX;
+      cost_matrix.row(i) =
+          lambda_ * cost_matrix.row(i) + (1 - lambda_) * gating_distance;
+    }
+  }
+}
+std::vector<STack *> FairmotMindsporePost::JointStracks(
+    std::vector<STack *> tlista, std::vector<STack *> tlistb) {
+  std::vector<STack *> res;
+  std::map<int, int> exists;
+  for (size_t t = 0; t < tlista.size(); t++) {
+    exists[(*tlista[t]).track_id] = 1;
+    res.push_back(tlista[t]);
+  }
+  for (size_t t = 0; t < tlistb.size(); t++) {
+    int tid = (*tlistb[t]).track_id;
+    if (exists[tid] == 0) {
+      exists[tid] = 1;
+      res.push_back(tlistb[t]);
+    }
+  }
+  return res;
+}
+void FairmotMindsporePost::RemoveDuplicateStracks(
+    std::vector<STack *> &stracksa, std::vector<STack *> &stracksb) {
+  cv::Mat pdist = IouDistance(stracksa, stracksb);
+  std::vector<size_t> p, q, dupa, dupb;
+  std::vector<STack *> resa;
+  std::vector<STack *> resb;
+  for (size_t i = 0; i < pdist.rows; i++)
+    for (size_t j = 0; j < pdist.cols; j++)
+      if (pdist.at<double>(i, j) < 0.15) {
+        p.push_back(i);
+        q.push_back(j);
+      }
+  for (size_t i = 0; i < p.size(); i++) {
+    int timep = (*stracksa[p[i]]).frame_id - (*stracksa[p[i]]).start_frame;
+    int timeq = (*stracksb[q[i]]).frame_id - (*stracksb[q[i]]).start_frame;
+    if (timep > timeq) {
+      dupb.push_back(q[i]);
+    } else {
+      dupa.push_back(p[i]);
+    }
+  }
+  for (size_t i = 0; i < stracksa.size(); i++) {
+    if (std::find(dupa.begin(), dupa.end(), i) == dupa.end()) {
+      resa.push_back(stracksa[i]);
+    }
+  }
+  for (size_t i = 0; i < stracksb.size(); i++) {
+    if (std::find(dupb.begin(), dupb.end(), i) == dupb.end()) {
+      resb.push_back(stracksb[i]);
+    }
+  }
+  stracksa = resa;
+  stracksb = resb;
+}
+std::vector<STack *> FairmotMindsporePost::SubStracks(
+    std::vector<STack *> tlista, std::vector<STack *> tlistb) {
+  std::vector<STack *> res;
+  std::map<size_t, STack *> stracks;
+  std::map<size_t, STack *>::iterator it;
+  std::vector<size_t> key;
+  std::vector<size_t> del_key;
+  for (size_t t = 0; t < tlista.size(); t++) {
+    key.push_back((*tlista[t]).track_id);
+    stracks[(*tlista[t]).track_id] = tlista[t];
+  }
+  for (size_t t = 0; t < tlistb.size(); t++) {
+    int tid = (*tlistb[t]).track_id;
+    it = stracks.find(tid);
+    if (it != stracks.end()) {
+      del_key.push_back(tid);
+      stracks.erase(it);
+    }
+  }
+  for (size_t i = 0; i < key.size(); i++) {
+    bool flag = false;
+    for (size_t j = 0; j < del_key.size(); j++) {
+      if (del_key[j] == key[i]) {
+        flag = true;
+      }
+      if (flag == true) {
+        break;
+      }
+    }
+    if (flag == false) {
+      res.push_back(stracks[key[i]]);
+    }
+  }
+  return res;
+}
+cv::Mat FairmotMindsporePost::BboxOverlaps(std::vector<cv::Mat> boxes,
+                                           std::vector<cv::Mat> query_boxes) {
+  int N = boxes.size();
+  int K = query_boxes.size();
+  cv::Mat overlaps = cv::Mat::zeros(N, K, CV_64FC1);
+  for (size_t k = 0; k < K; k++) {
+    double box_area =
+        (query_boxes[k].at<double>(0, 2) - query_boxes[k].at<double>(0, 0) +
+         1) *
+        (query_boxes[k].at<double>(0, 3) - query_boxes[k].at<double>(0, 1) + 1);
+    for (size_t n = 0; n < N; n++) {
+      double iw =
+          std::min(boxes[n].at<double>(0, 2), query_boxes[k].at<double>(0, 2)) -
+          std::max(boxes[n].at<double>(0, 0), query_boxes[k].at<double>(0, 0)) +
+          1;
+      if (iw > 0) {
+        double ih = std::min(boxes[n].at<double>(0, 3),
+                             query_boxes[k].at<double>(0, 3)) -
+                    std::max(boxes[n].at<double>(0, 1),
+                             query_boxes[k].at<double>(0, 1)) +
+                    1;
+        if (ih > 0) {
+          double ua = static_cast<double>(
+              (boxes[n].at<double>(0, 2) - boxes[n].at<double>(0, 0) + 1) *
+                  (boxes[n].at<double>(0, 3) - boxes[n].at<double>(0, 1) + 1) +
+              box_area - iw * ih);
+          overlaps.at<double>(n, k) = iw * ih / ua;
+        }
+      }
+    }
+  }
+  return overlaps;
+}
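+// IoU cost between two track lists: 1 minus the pairwise overlap of their
+// tlbr boxes; an empty list yields an empty matrix.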
+cv::Mat FairmotMindsporePost::IouDistance(std::vector<STack *> atracks,
+                                          std::vector<STack *> btracks) {
+  std::vector<cv::Mat> atlbrs;
+  std::vector<cv::Mat> btlbrs;
+  cv::Mat cost_matrix;
+  for (size_t i = 0; i < atracks.size(); i++) {
+    atlbrs.push_back((*atracks[i]).tlbr());
+  }
+  for (size_t i = 0; i < btracks.size(); i++) {
+    btlbrs.push_back((*btracks[i]).tlbr());
+  }
+  cv::Mat ious = cv::Mat::zeros(atlbrs.size(), btlbrs.size(), CV_64FC1);
+  if (!ious.empty()) {
+    ious = BboxOverlaps(atlbrs, btlbrs);
+    cost_matrix = 1 - ious;
+  } else {
+    cost_matrix = cv::Mat::zeros(atlbrs.size(), btlbrs.size(), CV_64FC1);
+  }
+  return cost_matrix;
+}
+void FairmotMindsporePost::MultiPredict(std::vector<STack *> &stracks) {
+  if (stracks.size() > 0) {
+    cv::Mat multi_mean(stracks.size(), (*stracks[0]).mean.cols, CV_64FC1);
+    std::vector<cv::Mat> multi_covariance;
+    for (size_t i = 0; i < stracks.size(); i++) {
+      multi_mean.row(i) = (*stracks[i]).mean.clone() + 0;
+      multi_covariance.push_back((*stracks[i]).covariance);
+    }
+    for (size_t i = 0; i < stracks.size(); i++) {
+      if ((*stracks[i]).state != TrackState_Tracked) {
+        multi_mean.at<double>(i, 7) = 0;
+      }
+    }
+    kalmanfilter.multi_predict(multi_mean, multi_covariance);
+    for (size_t i = 0; i < multi_covariance.size(); i++) {
+      (*stracks[i]).mean = multi_mean.row(i);
+      (*stracks[i]).covariance = multi_covariance[i];
+    }
+  }
+}
+
+std::vector<STack *> FairmotMindsporePost::Get_output_stracks(
+    MxBase::JDETracker &tracker, const std::vector<STack *> &activated_starcks,
+    const std::vector<STack *> &refind_stracks,
+    std::vector<STack *> lost_stracks, std::vector<STack *> removed_stracks) {
+  std::vector<STack *> det_tmp;
+  for (size_t i = 0; i < tracker.tracked_stracks.size(); i++) {
+    if ((*tracker.tracked_stracks[i]).state == TrackState_Tracked) {
+      det_tmp.push_back(tracker.tracked_stracks[i]);
+    }
+  }
+  std::vector<STack *>().swap(tracker.tracked_stracks);
+  tracker.tracked_stracks = det_tmp;
+  std::vector<STack *>().swap(det_tmp);
+  tracker.tracked_stracks =
+      JointStracks(tracker.tracked_stracks, activated_starcks);
+  tracker.tracked_stracks =
+      JointStracks(tracker.tracked_stracks, refind_stracks);
+  tracker.lost_stracks =
+      SubStracks(tracker.lost_stracks, tracker.tracked_stracks);
+  for (size_t i = 0; i < lost_stracks.size(); i++) {
+    tracker.lost_stracks.push_back(lost_stracks[i]);
+  }
+  tracker.lost_stracks =
+      SubStracks(tracker.lost_stracks, tracker.removed_stracks);
+  for (size_t i = 0; i < removed_stracks.size(); i++) {
+    tracker.removed_stracks.push_back(removed_stracks[i]);
+  }
+  std::vector<STack *> output_stracks;
+  RemoveDuplicateStracks(tracker.tracked_stracks, tracker.lost_stracks);
+  // get scores of lost tracks
+  for (size_t i = 0; i < tracker.tracked_stracks.size(); i++) {
+    if ((*tracker.tracked_stracks[i]).is_activated) {
+      output_stracks.push_back(tracker.tracked_stracks[i]);
+    }
+  }
+  return output_stracks;
+}
+
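+// Converts each surviving detection row ([x1, y1, x2, y2, score]) into an
+// STack: the box is rewritten to tlwh in place and paired with its embedding.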
+void FairmotMindsporePost::Get_detections(cv::Mat det,
+                                          std::vector<STack *> &detections,
+                                          cv::Mat id_feature) {
+  if (det.rows > 0) {
+    cv::Mat det_tmp = det(cv::Range(0, det.rows), cv::Range(0, 5)).clone();
+    for (size_t x = 0; x < det.rows; ++x) {
+      cv::Mat tlbrs = det_tmp.row(x);
+      cv::Mat f = id_feature.row(x);
+      cv::Mat ret = tlbrs(cv::Range(0, tlbrs.rows), cv::Range(0, 4));
+      for (size_t y = 0; y < 2; ++y) {
+        ret.at<float>(0, y + 2) -= ret.at<float>(0, y);
+      }
+      STack *stack = new STack(ret, tlbrs.at<float>(0, 4), f, 30);
+      detections.push_back(stack);
+    }
+  }
+}
+
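+// Appearance cost: 1 - cosine similarity between the L2-normalized detection
+// embeddings (curr_feat) and track smoothed features (smooth_feat).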
+void FairmotMindsporePost::Get_dists(cv::Mat &dists,
+                                     std::vector<STack *> &detections,
+                                     std::vector<STack *> &strack_pool) {
+  if (dists.rows != 0) {
+    cv::Mat det_features(detections.size(), (*detections[0]).curr_feat.cols,
+                         CV_32FC1);
+    cv::Mat track_features(strack_pool.size(),
+                           (*strack_pool[0]).smooth_feat.cols, CV_32FC1);
+    for (size_t i = 0; i < detections.size(); i++)
+      det_features.row(i) = (*detections[i]).curr_feat + 0;
+    det_features.convertTo(det_features, CV_64F);
+    for (size_t i = 0; i < strack_pool.size(); i++)
+      track_features.row(i) = (*strack_pool[i]).smooth_feat + 0;
+    track_features.convertTo(track_features, CV_64F);
+    // cv::Mat cdist(track_features.rows, det_features.rows, CV_64FC1);
+    for (size_t i = 0; i < dists.rows; i++)
+      for (size_t j = 0; j < dists.cols; j++) {
+        cv::normalize(det_features.row(j), det_features.row(j));
+        cv::normalize(track_features.row(i), track_features.row(i));
+        dists.at<double>(i, j) =
+            1 - track_features.row(i).dot(det_features.row(j));
+      }
+  }
+}
+
+void FairmotMindsporePost::Update_Starcks(
+    const std::vector<STack *> &strack_pool, std::vector<STack *> &detections,
+    const cv::Mat &matches, std::vector<STack *> &activated_starcks,
+    std::vector<STack *> &refind_stracks, const MxBase::JDETracker &tracker) {
+  for (size_t i = 0; i < matches.rows; i++) {
+    STack *track = strack_pool[matches.at<float>(i, 0)];
+    STack *dets = detections[matches.at<float>(i, 1)];
+    if ((*track).state == TrackState_Tracked) {
+      (*track).update((*detections[matches.at<float>(i, 1)]), tracker.frame_id);
+      activated_starcks.push_back(track);
+    } else {
+      (*track).re_activate(*dets, tracker.frame_id, false);
+      refind_stracks.push_back(track);
+    }
+  }
+}
+
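+// One tracking step, following the FairMOT association cascade:
+// 1) keep detections with confidence above CONF_THRES;
+// 2) match the track pool to detections using fused embedding/motion costs;
+// 3) match the remaining tracked tracks to leftover detections by IoU;
+// 4) match unconfirmed tracks by IoU, removing those still unmatched;
+// 5) activate new tracks for high-score leftovers and retire stale lost ones.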
+std::vector<STack *> FairmotMindsporePost::ObjectDetectionOutput(
+    const std::vector<TensorBase> &tensors, MxBase::JDETracker &tracker) {
+  tracker.frame_id += 1;
+  cv::Mat id_feature, det, matches, u_track, u_detection, u_unconfirmed, dists;
+  std::vector<STack *> activated_starcks, refind_stracks, lost_stracks,
+      removed_stracks, detections, unconfirmed, tracked_stracks, det_tmp,
+      r_tracked_stracks, output_stracks, strack_pool;
+  TensorBaseToCVMat(det, tensors[1]);
+  TensorBaseToCVMat(id_feature, tensors[0]);
+  PostProcess(det, tracker);
+  cv::Mat scores = det(cv::Range(0, det.rows), cv::Range(4, 5));
+  cv::Mat new_det(0, det.cols, CV_32FC1), new_id(0, id_feature.cols, CV_32FC1);
+  for (size_t x = 0; x < det.rows; ++x) {
+    if (det.at<float>(x, 4) > CONF_THRES) {
+      new_det.push_back(det.row(x));
+      new_id.push_back(id_feature.row(x));
+    }
+  }
+  det = new_det;
+  id_feature = new_id;
+  Get_detections(det, detections, id_feature);
+  for (size_t i = 0; i < tracker.tracked_stracks.size(); i++)
+    if (!(*tracker.tracked_stracks[i]).is_activated)
+      unconfirmed.push_back(tracker.tracked_stracks[i]);
+    else
+      tracked_stracks.push_back(tracker.tracked_stracks[i]);
+  strack_pool = JointStracks(tracked_stracks, tracker.lost_stracks);
+  MultiPredict(strack_pool);
+  dists = cv::Mat::zeros(strack_pool.size(), detections.size(), CV_64FC1);
+  Get_dists(dists, detections, strack_pool);
+  FuseMotion(tracker.kalman_filter, dists, strack_pool, detections);
+  std::vector<cv::Mat> results;
+  results = LinearAssignment(dists, 0.4);
+  matches = results[0];
+  u_track = results[1];
+  u_detection = results[2];
+  Update_Starcks(strack_pool, detections, matches, activated_starcks,
+                 refind_stracks, tracker);
+  for (size_t i = 0; i < u_detection.cols; ++i)
+    det_tmp.push_back(
+        detections[static_cast<int>(u_detection.at<float>(0, i))]);
+  detections = det_tmp;
+  std::vector<STack *>().swap(det_tmp);
+  for (size_t i = 0; i < u_track.cols; ++i)
+    if ((*strack_pool[u_track.at<float>(0, i)]).state == TrackState_Tracked)
+      r_tracked_stracks.push_back(strack_pool[u_track.at<float>(0, i)]);
+  dists = IouDistance(r_tracked_stracks, detections);
+  results = LinearAssignment(dists, 0.5);
+  matches = results[0];
+  u_track = results[1];
+  u_detection = results[2];
+  Update_Starcks(r_tracked_stracks, detections, matches, activated_starcks,
+                 refind_stracks, tracker);
+  for (size_t i = 0; i < u_track.cols; i++) {
+    STack *track = r_tracked_stracks[u_track.at<float>(0, i)];
+    if ((*track).state != TrackState_Lost) {
+      (*track).state = TrackState_Lost;
+      lost_stracks.push_back(track);
+    }
+  }
+  for (size_t i = 0; i < u_detection.cols; ++i)
+    det_tmp.push_back(
+        detections[static_cast<int>(u_detection.at<float>(0, i))]);
+  detections = det_tmp;
+  std::vector<STack *>().swap(det_tmp);
+  dists = IouDistance(unconfirmed, detections);
+  results = LinearAssignment(dists, 0.7);
+  matches = results[0];
+  u_unconfirmed = results[1];
+  u_detection = results[2];
+  for (size_t i = 0; i < matches.rows; i++) {
+    (*unconfirmed[matches.at<float>(i, 0)])
+        .update((*detections[matches.at<float>(i, 1)]), tracker.frame_id);
+    activated_starcks.push_back(unconfirmed[matches.at<float>(i, 0)]);
+  }
+  for (size_t i = 0; i < u_unconfirmed.cols; i++) {
+    STack *track = unconfirmed[u_unconfirmed.at<float>(0, i)];
+    (*track).state = TrackState_Removed;
+    removed_stracks.push_back(track);
+  }
+  for (int j = 0; j < u_detection.cols; j++) {
+    auto inew = u_detection.at<float>(0, j);
+    STack *track = detections[inew];
+    if ((*track).score < tracker.det_thresh) continue;
+    (*track).activate(tracker.kalman_filter, tracker.frame_id);
+    activated_starcks.push_back(track);
+  }
+  for (size_t i = 0; i < tracker.lost_stracks.size(); i++) {
+    if (tracker.frame_id - (*tracker.lost_stracks[i]).frame_id >
+        tracker.max_time_lost) {
+      (*tracker.lost_stracks[i]).state = TrackState_Removed;
+      removed_stracks.push_back(tracker.lost_stracks[i]);
+    }
+  }
+  output_stracks =
+      Get_output_stracks(tracker, activated_starcks, refind_stracks,
+                         lost_stracks, removed_stracks);
+  return output_stracks;
+}
+
+APP_ERROR FairmotMindsporePost::Process(const std::vector<TensorBase> &tensors,
+                                        MxBase::JDETracker &tracker,
+                                        MxBase::Files &file) {
+  LogDebug << "Begin to process FairmotMindsporePost.";
+  auto inputs = tensors;
+  APP_ERROR ret = CheckAndMoveTensors(inputs);
+  if (ret != APP_ERR_OK) {
+    LogError << "CheckAndMoveTensors failed, ret=" << ret;
+    return ret;
+  }
+  std::vector<STack *> online_targets;
+  online_targets = ObjectDetectionOutput(inputs, tracker);
+  std::vector<cv::Mat> online_tlwhs;
+  std::vector<int> online_ids;
+  for (size_t i = 0; i < online_targets.size(); i++) {
+    cv::Mat tlwh = (*online_targets[i]).gettlwh();
+    int tid = (*online_targets[i]).track_id;
+    double tmp = tlwh.at<double>(0, 2) / tlwh.at<double>(0, 3);
+    bool vertical = false;
+    if (tmp > 1.6) {
+      vertical = true;
+    }
+    if ((tlwh.at<double>(0, 2) * tlwh.at<double>(0, 3) > MIN_BOX_AREA) &&
+        vertical == false) {
+      online_tlwhs.push_back(tlwh);
+      online_ids.push_back(tid);
+    }
+  }
+  Results *result = new Results(file.frame_id + 1, online_tlwhs, online_ids);
+  file.results.push_back(result);
+  file.frame_id += 1;
+  LogInfo << "End to process FairmotMindsporePost.";
+  return APP_ERR_OK;
+}
+
+extern "C" {
+std::shared_ptr<MxBase::FairmotMindsporePost> GetObjectInstance() {
+  LogInfo << "Begin to get FairmotMindsporePost instance.";
+  auto instance = std::make_shared<FairmotMindsporePost>();
+  LogInfo << "End to get FairmotMindsporePost Instance";
+  return instance;
+}
+}
+
+}  // namespace MxBase
diff --git a/research/cv/fairmot/infer/mxbase/src/PostProcess/FairmotMindsporePost.h b/research/cv/fairmot/infer/mxbase/src/PostProcess/FairmotMindsporePost.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f630b8becdbf5a20811da32a9d66315b913a222
--- /dev/null
+++ b/research/cv/fairmot/infer/mxbase/src/PostProcess/FairmotMindsporePost.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include <opencv4/opencv2/opencv.hpp>
+typedef int row;
+typedef double f64_mat_t[4][4]; /**< a matrix */
+typedef double f64_vec_t[4];
+#define ROW_TYPE INT
+typedef int col;
+#define COL_TYPE INT
+typedef double cost;
+#define COST_TYPE DOUBLE
+#define BIG 100000
+#if !defined TRUE
+#define TRUE 1
+#endif
+#if !defined FALSE
+#define FALSE 0
+#endif
+
+/*************** DATA TYPES *******************/
+
+typedef int boolean;
+#include "MxBase/CV/Core/DataType.h"
+#include "MxBase/ErrorCode/ErrorCode.h"
+#include "MxBase/PostProcessBases/PostProcessBase.h"
+
+namespace MxBase {
+class Results {
+ public:
+  Results(uint32_t frame_id, const std::vector<cv::Mat> &online_tlwhs,
+          const std::vector<int> &online_ids);
+  uint32_t frame_id;
+  std::vector<cv::Mat> online_tlwhs;
+  std::vector<int> online_ids;
+};
+class TrackState {
+ public:
+  uint32_t New = 0;
+  uint32_t Tracked = 1;
+  uint32_t Lost = 2;
+  uint32_t Removed = 3;
+};
+class BaseTrack {
+ public:
+  uint32_t trackId = 0;
+  bool activated = false;
+  uint32_t base_state;
+  int next_id();
+
+ private:
+  uint32_t count = 0;
+};
+class KalmanFilter {
+ public:
+  std::map<int, float> chi2inv95 = {{1, 3.8415}, {2, 5.9915}, {3, 7.8147},
+                                    {4, 9.4877}, {5, 11.070}, {6, 12.592},
+                                    {7, 14.067}, {8, 15.507}, {9, 16.919}};
+  uint32_t ndim;
+  float dt;
+  KalmanFilter();
+  void chol_subtitute(cv::Mat chol_factor, cv::Mat b, f64_vec_t *f_x, int n);
+  void cholesky_decomposition(const cv::Mat &A, cv::Mat &L);
+  void initiate(cv::Mat measurement, cv::Mat &mean, cv::Mat &covariance);
+  void multi_predict(cv::Mat &mean, std::vector<cv::Mat> &covariance);
+  cv::Mat GatingDistance(cv::Mat mean, cv::Mat covariance, cv::Mat measurements,
+                         bool only_position = false,
+                         const std::string &metric = "maha");
+  void project(cv::Mat &mean, cv::Mat &covariance);
+  void update(cv::Mat &mean, cv::Mat &covariance, cv::Mat measurement);
+
+ private:
+  cv::Mat motion_mat;
+  cv::Mat update_mat;
+  float std_weight_position;
+  float std_weight_velocity;
+};
+
+class STack : public BaseTrack {
+ public:
+  cv::Mat mean, covariance;
+  cv::Mat smooth_feat;
+  cv::Mat curr_feat;
+  bool is_activated;
+  KalmanFilter kalman_filter;
+  float score;
+  float alpha;
+  uint32_t tracklet_len;
+  int track_id;
+  uint32_t state;
+  uint32_t start_frame;
+  uint32_t frame_id;
+  std::vector<cv::Mat> features;
+  STack();
+  STack(cv::Mat tlwh, float score, cv::Mat temp_feat, uint32_t buffer_size);
+  void activate(const KalmanFilter &kalman_filter, uint32_t frame_id);
+  void re_activate(STack new_track, int frame_id, bool new_id = false);
+  cv::Mat tlwh_to_xyah(cv::Mat tlwh);
+  cv::Mat tlbr();
+  cv::Mat gettlwh();
+  void update(STack new_track, int frame_id, bool update_feature = true);
+
+ private:
+  cv::Mat tlwh;
+  void update_features(cv::Mat temp_feat);
+};
+class JDETracker {
+ public:
+  explicit JDETracker(uint32_t frame_rate);
+  std::vector<STack *> tracked_stracks;
+  std::vector<STack *> lost_stracks;
+  std::vector<STack *> removed_stracks;
+  uint32_t frame_id = 0;
+  uint32_t out_height = 0;
+  uint32_t out_width = 0;
+  std::vector<float> c;
+  float det_thresh;
+  float s = 0;
+  int buffer_size;
+  int max_time_lost;
+  int max_per_image;
+  std::string seq;
+  std::string image_file;
+  cv::Mat mean;
+  cv::Mat std;
+  KalmanFilter kalman_filter;
+};
+class Files {
+ public:
+  uint32_t frame_id = 0;
+  std::vector<Results *> results;
+};
+class FairmotMindsporePost : public PostProcessBase {
+ public:
+  FairmotMindsporePost() = default;
+
+  ~FairmotMindsporePost() = default;
+
+  FairmotMindsporePost(const FairmotMindsporePost &other) = default;
+
+  FairmotMindsporePost &operator=(const FairmotMindsporePost &other);
+
+  APP_ERROR Init(
+      const std::map<std::string, std::shared_ptr<void>> &postConfig) override;
+
+  APP_ERROR DeInit() override;
+
+  APP_ERROR Process(const std::vector<TensorBase> &tensors,
+                    MxBase::JDETracker &tracker, MxBase::Files &file);
+
+  bool IsValidTensors(const std::vector<TensorBase> &tensors) const override;
+
+ private:
+  std::vector<STack *> ObjectDetectionOutput(
+      const std::vector<TensorBase> &tensors, MxBase::JDETracker &tracker);
+  void TransformPreds(const cv::Mat &coords, MxBase::JDETracker tracker,
+                      cv::Mat &target_coords);
+  void PostProcess(cv::Mat &det, const MxBase::JDETracker &tracker);
+  void TensorBaseToCVMat(cv::Mat &imageMat, const MxBase::TensorBase &tensor);
+  void FuseMotion(MxBase::KalmanFilter &kalman_filter, cv::Mat &cost_matrix,
+                  std::vector<STack *> tracks, std::vector<STack *> detections,
+                  bool only_position = false, float lambda_ = 0.98);
+  std::vector<cv::Mat> LinearAssignment(cv::Mat cost_matrix, float thresh);
+  void lap(cost **assigncost, col *rowsol, row *colsol, cost *u, cost *v,
+           int row, int col);
+  std::vector<cv::Mat> get_lap(cost **assigncost, col *rowsol, row *colsol,
+                               cost *u, cost *v, int row, int col, int dim);
+  std::vector<STack *> JointStracks(std::vector<STack *> tlista,
+                                    std::vector<STack *> tlistb);
+  std::vector<STack *> SubStracks(std::vector<STack *> tlista,
+                                  std::vector<STack *> tlistb);
+  void RemoveDuplicateStracks(std::vector<STack *> &stracksa,
+                              std::vector<STack *> &stracksb);
+  cv::Mat IouDistance(std::vector<STack *> atracks,
+                      std::vector<STack *> btracks);
+  cv::Mat BboxOverlaps(std::vector<cv::Mat> boxes,
+                       std::vector<cv::Mat> query_boxes);
+  std::vector<STack *> Get_output_stracks(
+      MxBase::JDETracker &tracker,
+      const std::vector<STack *> &activated_starcks,
+      const std::vector<STack *> &refind_stracks,
+      std::vector<STack *> lost_stracks, std::vector<STack *> removed_stracks);
+  void Get_detections(cv::Mat det, std::vector<STack *> &detections,
+                      cv::Mat id_feature);
+  void Get_dists(cv::Mat &dists, std::vector<STack *> &detections,
+                 std::vector<STack *> &strack_pool);
+  void Update_Starcks(const std::vector<STack *> &strack_pool,
+                      std::vector<STack *> &detections, const cv::Mat &matches,
+                      std::vector<STack *> &activated_starcks,
+                      std::vector<STack *> &refind_stracks,
+                      const MxBase::JDETracker &tracker);
+  void get_result(int *collist, int cols, cost *d, cost min, int rows,
+                  int &endofpath, row *colsol, cost **assigncost, cost *v,
+                  int *pred);
+  void func(int &numfree, int *free, cost **assigncost, row *colsol, cost *v,
+            col *rowsol, int cols);
+  void MultiPredict(std::vector<STack *> &stracks);
+  const uint32_t DEFAULT_CLASS_NUM_MS = 80;
+  const float DEFAULT_SCORE_THRESH_MS = 0.7;
+  const float DEFAULT_IOU_THRESH_MS = 0.5;
+  const uint32_t DEFAULT_RPN_MAX_NUM_MS = 1000;
+  const uint32_t DEFAULT_MAX_PER_IMG_MS = 128;
+
+  uint32_t classNum_ = DEFAULT_CLASS_NUM_MS;
+  float scoreThresh_ = DEFAULT_SCORE_THRESH_MS;
+  float iouThresh_ = DEFAULT_IOU_THRESH_MS;
+  uint32_t rpnMaxNum_ = DEFAULT_RPN_MAX_NUM_MS;
+  uint32_t maxPerImg_ = DEFAULT_MAX_PER_IMG_MS;
+};
+
+extern "C" {
+std::shared_ptr<MxBase::FairmotMindsporePost> GetObjectInstance();
+}
+}  // namespace MxBase
diff --git a/research/cv/fairmot/infer/mxbase/src/main.cpp b/research/cv/fairmot/infer/mxbase/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..94094d6c0eba45329a02269fcf745c2db42874cd
--- /dev/null
+++ b/research/cv/fairmot/infer/mxbase/src/main.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Fairmot.h"
+#include "MxBase/Log/Log.h"
+
+namespace {
+const uint32_t DEVICE_ID = 0;
+}  // namespace
+
+int main(int argc, char *argv[]) {
+  int num = 2;
+  if (argc <= num) {
+    LogWarn << "Please input the om file path and image path, such as "
+               "'./Fairmot_mindspore [om_file_path] [img_path]'.";
+    return APP_ERR_OK;
+  }
+
+  InitParam initParam = {};
+  initParam.deviceId = DEVICE_ID;
+
+  initParam.checkTensor = true;
+
+  initParam.modelPath = argv[1];
+  auto inferFairmot = std::make_shared<Fairmot>();
+  APP_ERROR ret = inferFairmot->Init(initParam);
+  if (ret != APP_ERR_OK) {
+    LogError << "Fairmot init failed, ret=" << ret << ".";
+    return ret;
+  }
+
+  std::string imgPath = argv[2];
+  ret = inferFairmot->Process(imgPath);
+  if (ret != APP_ERR_OK) {
+    LogError << "Fairmot process failed, ret=" << ret << ".";
+    inferFairmot->DeInit();
+    return ret;
+  }
+  inferFairmot->DeInit();
+  return APP_ERR_OK;
+}
diff --git a/research/cv/fairmot/infer/requirements.txt b/research/cv/fairmot/infer/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3d6bc33ef38a6cd374272e7224c301896f0909da
--- /dev/null
+++ b/research/cv/fairmot/infer/requirements.txt
@@ -0,0 +1,4 @@
+motmetrics
+lap
+openpyxl
+cython_bbox
diff --git a/research/cv/fairmot/infer/sdk/api/__init__.py b/research/cv/fairmot/infer/sdk/api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/research/cv/fairmot/infer/sdk/api/infer.py b/research/cv/fairmot/infer/sdk/api/infer.py
new file mode 100644
index 0000000000000000000000000000000000000000..d828e015cd52aa75affc0d4a688b10edb87f4fe3
--- /dev/null
+++ b/research/cv/fairmot/infer/sdk/api/infer.py
@@ -0,0 +1,127 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""sdk infer"""
+import json
+import logging
+import MxpiDataType_pb2 as MxpiDataType
+from StreamManagerApi import StreamManagerApi, MxDataInput, InProtobufVector, MxProtobufIn, StringVector
+
+from config import config as cfg
+
+
+class SdkApi:
+    """sdk api"""
+    INFER_TIMEOUT = cfg.INFER_TIMEOUT
+    STREAM_NAME = cfg.STREAM_NAME
+
+    def __init__(self, pipeline_cfg):
+        self.pipeline_cfg = pipeline_cfg
+        self._stream_api = None
+        self._data_input = None
+        self._device_id = None
+
+    def init(self):
+        """sdk init """
+        with open(self.pipeline_cfg, 'r') as fp:
+            self._device_id = int(
+                json.loads(fp.read())[self.STREAM_NAME]["stream_config"]
+                ["deviceId"])
+            print("The device id: {}.".format(self._device_id))
+
+        # create api
+        self._stream_api = StreamManagerApi()
+
+        # init stream mgr
+        ret = self._stream_api.InitManager()
+        if ret != 0:
+            print(f"Failed to init stream manager, ret={ret}.")
+            return False
+
+        # create streams
+        with open(self.pipeline_cfg, 'rb') as fp:
+            pipe_line = fp.read()
+
+        ret = self._stream_api.CreateMultipleStreams(pipe_line)
+        if ret != 0:
+            print(f"Failed to create stream, ret={ret}.")
+            return False
+
+        self._data_input = MxDataInput()
+
+        return True
+
+    def __del__(self):
+        """del sdk"""
+        if not self._stream_api:
+            return
+
+        self._stream_api.DestroyAllStreams()
+
+    def send_data_input(self, stream_name, plugin_id, input_data):
+        """input data use SendData"""
+        data_input = MxDataInput()
+        data_input.data = input_data
+        unique_id = self._stream_api.SendData(stream_name, plugin_id,
+                                              data_input)
+        if unique_id < 0:
+            logging.error("Fail to send data to stream.")
+            return False
+        return True
+
+    def _send_protobuf(self, stream_name, plugin_id, element_name, buf_type,
+                       pkg_list):
+        """input data use SendProtobuf"""
+        protobuf = MxProtobufIn()
+        protobuf.key = element_name.encode("utf-8")
+        protobuf.type = buf_type
+        protobuf.protobuf = pkg_list.SerializeToString()
+        protobuf_vec = InProtobufVector()
+        protobuf_vec.push_back(protobuf)
+        err_code = self._stream_api.SendProtobuf(stream_name, plugin_id,
+                                                 protobuf_vec)
+        if err_code != 0:
+            logging.error(
+                "Failed to send data to stream, stream_name(%s), plugin_id(%s), element_name(%s), "
+                "buf_type(%s), err_code(%s).", stream_name, plugin_id,
+                element_name, buf_type, err_code)
+            return False
+        return True
+
+    def send_img_input(self, stream_name, plugin_id, element_name, input_data,
+                       img_size):
+        """use cv input to sdk"""
+        vision_list = MxpiDataType.MxpiVisionList()
+        vision_vec = vision_list.visionVec.add()
+        vision_vec.visionInfo.format = 1
+        vision_vec.visionInfo.width = img_size[1]
+        vision_vec.visionInfo.height = img_size[0]
+        vision_vec.visionInfo.widthAligned = img_size[1]
+        vision_vec.visionInfo.heightAligned = img_size[0]
+        vision_vec.visionData.memType = 0
+        vision_vec.visionData.dataStr = input_data
+        vision_vec.visionData.dataSize = len(input_data)
+        buf_type = b"MxTools.MxpiVisionList"
+        return self._send_protobuf(stream_name, plugin_id, element_name,
+                                   buf_type, vision_list)
+
+    def get_result(self, stream_name, out_plugin_id=0):
+        """get_result"""
+        key_vec = StringVector()
+        key_vec.push_back(b'mxpi_modelinfer0')
+        infer_result = self._stream_api.GetProtobuf(
+            stream_name, out_plugin_id, key_vec)
+        result = MxpiDataType.MxpiTensorPackageList()
+        result.ParseFromString(infer_result[0].messageBuf)
+        return result.tensorPackageVec[0].tensorVec[0].dataStr, result.tensorPackageVec[0].tensorVec[1].dataStr
diff --git a/research/cv/fairmot/infer/sdk/config/config.py b/research/cv/fairmot/infer/sdk/config/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..e483a546ccaace1dc63f1c7c974ce64861377aac
--- /dev/null
+++ b/research/cv/fairmot/infer/sdk/config/config.py
@@ -0,0 +1,25 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""config"""
+
+STREAM_NAME = "im_fairmot"
+MODEL_WIDTH = 1088
+MODEL_HEIGHT = 608
+
+INFER_TIMEOUT = 100000
+
+TENSOR_DTYPE_FLOAT32 = 0
+TENSOR_DTYPE_FLOAT16 = 1
+TENSOR_DTYPE_INT8 = 2
diff --git a/research/cv/fairmot/infer/sdk/config/fairmot.pipeline b/research/cv/fairmot/infer/sdk/config/fairmot.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..d27c6cd8e6b4f8ec35f85415597cce2140f4c31a
--- /dev/null
+++ b/research/cv/fairmot/infer/sdk/config/fairmot.pipeline
@@ -0,0 +1,33 @@
+{
+    "im_fairmot": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_modelinfer0"
+        },
+        "mxpi_modelinfer0": {
+            "props": {
+                "dataSource": "appsrc0",
+                "modelPath": "../convert/fairmot.om",
+                "tensorFormat": "1"
+            },
+            "factory": "mxpi_modelinfer",
+            "next": "mxpi_dataserialize0"
+        },
+        "mxpi_dataserialize0": {
+            "props": {
+                "outputDataKeys": "mxpi_modelinfer0"
+            },
+            "factory": "mxpi_dataserialize",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "factory": "appsink"
+        }
+    }
+}
\ No newline at end of file
diff --git a/research/cv/fairmot/infer/sdk/main.py b/research/cv/fairmot/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bb7f9369e8e0f6a9811bbf711c137380e4e5173
--- /dev/null
+++ b/research/cv/fairmot/infer/sdk/main.py
@@ -0,0 +1,142 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""main"""
+
+import argparse
+import os
+import time
+
+import cv2
+from api.infer import SdkApi
+from config import config as cfg
+
+
+def parser_args():
+    """parser_args"""
+    parser = argparse.ArgumentParser(description="fairmot inference")
+
+    parser.add_argument("--img_path",
+                        type=str,
+                        required=False,
+                        default="/fairmot/data/MOT20",
+                        help="image directory.")
+    parser.add_argument(
+        "--pipeline_path",
+        type=str,
+        required=False,
+        default="/fairmot/infer/sdk/config/fairmot.pipeline",
+        help="image file path. The default is '/fairmot/infer/sdk/config/fairmot.pipeline'. ")
+    parser.add_argument(
+        "--model_type",
+        type=str,
+        required=False,
+        default="dvpp",
+        help=
+        "rgb: high-precision, dvpp: high performance. The default is 'dvpp'.")
+    parser.add_argument(
+        "--infer_mode",
+        type=str,
+        required=False,
+        default="infer",
+        help=
+        "infer:only infer, eval: accuracy evaluation. The default is 'infer'.")
+    parser.add_argument(
+        "--infer_result_dir",
+        type=str,
+        required=False,
+        default="../data/infer_result",
+        help=
+        "cache dir of inference result. The default is '../data/infer_result'."
+    )
+    arg = parser.parse_args()
+    return arg
+
+
+def process_img(img_file):
+    img0 = cv2.imread(img_file)
+    img, _, _, _ = letterbox(img0, height=cfg.MODEL_HEIGHT, width=cfg.MODEL_WIDTH)
+    return img
+
+
+def letterbox(img, height=608, width=1088,
+              color=(127.5, 127.5, 127.5)):
+    """resize a rectangular image to a padded rectangular"""
+    shape = img.shape[:2]  # shape = [height, width]
+    ratio = min(float(height) / shape[0], float(width) / shape[1])
+    new_shape = (round(shape[1] * ratio), round(shape[0] * ratio))  # new_shape = [width, height]
+    dw = (width - new_shape[0]) / 2  # width padding
+    dh = (height - new_shape[1]) / 2  # height padding
+    top, bottom = round(dh - 0.1), round(dh + 0.1)
+    left, right = round(dw - 0.1), round(dw + 0.1)
+    img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA)  # resized, no border
+    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # padded rectangular
+    return img, ratio, dw, dh
+
+
+def image_inference(pipeline_path, stream_name, img_dir, result_dir, seqs):
+    """image_inference"""
+    sdk_api = SdkApi(pipeline_path)
+    if not sdk_api.init():
+        exit(-1)
+    print(stream_name)
+    if not os.path.exists(result_dir):
+        os.makedirs(result_dir)
+
+    img_data_plugin_id = 0
+
+    print("\nBegin to inference for {}.\n".format(img_dir))
+    for seq in seqs:
+        seq_path = os.path.join(result_dir, seq)
+        if not os.path.exists(seq_path):
+            os.makedirs(seq_path)
+        data_dir = os.path.join(img_dir, 'train', seq, 'img1')
+        file_list = os.listdir(data_dir)
+        total_len = len(file_list)
+        for img_id, file_name in enumerate(file_list):
+            file_path = os.path.join(data_dir, file_name)
+            save_path = [os.path.join(seq_path, "{}_0.bin".format(os.path.splitext(file_name)[0])),
+                         os.path.join(seq_path, "{}_1.bin".format(os.path.splitext(file_name)[0]))]
+            img_np = process_img(file_path)
+            img_shape = img_np.shape
+            sdk_api.send_img_input(stream_name,
+                                   img_data_plugin_id, "appsrc0",
+                                   img_np.tobytes(), img_shape)
+            start_time = time.time()
+            result = sdk_api.get_result(stream_name)
+            end_time = time.time() - start_time
+            with open(save_path[0], "wb") as fp:
+                fp.write(result[0])
+            with open(save_path[1], "wb") as fp:
+                fp.write(result[1])
+            print(
+                f"End-to-end inference, file_name: {file_path}, "
+                f"{img_id + 1}/{total_len}, elapsed_time: {end_time}.\n"
+            )
+
+
+if __name__ == "__main__":
+    args = parser_args()
+    seqs_str = '''MOT20-01
+      MOT20-02
+     MOT20-03
+     MOT20-05
+     '''
+    Seqs = [seq.strip() for seq in seqs_str.split()]
+    image_inference(args.pipeline_path, cfg.STREAM_NAME.encode("utf-8"), args.img_path,
+                    args.infer_result_dir, Seqs)
diff --git a/research/cv/fairmot/infer/sdk/run.sh b/research/cv/fairmot/infer/sdk/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c13b4102ffdd04bcd9e51ed24ac5df56bde45611
--- /dev/null
+++ b/research/cv/fairmot/infer/sdk/run.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+python3.7 main.py --img_path ../../data/MOT20 --pipeline_path ./config/fairmot.pipeline --infer_result_dir ../../data/infer_result
diff --git a/research/cv/fairmot/modelarts/readme.md b/research/cv/fairmot/modelarts/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..63abb0ad3a6d39ec0a8f600575c02c1b52b26ebf
--- /dev/null
+++ b/research/cv/fairmot/modelarts/readme.md
@@ -0,0 +1,156 @@
+# Running on ModelArts
+
+To run on ModelArts, change line 28 of 'fairmot/src/infer_net.py' to `self.gather = ops.GatherV2()` and replace line 37 with:
+
+```python
+   feat = self.gather(feat, ind, 1)
+   feat = self.squeeze(feat)
+   feat = feat[:, :, 0, :]
+```
+
+## Create an OBS Bucket
+
+1. Log in to the [OBS console](https://storage.huaweicloud.com/obs) and create an OBS bucket. For details, see the [Creating a Bucket](https://support.huaweicloud.com/usermanual-obs/obs_03_0306.html) section. For example, create an OBS bucket named "mindspore-dataset".
+
+   ![img](https://r.huaweistatic.com/s/ascendstatic/lst/modelZooImg/public_sys-resources/note_3.0-zh-cn.png)
+
+   The bucket must be created in the same region as ModelArts. For example, if ModelArts is currently in the CN North-Beijing4 region, select CN North-Beijing4 when creating the bucket in OBS.
+
+2. Create the folders for storing data. For details, see the [Creating a Folder](https://support.huaweicloud.com/usermanual-obs/obs_03_0316.html) section. For example, create the following model directories in the "mindspore-dataset" bucket.
+
+   ![img](res/new_fold.png)
+
+   Directory structure:
+
+   - code: training scripts
+   - dataset: training datasets
+   - log: training logs
+   - output: ckpt and pb models produced by training
+
+   Upload the fairmot script folder to the "code" directory. Upload the datasets "ETH, CalTech, MOT17, CUHK-SYSU, PRW, CityPerson", the data configuration file 'data.json', and the pretrained model 'dla34-ba72cf86_ms.ckpt' to the "dataset" directory, organized as follows:
+
+    ```path
+        └── dataset
+            ├── dla34-ba72cf86_ms.ckpt
+            ├── data.json
+            ├── caltech.all
+            ├── prw.train
+            ├── mot17.half
+            ├── eth.train
+            ├── cuhksysu.train
+            ├── citypersons.train
+            ├── Caltech
+            │   └── data
+            │       ├── images
+            │       │   ├── set00_V000_1271.png
+            │       │   ├── ...
+            │       │   └── set07_V011_1391.png
+            │       └── labels_with_ids
+            │           ├── set00_V000_1271.txt
+            │           ├── ...
+            │           └── set07_V011_1391.txt
+            ├── Cityscapes
+            │   ├── images
+            │   │   ├── test
+            │   │   ├── train
+            │   │   └── val
+            │   └── labels_with_ids
+            │       ├── train
+            │       └── val
+            ├── CUHKSYSU
+            │   ├── images
+            │   │   ├── s1.jpg
+            │   │   ├── ...
+            │   │   └── s9999.jpg
+            │   └── labels_with_ids
+            │       ├── s1.txt
+            │       ├── ...
+            │       └── s9999.txt
+            ├── ETHZ
+            │   ├── eth01
+            │   ├── eth02
+            │   ├── eth03
+            │   ├── eth05
+            │   └── eth07
+            ├── PRW
+            │   ├── images
+            │   │   ├── c1s1_000151.jpg
+            │   │   ├── ...
+            │   │   └── c6s2_125693.jpg
+            │   └── labels_with_ids
+            │       ├── c1s1_000151.txt
+            │       ├── ...
+            │       └── c6s2_125693.txt
+            └── MOT17
+                ├── images
+                │   ├── test
+                │   └── train
+                └── labels_with_ids
+                    └── train
+
+    ```
+
+    In data.json, each entry under 'train' should map a dataset name to its list file, such as "eth": "/eth.train", "cuhksysu": "/cuhksysu.train", and so on; a sketch of the file appears after this list.
+3. Because the startup code imports from the src directory, copy the boot file start.py from the modelarts directory of the downloaded code into the code root directory.
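+
+A minimal, hypothetical sketch of what data.json could look like under the layout above, based only on the entries named in this document; the exact keys depend on your list files and configuration, so adjust as needed:
+
+```json
+{
+    "train": {
+        "eth": "/eth.train",
+        "cuhksysu": "/cuhksysu.train",
+        "caltech": "/caltech.all",
+        "prw": "/prw.train",
+        "mot17": "/mot17.half",
+        "citypersons": "/citypersons.train"
+    }
+}
+```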
+
+## Create an Algorithm
+
+1. An algorithm must be created before a training job: after opening the "Create Training Job" page, click "Create" on the "My Algorithms" page.
+
+2. Create the algorithm and fill in its parameters, such as the AI engine, the code directory, and the boot file described below.
+
+   ![img](res/create_algor.png)
+
+   | Parameter | Sub-parameter | Description |
+   | ------------ | ------------ | ------------------------------------------------------------ |
+   | AI Engine | - | Select the MindSpore version used for training |
+   | Code Directory | - | Select the directory containing the boot file |
+   | Boot File | - | Select the boot file |
+
+## Create a Training Job
+
+1. Log in to the [ModelArts console](https://console.huaweicloud.com/modelarts) with your Huawei Cloud account and choose "Training Management > Training Jobs" in the left navigation pane; the "Training Jobs" list is displayed by default.
+
+2. In the training job list, click "Create" in the upper left corner to open the "Create Training Job" page.
+
+3. On the "Create Training Job" page, set the training job parameters and click "Next".
+
+   This step describes only some of the training-job parameters; for the others, see the "Training a Model with a Common Framework" section of the [ModelArts AI Engineer User Guide](https://support.huaweicloud.com/modelarts/index.html).
+
+   1. Fill in the basic information.
+
+      Set the training job name.
+
+   2. Fill in the job parameters.
+
+      ![img](res/create_task.png)
+
+      | Parameter | Sub-parameter | Description |
+      | ------------ | ------------ | ------------------------------------------------------------ |
+      | Algorithm | My Algorithms | Select the algorithm created above |
+      | Data Source | Data storage location | Select the OBS directory where the dataset is stored. |
+      | Training Output | - | Set the model output directory; choose an empty directory if possible. |
+      | Running Parameters | - | Command-line arguments passed to the code; make sure the names match the parameter names in your algorithm code. Click "Add Running Parameter" to set multiple parameters. Example arguments passed to start.py: train_url: training output location (generated by default); data_url: data source (generated by default); run_distribute: True; data_cfg: 'data.json' (data configuration file); load_pre_model: dla34-ba72cf86_ms.ckpt (pretrained model); num_epochs: number of epochs (optional). |
+      | Job Log Path | - | Set the directory for storing training logs. |
+
+   3. Select the resources used for the training job.
+
+      Set the resource type to "Ascend".
+
+      ![img](https://r.huaweistatic.com/s/ascendstatic/lst/modelZooImg/zh-cn_image_0000001102434478.png)
+
+   4. After setting the parameters, click "Submit".
+
+4. On the "Confirm Specifications" page, verify that the information is correct and click "Confirm" to create the training job.
+
+   A training job usually takes some time to run; depending on the amount of data and the resources selected, it can take anywhere from a few minutes to several dozen minutes.
+
+## View Training Job Logs
+
+1. In the ModelArts console, choose "Training Management > Training Jobs" to open the training job list.
+
+2. In the training job list, click the job name to view its details.
+
+3. Click the "Logs" tab to view the log information of the job.
+
+   ![img](https://r.huaweistatic.com/s/ascendstatic/lst/modelZooImg/zh-cn_image_0000001119918494.png)
diff --git a/research/cv/fairmot/modelarts/res/create_algor.png b/research/cv/fairmot/modelarts/res/create_algor.png
new file mode 100644
index 0000000000000000000000000000000000000000..3079e4d293b7e4866d7546a7619b116ba6a750e0
Binary files /dev/null and b/research/cv/fairmot/modelarts/res/create_algor.png differ
diff --git a/research/cv/fairmot/modelarts/res/create_task.png b/research/cv/fairmot/modelarts/res/create_task.png
new file mode 100644
index 0000000000000000000000000000000000000000..8a07ce7c07125560c2628d415156f6304cc0cf79
Binary files /dev/null and b/research/cv/fairmot/modelarts/res/create_task.png differ
diff --git a/research/cv/fairmot/modelarts/res/new_fold.png b/research/cv/fairmot/modelarts/res/new_fold.png
new file mode 100644
index 0000000000000000000000000000000000000000..88e053b550e637fa32323905b16afafda8230253
Binary files /dev/null and b/research/cv/fairmot/modelarts/res/new_fold.png differ
diff --git a/research/cv/fairmot/modelarts/start.py b/research/cv/fairmot/modelarts/start.py
new file mode 100644
index 0000000000000000000000000000000000000000..e46f65d6114b0eb6cec2f322891aa2ff0ce6ca21
--- /dev/null
+++ b/research/cv/fairmot/modelarts/start.py
@@ -0,0 +1,138 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""train fairmot."""
+import json
+import os
+import glob
+import numpy as np
+from mindspore import context
+from mindspore import Tensor, export
+from mindspore import dtype as mstype
+from mindspore import Model
+import mindspore.nn as nn
+import mindspore.dataset as ds
+from mindspore.context import ParallelMode
+from mindspore.train.callback import TimeMonitor, ModelCheckpoint, CheckpointConfig
+from mindspore.communication.management import init
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from src.opts import Opts
+from src.losses import CenterNetMultiPoseLossCell
+from src.backbone_dla_conv import DLASegConv
+from src.infer_net import InferNet
+from src.fairmot_pose import WithNetCell
+from src.fairmot_pose import WithLossCell
+from src.utils.lr_schedule import dynamic_lr
+from src.utils.jde import JointDataset
+from src.utils.callback import LossCallback
+import moxing as mox
+
+
+def train(opt):
+    """train fairmot."""
+    local_data_path = '/cache/data'
+    device_id = int(os.getenv('DEVICE_ID'))
+    device_num = int(os.getenv('RANK_SIZE'))
+    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
+                        save_graphs=False, max_call_depth=10000)
+    context.set_context(device_id=device_id)
+    context.set_auto_parallel_context(
+        device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
+    init()
+    local_data_path = os.path.join(local_data_path, str(device_id))
+    opt.data_cfg = os.path.join(local_data_path, opt.data_cfg)
+    output_path = opt.train_url
+    load_path = os.path.join(local_data_path, opt.load_pre_model)
+    print('local_data_path:', local_data_path)
+    print('mixdata_path:', opt.data_cfg)
+    print('output_path:', output_path)
+    print('load_path', load_path)
+    # data download
+    print('Download data.')
+    mox.file.copy_parallel(src_url=opt.data_url, dst_url=local_data_path)
+    with open(opt.data_cfg) as f:
+        data_config = json.load(f)
+    train_set_paths = data_config['train']
+    dataset_root = local_data_path
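+    # Images are letterboxed to 1088 x 608 (width x height); this must match
+    # the [1, 3, 608, 1088] NCHW input shape used for AIR export below.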
+    dataset = JointDataset(
+        opt, dataset_root, train_set_paths, (1088, 608), augment=True)
+    opt = Opts().update_dataset_info_and_set_heads(opt, dataset)
+    if opt.is_modelarts or opt.run_distribute:
+        Ms_dataset = ds.GeneratorDataset(dataset, ['input', 'hm', 'reg_mask', 'ind', 'wh', 'reg', 'ids'],
+                                         shuffle=True, num_parallel_workers=8,
+                                         num_shards=device_num, shard_id=device_id)
+    else:
+        Ms_dataset = ds.GeneratorDataset(dataset, ['input', 'hm', 'reg_mask', 'ind', 'wh', 'reg', 'ids'],
+                                         shuffle=True)
+    Ms_dataset = Ms_dataset.batch(
+        batch_size=opt.batch_size, drop_remainder=True)
+    batch_dataset_size = Ms_dataset.get_dataset_size()
+    net = DLASegConv(opt.heads,
+                     down_ratio=4,
+                     final_kernel=1,
+                     last_level=5,
+                     head_conv=256)
+    net = net.set_train()
+    param_dict = load_checkpoint(load_path)
+    load_param_into_net(net, param_dict)
+    loss = CenterNetMultiPoseLossCell(opt)
+    lr = Tensor(dynamic_lr(20, opt.num_epochs, batch_dataset_size),
+                mstype.float32)
+    optimizer = nn.Adam(net.trainable_params(), learning_rate=lr)
+    net_with_loss = WithLossCell(net, loss)
+    fairmot_net = nn.TrainOneStepCell(net_with_loss, optimizer)
+
+    # define callback
+    loss_cb = LossCallback(opt.batch_size)
+    time_cb = TimeMonitor()
+    config_ckpt = CheckpointConfig(saved_network=net)
+    ckpoint_cb = ModelCheckpoint(prefix='Fairmot_{}'.format(device_id), directory=local_data_path + '/output/ckpt',
+                                 config=config_ckpt)
+    callbacks = [loss_cb, ckpoint_cb, time_cb]
+
+    # train
+    model = Model(fairmot_net)
+    model.train(opt.num_epochs, Ms_dataset, callbacks=callbacks)
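+    # Export the newest checkpoint to AIR, then copy the training outputs and
+    # the AIR file back to the OBS output path.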
+    export_AIR(local_data_path + "/output/ckpt", opt)
+    mox.file.copy_parallel(local_data_path + "/output", output_path)
+    mox.file.copy(src_url='fairmot.air', dst_url=output_path+'/fairmot.air')
+
+
+def export_AIR(ckpt_path, opt):
+    """start modelarts export"""
+    ckpt_list = glob.glob(ckpt_path + "/Fairmot_*.ckpt")
+    if not ckpt_list:
+        print("ckpt file not generated.")
+        return
+
+    ckpt_list.sort(key=os.path.getmtime)
+    ckpt_model = ckpt_list[-1]
+    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+    backbone_net = DLASegConv(opt.heads,
+                              down_ratio=4,
+                              final_kernel=1,
+                              last_level=5,
+                              head_conv=256,
+                              is_training=True)
+    infer_net = InferNet()
+    net_ = WithNetCell(backbone_net, infer_net)
+    param_dict = load_checkpoint(ckpt_model)
+    load_param_into_net(net_, param_dict)
+    input_arr = Tensor(np.zeros([1, 3, 608, 1088]), mstype.float32)
+    export(net_, input_arr, file_name='fairmot', file_format='AIR')
+
+
+if __name__ == '__main__':
+    opt_ = Opts().parse()
+    train(opt_)
diff --git a/research/cv/fairmot/modelarts/util/config.py b/research/cv/fairmot/modelarts/util/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fda058ca1da8c0a062d4c4d564fd8140e077be9
--- /dev/null
+++ b/research/cv/fairmot/modelarts/util/config.py
@@ -0,0 +1,127 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Parse arguments"""
+
+import os
+import ast
+import argparse
+from pprint import pprint, pformat
+import yaml
+
+
+class Config:
+    """
+    Configuration namespace. Convert dictionary to members.
+    """
+
+    def __init__(self, cfg_dict):
+        for k, v in cfg_dict.items():
+            if isinstance(v, (list, tuple)):
+                setattr(self, k, [Config(x) if isinstance(x, dict) else x for x in v])
+            else:
+                setattr(self, k, Config(v) if isinstance(v, dict) else v)
+
+    def __str__(self):
+        return pformat(self.__dict__)
+
+    def __repr__(self):
+        return self.__str__()
+
+
+def parse_cli_to_yaml(parser, cfg, helper=None, choices=None, cfg_path="default_config.yaml"):
+    """
+    Parse command line arguments to the configuration according to the default yaml.
+
+    Args:
+        parser: Parent parser.
+        cfg: Base configuration.
+        helper: Helper description.
+        choices: Choice description.
+        cfg_path: Path to the default yaml config.
+    """
+    parser = argparse.ArgumentParser(description="[REPLACE THIS at config.py]",
+                                     parents=[parser])
+    helper = {} if helper is None else helper
+    choices = {} if choices is None else choices
+    for item in cfg:
+        if not isinstance(cfg[item], list) and not isinstance(cfg[item], dict):
+            help_description = helper[item] if item in helper else "Please reference to {}".format(cfg_path)
+            choice = choices[item] if item in choices else None
+            if isinstance(cfg[item], bool):
+                parser.add_argument("--" + item, type=ast.literal_eval, default=cfg[item], choices=choice,
+                                    help=help_description)
+            else:
+                parser.add_argument("--" + item, type=type(cfg[item]), default=cfg[item], choices=choice,
+                                    help=help_description)
+    args = parser.parse_args()
+    return args
+
+
+def parse_yaml(yaml_path):
+    """
+    Parse the yaml config file.
+
+    Args:
+        yaml_path: Path to the yaml config.
+    """
+    with open(yaml_path, 'r') as fin:
+        cfgs = yaml.load_all(fin.read(), Loader=yaml.FullLoader)
+        cfgs = list(cfgs)
+        if len(cfgs) == 1:
+            cfg_helper = {}
+            cfg = cfgs[0]
+            cfg_choices = {}
+        elif len(cfgs) == 2:
+            cfg, cfg_helper = cfgs
+            cfg_choices = {}
+        elif len(cfgs) == 3:
+            cfg, cfg_helper, cfg_choices = cfgs
+        else:
+            raise ValueError("At most 3 docs (config, description for help, choices) are supported in config yaml")
+        print(cfg_helper)
+    return cfg, cfg_helper, cfg_choices
+
+
+def merge(args, cfg):
+    """
+    Merge the base config from yaml file and command line arguments.
+
+    Args:
+        args: Command line arguments.
+        cfg: Base configuration.
+    """
+    args_var = vars(args)
+    for item in args_var:
+        cfg[item] = args_var[item]
+    return cfg
+
+
+def get_config():
+    """
+    Get Config according to the yaml file and cli arguments.
+    """
+    parser = argparse.ArgumentParser(description="default name", add_help=False)
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    parser.add_argument("--config_path", type=str, default=os.path.join(current_dir, "../config.yaml"),
+                        help="Config file path")
+    path_args, _ = parser.parse_known_args()
+    default, helper, choices = parse_yaml(path_args.config_path)
+    pprint(default)
+    args = parse_cli_to_yaml(parser=parser, cfg=default, helper=helper, choices=choices, cfg_path=path_args.config_path)
+    final_config = merge(args, default)
+    return Config(final_config)
+
+
+config = get_config()
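+# Usage sketch (hypothetical key): if config.yaml defines `num_epochs`, then
+# `from util.config import config; print(config.num_epochs)` prints the value
+# after merging the yaml defaults with any command-line overrides.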
diff --git a/research/cv/fairmot/mxbase_eval.py b/research/cv/fairmot/mxbase_eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..36f0c6adbf4e3cd15098e3621bb0ea3e8ca26cca
--- /dev/null
+++ b/research/cv/fairmot/mxbase_eval.py
@@ -0,0 +1,83 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""post process."""
+import os
+import os.path as osp
+import logging
+import cv2
+import motmetrics as mm
+from src.opts import Opts
+from src.tracking_utils import visualization as vis
+from src.tracking_utils.log import logger
+from src.tracking_utils.utils import mkdir_if_missing
+from src.tracking_utils.evaluation import Evaluator
+from src.tracking_utils.io import read_results, unzip_objs
+import src.utils.jde as datasets
+
+
+def main(data_root, seqs=('MOT17-01-SDP',), save_dir=None):
+    logger.setLevel(logging.INFO)
+    data_type = 'mot'
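+    # The mxBase executable is expected to have written one tracking result
+    # file per sequence under <data_root>/result_Files before this script runs.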
+    # run tracking
+    accs = []
+    for sequence in seqs:
+        output_dir = os.path.join(save_dir, sequence)
+        mkdir_if_missing(output_dir)
+        logger.info('start seq: %s', sequence)
+        result_filename = osp.join(data_root, 'result_Files', '{}.txt'.format(sequence))
+        logger.info('Evaluate seq: %s', sequence)
+        evaluator = Evaluator(osp.join(data_root, 'train'), sequence, data_type)
+        accs.append(evaluator.eval_file(result_filename))
+        result_frame_dict = read_results(result_filename, 'mot', is_gt=False)
+        dataloader = datasets.LoadImages(osp.join(os.path.join(data_root, 'train'), sequence, 'img1'),
+                                         (1088, 608))
+        for i, (_, _, img0) in enumerate(dataloader):
+            frame_id = i+1
+            trk_objs = result_frame_dict.get(frame_id, [])
+            trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
+            online_im = vis.plot_tracking(img0, trk_tlwhs, trk_ids, frame_id=frame_id)
+            cv2.imwrite(os.path.join(output_dir, '{:05d}.jpg'.format(frame_id)), online_im)
+
+    # get summary
+    metrics = mm.metrics.motchallenge_metrics
+    mh = mm.metrics.create()
+    summary = Evaluator.get_summary(accs, seqs, metrics)
+    strsummary = mm.io.render_summary(
+        summary,
+        formatters=mh.formatters,
+        namemap=mm.io.motchallenge_metric_names
+    )
+    print(strsummary)
+    Evaluator.save_summary(summary, os.path.join(data_root, 'summary.xlsx'))
+
+
+if __name__ == '__main__':
+    opts = Opts().init()
+    seqs_str = '''  MOT20-01
+                    MOT20-02
+                    MOT20-03
+                    MOT20-05'''
+    seq = [seq.strip() for seq in seqs_str.split()]
+    save_path = os.path.join(opts.data_dir, 'result')
+    main(data_root=opts.data_dir,
+         seqs=seq,
+         save_dir=save_path)
diff --git a/research/cv/fairmot/scripts/docker_start.sh b/research/cv/fairmot/scripts/docker_start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e99bbe7685ba6f302f694e54a5d15ebe0f1c6b08
--- /dev/null
+++ b/research/cv/fairmot/scripts/docker_start.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
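+# Usage: bash docker_start.sh [docker_image] [data_dir] [model_dir]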
+
+docker_image=$1
+data_dir=$2
+model_dir=$3
+
+docker run -it --ipc=host \
+               --device=/dev/davinci0 \
+               --device=/dev/davinci1 \
+               --device=/dev/davinci2 \
+               --device=/dev/davinci3 \
+               --device=/dev/davinci4 \
+               --device=/dev/davinci5 \
+               --device=/dev/davinci6 \
+               --device=/dev/davinci7 \
+               --device=/dev/davinci_manager \
+               --device=/dev/devmm_svm \
+               --device=/dev/hisi_hdc \
+               -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+               -v /usr/local/Ascend/add-ons/:/usr/local/Ascend/add-ons \
+               -v ${data_dir}:${data_dir} \
+               -v ${model_dir}:${model_dir} \
+               -v /var/log/npu/conf/slog/slog.conf:/var/log/npu/conf/slog/slog.conf \
+               -v /var/log/npu/slog/:/var/log/npu/slog/ \
+               -v /var/log/npu/profiling/:/var/log/npu/profiling \
+               -v /var/log/npu/dump/:/var/log/npu/dump \
+               -v /var/log/npu/:/usr/slog ${docker_image} \
+               /bin/bash
\ No newline at end of file
diff --git a/research/cv/fairmot/sdk_eval.py b/research/cv/fairmot/sdk_eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d407ee0304155e8a99fdf6bb7dd512e726f09bc
--- /dev/null
+++ b/research/cv/fairmot/sdk_eval.py
@@ -0,0 +1,178 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""post process."""
+import os
+import os.path as osp
+import logging
+import cv2
+import motmetrics as mm
+import numpy as np
+from src.opts import Opts
+from src.tracking_utils import visualization as vis
+from src.tracker.multitracker_sdk import JDETracker
+from src.tracking_utils.log import logger
+from src.tracking_utils.utils import mkdir_if_missing
+from src.tracking_utils.evaluation import Evaluator
+from src.tracking_utils.timer import Timer
+import src.utils.jde as datasets
+
+
+def get_eval_result(img_path, result_path):
+    """read bin file"""
+    tempfilename = os.path.split(img_path)[1]
+    filename, _ = os.path.splitext(tempfilename)
+    id_feature_result_file = os.path.join(result_path, filename + "_0.bin")
+    dets_result_file = os.path.join(result_path, filename + "_1.bin")
+    id_feature = np.fromfile(id_feature_result_file, dtype=np.float32).reshape(500, 128)
+    dets = np.fromfile(dets_result_file, dtype=np.float32).reshape(1, 500, 6)
+    return [id_feature, dets]
+
+
+def write_results(filename, results, data_type):
+    """write eval results."""
+    if data_type == 'mot':
+        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
+    elif data_type == 'kitti':
+        save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
+    else:
+        raise ValueError(data_type)
+
+    with open(filename, 'w') as f:
+        for frame_id, tlwhs, track_ids in results:
+            if data_type == 'kitti':
+                frame_id -= 1
+            for tlwh, track_id in zip(tlwhs, track_ids):
+                if track_id < 0:
+                    continue
+                x1, y1, w, h = tlwh
+                x2, y2 = x1 + w, y1 + h
+                line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
+                f.write(line)
+    logger.info('save results to %s', filename)
+
+
+def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30,
+             result_path=None):
+    """evaluation sequence."""
+    if save_dir:
+        mkdir_if_missing(save_dir)
+    tracker = JDETracker(opt, frame_rate=frame_rate)
+    timer = Timer()
+    results = []
+    frame_id = 0
+    for path, img, img0 in dataloader:
+        result = get_eval_result(path, result_path)
+        if frame_id % 20 == 0:
+            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
+        # run tracking
+        timer.tic()
+        blob = np.expand_dims(img, 0)
+        height, width = img0.shape[0], img0.shape[1]
+        inp_height, inp_width = [blob.shape[2], blob.shape[3]]
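+        # Recover the affine-transform parameters (center c, scale s) that
+        # ctdet_post_process uses to map detections back to the original image.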
+        c = np.array([width / 2., height / 2.], dtype=np.float32)
+        s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
+        meta = {'c': c, 's': s, 'out_height': inp_height // opt.down_ratio,
+                'out_width': inp_width // opt.down_ratio}
+        online_targets = tracker.update(result[0], result[1], meta)
+        online_tlwhs = []
+        online_ids = []
+        for t in online_targets:
+            tlwh = t.tlwh
+            tid = t.track_id
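+            # Drop boxes that are implausibly wide for a pedestrian
+            # (width / height > 1.6) or smaller than min_box_area.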
+            vertical = tlwh[2] / tlwh[3] > 1.6
+            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
+                online_tlwhs.append(tlwh)
+                online_ids.append(tid)
+        timer.toc()
+        results.append((frame_id + 1, online_tlwhs, online_ids))
+        if show_image or save_dir is not None:
+            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
+                                          fps=1. / timer.average_time)
+        if show_image:
+            cv2.imshow('online_im', online_im)
+        if save_dir is not None:
+            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
+        frame_id += 1
+    write_results(result_filename, results, data_type)
+    return frame_id, timer.average_time, timer.calls
+
+
+def main(opt, data_root, result_path, seqs=('MOT17-01-SDP',), save_images=True, save_videos=False, show_image=False):
+    logger.setLevel(logging.INFO)
+    result_root = os.path.join(data_root, '..', 'results')
+    mkdir_if_missing(result_root)
+    data_type = 'mot'
+    # run tracking
+    accs = []
+    n_frame = 0
+    timer_avgs, timer_calls = [], []
+    for sequence in seqs:
+        output_dir = os.path.join(data_root, '..', 'outputs', sequence) \
+            if save_images or save_videos else None
+        logger.info('start seq: %s', sequence)
+        dataloader = datasets.LoadImages(osp.join(data_root, sequence, 'img1'), (1088, 608))
+        result_filename = osp.join(result_root, '{}.txt'.format(sequence))
+        with open(osp.join(data_root, sequence, 'seqinfo.ini')) as seq_file:
+            meta_info = seq_file.read()
+        frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
+        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
+                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate,
+                              result_path=osp.join(result_path, sequence))
+        n_frame += nf
+        timer_avgs.append(ta)
+        timer_calls.append(tc)
+        logger.info('Evaluate seq: %s', sequence)
+        evaluator = Evaluator(data_root, sequence, data_type)
+        accs.append(evaluator.eval_file(result_filename))
+        if save_videos:
+            print(output_dir)
+            output_video_path = osp.join(output_dir, '{}.mp4'.format(sequence))
+            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
+            os.system(cmd_str)
+    timer_avgs = np.asarray(timer_avgs)
+    timer_calls = np.asarray(timer_calls)
+    all_time = np.dot(timer_avgs, timer_calls)
+    avg_time = all_time / np.sum(timer_calls)
+    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
+
+    # get summary
+    metrics = mm.metrics.motchallenge_metrics
+    mh = mm.metrics.create()
+    summary = Evaluator.get_summary(accs, seqs, metrics)
+    strsummary = mm.io.render_summary(
+        summary,
+        formatters=mh.formatters,
+        namemap=mm.io.motchallenge_metric_names
+    )
+    print(strsummary)
+    Evaluator.save_summary(summary, os.path.join(result_root, 'summary.xlsx'))
+
+
+if __name__ == '__main__':
+    opts = Opts().init()
+    seqs_str = '''  MOT20-01
+                    MOT20-02
+                    MOT20-03
+                    MOT20-05'''
+
+    data_roots = os.path.join(opts.data_dir, 'train')
+    seq = [seq.strip() for seq in seqs_str.split()]
+    result_ = os.path.join(opts.data_dir, '../infer_result')
+    main(opts,
+         data_root=data_roots,
+         result_path=result_,
+         seqs=seq,
+         show_image=False,
+         save_images=True,
+         save_videos=False)
diff --git a/research/cv/fairmot/src/tracker/multitracker_sdk.py b/research/cv/fairmot/src/tracker/multitracker_sdk.py
new file mode 100644
index 0000000000000000000000000000000000000000..be6f6796e92643d510bf2ba0d2e5293c8deabd27
--- /dev/null
+++ b/research/cv/fairmot/src/tracker/multitracker_sdk.py
@@ -0,0 +1,384 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+FairMOT multi-object tracker for SDK inference.
+"""
+import collections
+import numpy as np
+from src.tracker import matching
+from src.utils.tools import ctdet_post_process
+from src.tracker.basetrack import BaseTrack, TrackState
+from src.tracking_utils.kalman_filter import KalmanFilter
+
+
+class Track(BaseTrack):
+    """
+    Single tracked target (strack) used by the FairMOT tracker.
+    """
+    shared_kalman = KalmanFilter()
+
+    def __init__(self, tlwh, score, temp_feat, buffer_size=30):
+
+        # wait activate
+        self._tlwh = np.asarray(tlwh, dtype=float)
+        self.kalman_filter = None
+        self.mean, self.covariance = None, None
+        self.is_activated = False
+        self.track_id = None
+        self.start_frame = None
+        self.score = score
+        self.tracklet_len = 0
+        self.frame_id = None
+        self.smooth_feat = None
+        self.update_features(temp_feat)
+        self.features = collections.deque([], maxlen=buffer_size)
+        self.alpha = 0.9
+        self.state = None
+
+    def update_features(self, feat):
+        """update features"""
+        feat /= np.linalg.norm(feat)
+        self.curr_feat = feat
+        if self.smooth_feat is None:
+            self.smooth_feat = feat
+        else:
+            self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
+        self.features.append(feat)
+        self.smooth_feat /= np.linalg.norm(self.smooth_feat)
+
+    def predict(self):
+        """predict"""
+        mean_state = self.mean.copy()
+        if self.state != TrackState.Tracked:
+            mean_state[7] = 0
+        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
+
+    @staticmethod
+    def multi_predict(stracks):
+        """multi predict"""
+        if stracks:
+            multi_mean = np.asarray([st.mean.copy() for st in stracks])
+            multi_covariance = np.asarray([st.covariance for st in stracks])
+            for i, st in enumerate(stracks):
+                if st.state != TrackState.Tracked:
+                    multi_mean[i][7] = 0
+            multi_mean, multi_covariance = Track.shared_kalman.multi_predict(multi_mean, multi_covariance)
+            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
+                stracks[i].mean = mean
+                stracks[i].covariance = cov
+
+    def activate(self, kalman_filter, frame_id):
+        """Start a new tracklet"""
+        self.kalman_filter = kalman_filter
+        self.track_id = self.next_id()
+        self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))
+
+        self.tracklet_len = 0
+        self.state = TrackState.Tracked
+        if frame_id == 1:
+            self.is_activated = True
+        self.frame_id = frame_id
+        self.start_frame = frame_id
+
+    def re_activate(self, new_track, frame_id, new_id=False):
+        """reactivate a matched track"""
+        self.mean, self.covariance = self.kalman_filter.update(
+            self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
+        )
+
+        self.update_features(new_track.curr_feat)
+        self.tracklet_len = 0
+        self.state = TrackState.Tracked
+        self.is_activated = True
+        self.frame_id = frame_id
+        if new_id:
+            self.track_id = self.next_id()
+
+    def update(self, new_track, frame_id, update_feature=True):
+        """
+        Update a matched track
+        :type new_track: Track
+        :type frame_id: int
+        :type update_feature: bool
+        :return:
+        """
+        self.frame_id = frame_id
+        self.tracklet_len += 1
+
+        new_tlwh = new_track.tlwh
+        self.mean, self.covariance = self.kalman_filter.update(
+            self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
+        self.state = TrackState.Tracked
+        self.is_activated = True
+
+        self.score = new_track.score
+        if update_feature:
+            self.update_features(new_track.curr_feat)
+
+    @property
+    # @jit(nopython=True)
+    def tlwh(self):
+        """Get current position in bounding box format `(top left x, top left y,
+                width, height)`.
+        """
+        if self.mean is None:
+            return self._tlwh.copy()
+        ret = self.mean[:4].copy()
+        ret[2] *= ret[3]
+        ret[:2] -= ret[2:] / 2
+        return ret
+
+    @property
+    # @jit(nopython=True)
+    def tlbr(self):
+        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
+        `(top left, bottom right)`.
+        """
+        ret = self.tlwh.copy()
+        ret[2:] += ret[:2]
+        return ret
+
+    @staticmethod
+    # @jit(nopython=True)
+    def tlwh_to_xyah(tlwh):
+        """Convert bounding box to format `(center x, center y, aspect ratio,
+        height)`, where the aspect ratio is `width / height`.
+        """
+        ret = np.asarray(tlwh).copy()
+        ret[:2] += ret[2:] / 2
+        ret[2] /= ret[3]
+        return ret
+
+    def to_xyah(self):
+        """to xyah"""
+        return self.tlwh_to_xyah(self.tlwh)
+
+    @staticmethod
+    # @jit(nopython=True)
+    def tlbr_to_tlwh(tlbr):
+        """tlbr to tlwh"""
+        ret = np.asarray(tlbr).copy()
+        ret[2:] -= ret[:2]
+        return ret
+
+    @staticmethod
+    # @jit(nopython=True)
+    def tlwh_to_tlbr(tlwh):
+        """tlwh to tlbr"""
+        ret = np.asarray(tlwh).copy()
+        ret[2:] += ret[:2]
+        return ret
+
+    def __repr__(self):
+        return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
+
+
+class JDETracker:
+    """
+    Joint detection and embedding (JDE) tracker for FairMOT.
+    """
+
+    def __init__(self, opt, frame_rate=30):
+        self.opt = opt
+        self.tracked_stracks = []  # type: list[Track]
+        self.lost_stracks = []  # type: list[Track]
+        self.removed_stracks = []  # type: list[Track]
+
+        self.frame_id = 0
+        self.det_thresh = opt.conf_thres
+        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
+        self.max_time_lost = self.buffer_size
+        self.max_per_image = opt.K
+        self.mean = np.array([[[0.408, 0.447, 0.47]]], dtype=np.float32)
+        self.std = np.array([[[0.289, 0.274, 0.278]]], dtype=np.float32)
+        self.kalman_filter = KalmanFilter()
+
+    def update(self, id_feature, dets, meta):
+        """update track frame"""
+        self.frame_id += 1
+        activated_starcks, refind_stracks, lost_stracks, removed_stracks = [], [], [], []
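+        # post_process/merge_outputs key detections by class id; FairMOT tracks
+        # a single pedestrian class, so class 1 holds every detection.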
+        dets = self.post_process(dets, meta)
+        dets = self.merge_outputs([dets])[1]
+        remain_inds = dets[:, 4] > self.opt.conf_thres
+        dets = dets[remain_inds]
+        id_feature = id_feature[remain_inds]
+        detections = self.create_detections(dets, id_feature)
+        # Add newly detected tracklets to tracked_stracks
+        unconfirmed, tracked_stracks = [], []
+        for track in self.tracked_stracks:
+            if not track.is_activated:
+                unconfirmed.append(track)
+            else:
+                tracked_stracks.append(track)
+        # Step 2: First association, with embedding
+        strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
+        Track.multi_predict(strack_pool)
+        dists = matching.embedding_distance(strack_pool, detections)
+        dists = matching.fuse_motion(self.kalman_filter, dists, strack_pool, detections)
+        matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.4)
+        for itracked, idet in matches:
+            track, det = strack_pool[itracked], detections[idet]
+            if track.state == TrackState.Tracked:
+                track.update(detections[idet], self.frame_id)
+                activated_starcks.append(track)
+            else:
+                track.re_activate(det, self.frame_id, new_id=False)
+                refind_stracks.append(track)
+        # Step 3: Second association, with IOU
+        detections = [detections[i] for i in u_detection]
+        r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
+        dists = matching.iou_distance(r_tracked_stracks, detections)
+        matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
+        for itracked, idet in matches:
+            track = r_tracked_stracks[itracked]
+            det = detections[idet]
+            if track.state == TrackState.Tracked:
+                track.update(det, self.frame_id)
+                activated_starcks.append(track)
+            else:
+                track.re_activate(det, self.frame_id, new_id=False)
+                refind_stracks.append(track)
+        for it in u_track:
+            track = r_tracked_stracks[it]
+            if not track.state == TrackState.Lost:
+                track.mark_lost()
+                lost_stracks.append(track)
+        # Deal with unconfirmed tracks, usually tracks with only one beginning frame
+        detections = [detections[i] for i in u_detection]
+        dists = matching.iou_distance(unconfirmed, detections)
+        matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
+        for itracked, idet in matches:
+            unconfirmed[itracked].update(detections[idet], self.frame_id)
+            activated_starcks.append(unconfirmed[itracked])
+        for it in u_unconfirmed:
+            track = unconfirmed[it]
+            track.mark_removed()
+            removed_stracks.append(track)
+        # Step 4: Init new stracks
+        activated_starcks = self.init_new_stracks(u_detection, detections, activated_starcks)
+        # Step 5: Update state
+        removed_stracks = self.update_state(removed_stracks)
+        self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
+        self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks)
+        self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
+        self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
+        self.lost_stracks.extend(lost_stracks)
+        self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
+        self.removed_stracks.extend(removed_stracks)
+        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
+        # get scores of lost tracks
+        output_stracks = [track for track in self.tracked_stracks if track.is_activated]
+        return output_stracks
+
+    def post_process(self, dets, meta):
+        """post process"""
+        dets = dets.reshape(1, -1, dets.shape[2])
+        dets = ctdet_post_process(
+            dets.copy(), [meta['c']], [meta['s']],
+            meta['out_height'], meta['out_width'], self.opt.num_classes)
+        for j in range(1, self.opt.num_classes + 1):
+            dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
+        return dets[0]
+
+    def create_detections(self, dets, id_feature):
+        """create detections"""
+        if np.shape(dets)[0]:
+            detections = []
+            for tlbrs, f in zip(dets[:, :5], id_feature):
+                detections.append(Track(Track.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30))
+        else:
+            detections = []
+        return detections
+
+    def merge_outputs(self, detections):
+        """merge outputs"""
+        results = {}
+        for j in range(1, self.opt.num_classes + 1):
+            results[j] = np.concatenate(
+                [detection[j] for detection in detections], axis=0).astype(np.float32)
+
+        scores = np.hstack(
+            [results[j][:, 4] for j in range(1, self.opt.num_classes + 1)])
+        if len(scores) > self.max_per_image:
+            kth = len(scores) - self.max_per_image
+            thresh = np.partition(scores, kth)[kth]
+            for j in range(1, self.opt.num_classes + 1):
+                keep_inds = (results[j][:, 4] >= thresh)
+                results[j] = results[j][keep_inds]
+        return results
+
+    def init_new_stracks(self, u_detection, detections, activated_starcks):
+        """init new stracks"""
+        for inew in u_detection:
+            track = detections[inew]
+            if track.score < self.det_thresh:
+                continue
+            track.activate(self.kalman_filter, self.frame_id)
+            activated_starcks.append(track)
+        return activated_starcks
+
+    def update_state(self, removed_stracks):
+        """update state"""
+        for track in self.lost_stracks:
+            if self.frame_id - track.end_frame > self.max_time_lost:
+                track.mark_removed()
+                removed_stracks.append(track)
+        return removed_stracks
+
+
+def joint_stracks(tlista, tlistb):
+    """joint stracks"""
+
+    exists = {}
+    res = []
+    for t in tlista:
+        exists[t.track_id] = 1
+        res.append(t)
+    for t in tlistb:
+        tid = t.track_id
+        if not exists.get(tid, 0):
+            exists[tid] = 1
+            res.append(t)
+    return res
+
+
+def sub_stracks(tlista, tlistb):
+    """sub stracks"""
+    stracks = {}
+    for t in tlista:
+        stracks[t.track_id] = t
+    for t in tlistb:
+        tid = t.track_id
+        if stracks.get(tid, 0):
+            del stracks[tid]
+    return list(stracks.values())
+
+
+def remove_duplicate_stracks(stracksa, stracksb):
+    """remove duplicate stracks"""
+    pdist = matching.iou_distance(stracksa, stracksb)
+    pairs = np.where(pdist < 0.15)
+    dupa, dupb = list(), list()
+    for p, q in zip(*pairs):
+        timep = stracksa[p].frame_id - stracksa[p].start_frame
+        timeq = stracksb[q].frame_id - stracksb[q].start_frame
+        if timep > timeq:
+            dupb.append(q)
+        else:
+            dupa.append(p)
+    resa = [t for i, t in enumerate(stracksa) if i not in dupa]
+    resb = [t for i, t in enumerate(stracksb) if i not in dupb]
+    return resa, resb