From 6b9541267ebd66b945b02cf4038dad418b50a093 Mon Sep 17 00:00:00 2001
From: ganqijun <1195950844@qq.com>
Date: Fri, 16 Sep 2022 21:09:52 +0800
Subject: [PATCH] tbnet: add MindX SDK/MxBase 310 inference, ModelArts training and docker scripts

---
 .../recommend/tbnet/infer/convert/air2om.sh   |  25 ++
 .../tbnet/infer/data/config/tbnet.pipeline    |  70 +++++
 .../tbnet/infer/docker_start_infer.sh         |  49 ++++
 .../tbnet/infer/mxbase/CMakeLists.txt         |  35 +++
 .../recommend/tbnet/infer/mxbase/build.sh     |  48 +++
 .../tbnet/infer/mxbase/src/Tbnet.cpp          | 273 ++++++++++++++++++
 .../recommend/tbnet/infer/mxbase/src/Tbnet.h  |  61 ++++
 .../recommend/tbnet/infer/mxbase/src/main.cpp |  58 ++++
 official/recommend/tbnet/infer/sdk/main.py    | 113 ++++++++
 .../tbnet/infer/sdk/prec/postprocess.py       |  90 ++++++
 official/recommend/tbnet/infer/sdk/run.sh     |  27 ++
 .../tbnet/modelarts/train_modelarts.py        | 158 ++++++++++
 .../recommend/tbnet/scripts/docker_start.sh   |  38 +++
 13 files changed, 1045 insertions(+)
 create mode 100644 official/recommend/tbnet/infer/convert/air2om.sh
 create mode 100644 official/recommend/tbnet/infer/data/config/tbnet.pipeline
 create mode 100644 official/recommend/tbnet/infer/docker_start_infer.sh
 create mode 100644 official/recommend/tbnet/infer/mxbase/CMakeLists.txt
 create mode 100644 official/recommend/tbnet/infer/mxbase/build.sh
 create mode 100644 official/recommend/tbnet/infer/mxbase/src/Tbnet.cpp
 create mode 100644 official/recommend/tbnet/infer/mxbase/src/Tbnet.h
 create mode 100644 official/recommend/tbnet/infer/mxbase/src/main.cpp
 create mode 100644 official/recommend/tbnet/infer/sdk/main.py
 create mode 100644 official/recommend/tbnet/infer/sdk/prec/postprocess.py
 create mode 100644 official/recommend/tbnet/infer/sdk/run.sh
 create mode 100644 official/recommend/tbnet/modelarts/train_modelarts.py
 create mode 100644 official/recommend/tbnet/scripts/docker_start.sh

diff --git a/official/recommend/tbnet/infer/convert/air2om.sh b/official/recommend/tbnet/infer/convert/air2om.sh
new file mode 100644
index 000000000..097818ded
--- /dev/null
+++ b/official/recommend/tbnet/infer/convert/air2om.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+model_path=$1
+output_model_name=$2
+
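+# A brief sketch of what this conversion does: ATC compiles the exported AIR
+# model (--framework=1) into an offline OM model for Ascend 310 inference;
+# --output_type=FP32 keeps the network outputs in float32.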
+atc --model="$model_path" \
+        --framework=1 \
+        --output="$output_model_name" \
+        --input_format=NCHW \
+        --soc_version=Ascend310 \
+        --output_type=FP32
diff --git a/official/recommend/tbnet/infer/data/config/tbnet.pipeline b/official/recommend/tbnet/infer/data/config/tbnet.pipeline
new file mode 100644
index 000000000..d7056e33b
--- /dev/null
+++ b/official/recommend/tbnet/infer/data/config/tbnet.pipeline
@@ -0,0 +1,70 @@
+{
+    "tbnet": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0:0"
+        },
+        "appsrc1": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0:1"
+        },
+        "appsrc2": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0:2"
+        },
+        "appsrc3": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0:3"
+        },
+        "appsrc4": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0:4"
+        },
+        "appsrc5": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0:5"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "appsrc0,appsrc1,appsrc2,appsrc3,appsrc4,appsrc5",
+                "modelPath": "../data/model/tbnet.om"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "mxpi_dataserialize0"
+        },
+        "mxpi_dataserialize0": {
+            "props": {
+                "outputDataKeys": "mxpi_tensorinfer0"
+            },
+            "factory": "mxpi_dataserialize",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "props": {
+                "blocksize": "4096000"
+            },
+            "factory": "appsink"
+        }
+    }
+}
diff --git a/official/recommend/tbnet/infer/docker_start_infer.sh b/official/recommend/tbnet/infer/docker_start_infer.sh
new file mode 100644
index 000000000..2678ff3f9
--- /dev/null
+++ b/official/recommend/tbnet/infer/docker_start_infer.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+model_dir=$2
+
+
+function show_help() {
+    echo "Usage: docker_start.sh docker_image model_dir data_dir"
+}
+
+function param_check() {
+    if [ -z "${docker_image}" ]; then
+        echo "please input docker_image"
+        show_help
+        exit 1
+    fi
+
+    if [ -z "${model_dir}" ]; then
+        echo "please input model_dir"
+        show_help
+        exit 1
+    fi
+}
+
+param_check
+
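+# Map the davinci device nodes and the Ascend driver into the container so
+# the inference processes can reach the NPU.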
+docker run -it -u root \
+  --device=/dev/davinci0 \
+  --device=/dev/davinci_manager \
+  --device=/dev/devmm_svm \
+  --device=/dev/hisi_hdc \
+  -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+  -v ${model_dir}:${model_dir} \
+  ${docker_image} \
+  /bin/bash
diff --git a/official/recommend/tbnet/infer/mxbase/CMakeLists.txt b/official/recommend/tbnet/infer/mxbase/CMakeLists.txt
new file mode 100644
index 000000000..e97200d02
--- /dev/null
+++ b/official/recommend/tbnet/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,35 @@
+cmake_minimum_required(VERSION 3.5.2)
+project(Tbnet)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+
+
+set(TARGET_MAIN Tbnet)
+
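+# ACL headers and libraries come from the CANN toolkit; ASCEND_HOME and
+# MX_SDK_HOME must be set in the environment before running cmake.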
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/ascend-toolkit/latest/acllib)
+
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+include_directories($ENV{MX_SDK_HOME}/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/opencv4)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/gstreamer-1.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/glib-2.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/lib/glib-2.0/include)
+
+link_directories($ENV{MX_SDK_HOME}/lib)
+link_directories($ENV{MX_SDK_HOME}/opensource/lib/)
+
+
+add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations)
+add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}")
+add_compile_options("-Dgoogle=mindxsdk_private")
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+
+include_directories(${ACL_LIB_PATH}/include)
+link_directories(${ACL_LIB_PATH}/lib64/)
+
+
+
+add_executable(${TARGET_MAIN} src/main.cpp src/Tbnet.cpp)
+target_link_libraries(${TARGET_MAIN} glog cpprest mxbase ascendcl)
diff --git a/official/recommend/tbnet/infer/mxbase/build.sh b/official/recommend/tbnet/infer/mxbase/build.sh
new file mode 100644
index 000000000..71bc7df1a
--- /dev/null
+++ b/official/recommend/tbnet/infer/mxbase/build.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+export ASCEND_VERSION=ascend-toolkit/latest
+export ARCH_PATTERN=.
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib/modelpostprocessors:${LD_LIBRARY_PATH}
+
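+# Out-of-source build; the resulting executable is ./build/Tbnet.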
+mkdir -p build
+cd build || exit
+
+function make_plugin() {
+    if ! cmake ..;
+    then
+      echo "cmake failed."
+      return 1
+    fi
+
+    if ! make;
+    then
+      echo "make failed."
+      return 1
+    fi
+
+    return 0
+}
+
+if make_plugin;
+then
+  echo "INFO: Build succeeded."
+else
+  echo "ERROR: Build failed."
+  exit 1
+fi
+
+cd - || exit
+
diff --git a/official/recommend/tbnet/infer/mxbase/src/Tbnet.cpp b/official/recommend/tbnet/infer/mxbase/src/Tbnet.cpp
new file mode 100644
index 000000000..368cc46dd
--- /dev/null
+++ b/official/recommend/tbnet/infer/mxbase/src/Tbnet.cpp
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tbnet.h"
+#include <cstdlib>
+#include <memory>
+#include <string>
+#include <cmath>
+#include <vector>
+#include <algorithm>
+#include <queue>
+#include <utility>
+#include <fstream>
+#include <map>
+#include <iostream>
+#include "acl/acl.h"
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+namespace {
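+    // Shapes of the six model inputs: item id {1}, the relation/entity path
+    // inputs {1, 39} each, and the rating {1}.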
+    const std::vector<std::vector<uint32_t>> SHAPE = {{1}, {1, 39}, {1, 39},
+                                                      {1, 39}, {1, 39}, {1}};
+    const int FLOAT_SIZE = 4;     // sizeof(float)
+    const int INT_SIZE = 8;       // sizeof(int64_t)
+    const int DATA_SIZE_1 = 1;    // element count of the item/rate inputs
+    const int DATA_SIZE_39 = 39;  // element count of the path inputs
+}
+
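+// Append each output tensor of one sample to ./result as a comma-separated
+// text file (the directory must exist beforehand); the postprocess script
+// later reads these scores to compute AUC.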
+void WriteResult(const std::string &file_name, const std::vector<MxBase::TensorBase> &outputs) {
+    std::string homePath = "./result";
+    for (size_t i = 0; i < outputs.size(); ++i) {
+        float *boxes = reinterpret_cast<float *>(outputs[i].GetBuffer());
+        std::string outFileName = homePath + "/tbnet_item_bs1_" + file_name + "_" +
+                                  std::to_string(i) + ".txt";
+        std::ofstream outfile(outFileName, std::ios::app);
+        size_t outputSize;
+        outputSize = outputs[i].GetSize();
+        for (size_t j = 0; j < outputSize; ++j) {
+            if (j != 0) {
+                outfile << ",";
+            }
+            outfile << boxes[j];
+        }
+        outfile.close();
+    }
+}
+
+APP_ERROR Tbnet::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+    model_Tbnet = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_Tbnet->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    return APP_ERR_OK;
+}
+
+APP_ERROR Tbnet::DeInit() {
+    model_Tbnet->DeInit();
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+
+APP_ERROR Tbnet::ReadBin_float(const std::string &path, std::vector<std::vector<float>> &dataset,
+                         const int datasize) {
+    std::ifstream inFile(path, std::ios::binary);
+    if (!inFile) {
+        LogError << "Failed to open file: " << path << ".";
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    // Read datasize float32 values; std::vector avoids the manual new/delete leak.
+    std::vector<float> temp(datasize);
+    inFile.read(reinterpret_cast<char *>(temp.data()), datasize * sizeof(float));
+    dataset.push_back(temp);
+
+    return APP_ERR_OK;
+}
+
+APP_ERROR Tbnet::ReadBin_int(const std::string &path, std::vector<std::vector<int64_t>> &dataset,
+                         const int datasize) {
+    std::ifstream inFile(path, std::ios::binary);
+    if (!inFile) {
+        LogError << "Failed to open file: " << path << ".";
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    // Read datasize int64 values; std::vector avoids the manual new/delete leak.
+    std::vector<int64_t> temp(datasize);
+    inFile.read(reinterpret_cast<char *>(temp.data()), datasize * sizeof(int64_t));
+    dataset.push_back(temp);
+
+    return APP_ERR_OK;
+}
+
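+// Flatten the host-side rows into one buffer, copy it to device memory and
+// wrap the result in a TensorBase with the given shape.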
+APP_ERROR Tbnet::VectorToTensorBase_float(const std::vector<std::vector<float>> &input,
+                                    MxBase::TensorBase &tensorBase,
+                                    const std::vector<uint32_t> &shape) {
+    uint32_t dataSize = 1;
+    for (size_t i = 0; i < shape.size(); i++) {
+        dataSize = dataSize * shape[i];
+    }     // dataSize = total element count of the input shape
+    std::vector<float> metaFeatureData(dataSize);
+
+    uint32_t idx = 0;
+    for (size_t bs = 0; bs < input.size(); bs++) {
+        for (size_t c = 0; c < input[bs].size(); c++) {
+            metaFeatureData[idx++] = input[bs][c];
+        }
+    }
+    MxBase::MemoryData memoryDataDst(dataSize * FLOAT_SIZE, MxBase::MemoryData::MEMORY_DEVICE, deviceId_);
+    MxBase::MemoryData memoryDataSrc(reinterpret_cast<void *>(metaFeatureData.data()), dataSize * FLOAT_SIZE,
+                                     MxBase::MemoryData::MEMORY_HOST_MALLOC);
+
+    APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc failed.";
+        return ret;
+    }
+
+    tensorBase = MxBase::TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_FLOAT32);
+
+    return APP_ERR_OK;
+}
+
+APP_ERROR Tbnet::VectorToTensorBase_int(const std::vector<std::vector<int64_t>> &input,
+                                    MxBase::TensorBase &tensorBase,
+                                    const std::vector<uint32_t> &shape) {
+    uint32_t dataSize = 1;
+    for (size_t i = 0; i < shape.size(); i++) {
+        dataSize = dataSize * shape[i];
+    }     // dataSize = total element count of the input shape
+
+    std::vector<int64_t> metaFeatureData(dataSize);
+
+    uint32_t idx = 0;
+    for (size_t bs = 0; bs < input.size(); bs++) {
+        for (size_t c = 0; c < input[bs].size(); c++) {
+            metaFeatureData[idx++] = input[bs][c];
+        }
+    }
+    MxBase::MemoryData memoryDataDst(dataSize * INT_SIZE, MxBase::MemoryData::MEMORY_DEVICE, deviceId_);
+    MxBase::MemoryData memoryDataSrc(reinterpret_cast<void *>(metaFeatureData.data()), dataSize * INT_SIZE,
+                                     MxBase::MemoryData::MEMORY_HOST_MALLOC);
+
+    APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc failed.";
+        return ret;
+    }
+
+    tensorBase = MxBase::TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_INT64);
+
+    return APP_ERR_OK;
+}
+
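+// Allocate a device buffer for every model output described in modelDesc_,
+// then run static-batch inference, accumulating the elapsed milliseconds.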
+APP_ERROR Tbnet::Inference(const std::vector<MxBase::TensorBase> &inputs,
+                                      std::vector<MxBase::TensorBase> &outputs) {
+    auto dtypes = model_Tbnet->GetOutputDataType();
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector<uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        MxBase::TensorBase tensor(shape, dtypes[i], MxBase::MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
+        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs.push_back(tensor);
+    }
+    MxBase::DynamicInfo dynamicInfo = {};
+    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+    auto startTime = std::chrono::high_resolution_clock::now();
+    APP_ERROR ret = model_Tbnet->ModelInference(inputs, outputs, dynamicInfo);
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    inferCostTimeMilliSec += costMs;
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference Tbnet failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
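+// End-to-end handling of one sample: read the six preprocessed .bin inputs,
+// wrap them as device tensors, run inference and dump the outputs to ./result.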
+APP_ERROR Tbnet::Process(const int &index, const std::string &datapath,
+                         const InitParam &initParam, std::vector<int> &outputs) {
+    std::vector<MxBase::TensorBase> inputs = {};
+    std::vector<MxBase::TensorBase> outputs_tb = {};
+
+    std::vector<std::vector<int64_t>> item;
+    APP_ERROR ret = ReadBin_int(datapath + "00_item/tbnet_item_bs1_" +
+                            std::to_string(index) + ".bin", item, DATA_SIZE_1);
+    std::vector<std::vector<int64_t>> rl1;
+    ReadBin_int(datapath + "01_rl1/tbnet_rl1_bs1_" +
+                std::to_string(index) + ".bin", rl1, DATA_SIZE_39);
+    std::vector<std::vector<int64_t>> ety;
+    ReadBin_int(datapath + "02_ety/tbnet_ety_bs1_" +
+                std::to_string(index) + ".bin", ety, DATA_SIZE_39);
+    std::vector<std::vector<int64_t>> rl2;
+    ReadBin_int(datapath + "03_rl2/tbnet_rl2_bs1_" +
+                std::to_string(index) + ".bin", rl2, DATA_SIZE_39);
+    std::vector<std::vector<int64_t>> his;
+    ReadBin_int(datapath + "04_his/tbnet_his_bs1_" +
+                std::to_string(index) + ".bin", his, DATA_SIZE_39);
+    std::vector<std::vector<float>> rate;
+    ReadBin_float(datapath + "05_rate/tbnet_rate_bs1_" +
+                  std::to_string(index) + ".bin", rate, DATA_SIZE_1);
+
+    if (ret != APP_ERR_OK) {
+        LogError << "ToTensorBase failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    MxBase::TensorBase tensorBase0;
+    APP_ERROR ret1 = VectorToTensorBase_int(item, tensorBase0, SHAPE[0]);
+    inputs.push_back(tensorBase0);
+    MxBase::TensorBase tensorBase1;
+    VectorToTensorBase_int(rl1, tensorBase1, SHAPE[1]);
+    inputs.push_back(tensorBase1);
+    MxBase::TensorBase tensorBase2;
+    VectorToTensorBase_int(ety, tensorBase2, SHAPE[2]);
+    inputs.push_back(tensorBase2);
+    MxBase::TensorBase tensorBase3;
+    VectorToTensorBase_int(rl2, tensorBase3, SHAPE[3]);
+    inputs.push_back(tensorBase3);
+    MxBase::TensorBase tensorBase4;
+    VectorToTensorBase_int(his, tensorBase4, SHAPE[4]);
+    inputs.push_back(tensorBase4);
+    MxBase::TensorBase tensorBase5;
+    VectorToTensorBase_float(rate, tensorBase5, SHAPE[5]);
+    inputs.push_back(tensorBase5);
+
+    if (ret1 != APP_ERR_OK) {
+        LogError << "ToTensorBase failed, ret=" << ret1 << ".";
+        return ret1;
+    }
+
+    auto startTime = std::chrono::high_resolution_clock::now();
+    APP_ERROR ret3 = Inference(inputs, outputs_tb);
+
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    inferCostTimeMilliSec += costMs;
+    if (ret3 != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret3 << ".";
+        return ret3;
+    }
+    for (size_t i = 0; i < outputs_tb.size(); ++i) {
+        if (!outputs_tb[i].IsHost()) {
+            outputs_tb[i].ToHost();
+        }
+    }
+    WriteResult(std::to_string(index), outputs_tb);
+    return APP_ERR_OK;
+}
diff --git a/official/recommend/tbnet/infer/mxbase/src/Tbnet.h b/official/recommend/tbnet/infer/mxbase/src/Tbnet.h
new file mode 100644
index 000000000..d1487a893
--- /dev/null
+++ b/official/recommend/tbnet/infer/mxbase/src/Tbnet.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MXBASE_Tbnet_H
+#define MXBASE_Tbnet_H
+#include <memory>
+#include <string>
+#include <vector>
+#include "acl/acl.h"
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+#include "MxBase/CV/Core/DataType.h"
+
+struct InitParam {
+    uint32_t deviceId;
+    bool checkTensor;
+    std::string modelPath;
+};
+
+class Tbnet {
+ public:
+  APP_ERROR Init(const InitParam &initParam);
+  APP_ERROR DeInit();
+  APP_ERROR VectorToTensorBase_int(const std::vector<std::vector<int64_t>> &input, MxBase::TensorBase &tensorBase,
+                               const std::vector<uint32_t> &shape);
+  APP_ERROR VectorToTensorBase_float(const std::vector<std::vector<float>> &input, MxBase::TensorBase &tensorBase,
+                               const std::vector<uint32_t> &shape);
+  APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> &outputs);
+  APP_ERROR Process(const int &index, const std::string &datapath,
+                    const InitParam &initParam, std::vector<int> &outputs);
+  APP_ERROR ReadBin_int(const std::string &path, std::vector<std::vector<int64_t>> &dataset,
+                    const int datasize);
+  APP_ERROR ReadBin_float(const std::string &path, std::vector<std::vector<float>> &dataset,
+                    const int datasize);
+  // get infer time
+  double GetInferCostMilliSec() const {return inferCostTimeMilliSec;}
+
+
+ private:
+  std::shared_ptr<MxBase::ModelInferenceProcessor> model_Tbnet;
+  MxBase::ModelDesc modelDesc_;
+  uint32_t deviceId_ = 0;
+  // infer time
+  double inferCostTimeMilliSec = 0.0;
+};
+
+#endif  // MXBASE_Tbnet_H
diff --git a/official/recommend/tbnet/infer/mxbase/src/main.cpp b/official/recommend/tbnet/infer/mxbase/src/main.cpp
new file mode 100644
index 000000000..d3ce994a9
--- /dev/null
+++ b/official/recommend/tbnet/infer/mxbase/src/main.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <dirent.h>
+#include <fstream>
+#include "MxBase/Log/Log.h"
+#include "Tbnet.h"
+
+namespace {
+    const uint32_t DATA_SIZE = 18415;  // number of preprocessed test samples
+}  // namespace
+
+int main(int argc, char* argv[]) {
+    InitParam initParam = {};
+    initParam.deviceId = 0;
+    initParam.checkTensor = true;
+    initParam.modelPath = "../data/model/tbnet.om";
+    std::string dataPath = "../../preprocess_Result/";
+
+    auto model_Tbnet = std::make_shared<Tbnet>();
+    APP_ERROR ret = model_Tbnet->Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "Tagging init failed, ret=" << ret << ".";
+        model_Tbnet->DeInit();
+        return ret;
+    }
+
+    std::vector<int> outputs;
+    for (uint32_t i = 0; i < DATA_SIZE; i++) {
+        LogInfo << "processing " << i;
+        ret = model_Tbnet->Process(i, dataPath, initParam, outputs);
+        if (ret != APP_ERR_OK) {
+            LogError << "Tbnet process failed, ret=" << ret << ".";
+            model_Tbnet->DeInit();
+            return ret;
+        }
+    }
+
+    model_Tbnet->DeInit();
+
+    double total_time = model_Tbnet->GetInferCostMilliSec() / 1000;
+    LogInfo<< "inferance total cost time: "<< total_time<< ", FPS: "<< DATA_SIZE/total_time;
+
+    return APP_ERR_OK;
+}
diff --git a/official/recommend/tbnet/infer/sdk/main.py b/official/recommend/tbnet/infer/sdk/main.py
new file mode 100644
index 000000000..1498f3aa9
--- /dev/null
+++ b/official/recommend/tbnet/infer/sdk/main.py
@@ -0,0 +1,113 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+""" main.py """
+import argparse
+import os
+from StreamManagerApi import StreamManagerApi, StringVector
+from StreamManagerApi import MxDataInput, InProtobufVector, MxProtobufIn
+import MxpiDataType_pb2 as MxpiDataType
+import numpy as np
+
+
+def parse_args(parsers):
+    """
+    Parse commandline arguments.
+    """
+    parsers.add_argument('--data_path', type=str,
+                         default="../../preprocess_Result",
+                         help='text path')
+    return parsers
+
+def create_protobuf(path, id1, shape):
+    """Pack one input tensor read from a .bin file into an MxProtobufIn for appsrc id1."""
+    data_input = MxDataInput()
+    with open(path, 'rb') as f:
+        data = f.read()
+    data_input.data = data
+    tensorPackageList1 = MxpiDataType.MxpiTensorPackageList()
+    tensorPackage1 = tensorPackageList1.tensorPackageVec.add()
+    tensorVec1 = tensorPackage1.tensorVec.add()
+    tensorVec1.deviceId = 0
+    tensorVec1.memType = 0
+    for t in shape:
+        tensorVec1.tensorShape.append(t)
+    tensorVec1.dataStr = data_input.data
+    tensorVec1.tensorDataSize = len(data)
+
+    protobuf1 = MxProtobufIn()
+    protobuf1.key = b'appsrc%d' % id1
+    protobuf1.type = b'MxTools.MxpiTensorPackageList'
+    protobuf1.protobuf = tensorPackageList1.SerializeToString()
+
+    return protobuf1
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description='Om tbnet Inference')
+    parser = parse_args(parser)
+    args, _ = parser.parse_known_args()
+    # init stream manager
+    stream_manager = StreamManagerApi()
+    ret = stream_manager.InitManager()
+    if ret != 0:
+        print("Failed to init Stream manager, ret=%s" % str(ret))
+        exit()
+
+    # create streams by pipeline config file
+    with open("../data/config/tbnet.pipeline", 'rb') as fl:
+        pipeline = fl.read()
+    ret = stream_manager.CreateMultipleStreams(pipeline)
+    if ret != 0:
+        print("Failed to create Stream, ret=%s" % str(ret))
+        exit()
+
+    # Construct the input of the stream
+
+    res_dir_name = 'result'
+    if not os.path.exists(res_dir_name):
+        os.makedirs(res_dir_name)
+
+    results = []
+    input_names = ['00_item', '01_rl1', '02_ety', '03_rl2', '04_his', '05_rate']
+    shape_list = [[1], [1, 39], [1, 39], [1, 39], [1, 39], [1]]
+
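+    # For each sample: send the six input tensors to their appsrc plugins,
+    # then fetch the result of mxpi_tensorinfer0 through appsink0. The
+    # preprocessed test set is assumed to hold 18415 samples (matching
+    # DATA_SIZE in the MxBase main.cpp), each producing four output tensors.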
+    for idx in range(18415):
+        print('infer %d' % idx)
+        for index, name in enumerate(input_names):
+            protobufVec = InProtobufVector()
+            path_tmp = os.path.join(args.data_path, name,
+                                    'tbnet_' + name.split('_')[1] + '_bs1_' + str(idx) + '.bin')
+            protobufVec.push_back(create_protobuf(path_tmp, index, shape_list[index]))
+            ret = stream_manager.SendProtobuf(b'tbnet', b'appsrc%d' % index, protobufVec)
+            if ret < 0:
+                print("Failed to send data to stream, ret=%s" % str(ret))
+                exit()
+
+        keyVec = StringVector()
+        keyVec.push_back(b'mxpi_tensorinfer0')
+        infer_result = stream_manager.GetProtobuf(b'tbnet', 0, keyVec)
+        if infer_result.size() == 0:
+            print("inferResult is null")
+            exit()
+        if infer_result[0].errorCode != 0:
+            print("GetProtobuf error. errorCode=%d" % (
+                infer_result[0].errorCode))
+            exit()
+        # get infer result
+        result = MxpiDataType.MxpiTensorPackageList()
+        result.ParseFromString(infer_result[0].messageBuf)
+        for i in range(4):
+            res = np.frombuffer(result.tensorPackageVec[0].tensorVec[i].dataStr, dtype=np.float32)
+            np.savetxt("./result/tbnet_item_bs1_%d_%d.txt" % (idx, i), res, fmt='%.06f')
+
+    # destroy streams
+    stream_manager.DestroyAllStreams()
diff --git a/official/recommend/tbnet/infer/sdk/prec/postprocess.py b/official/recommend/tbnet/infer/sdk/prec/postprocess.py
new file mode 100644
index 000000000..413757245
--- /dev/null
+++ b/official/recommend/tbnet/infer/sdk/prec/postprocess.py
@@ -0,0 +1,90 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""preprocess data"""
+import argparse
+import os
+import numpy as np
+
+parser = argparse.ArgumentParser(description='Postprocess of TB-Net Inference')
+parser.add_argument('--result_Path', type=str, default='./result_Files',
+                    help='result path')
+parser.add_argument('--label_Path', default='./result_Files', type=str,
+                    help='label file path')
+parser.add_argument('--batch_size', default=1, type=int, help='batch_size')
+args = parser.parse_args()
+
+def calculate_auc(labels_list, preds_list):
+    """
+    The AUC calculation function
+    Input:
+            labels_list: list of true label
+            preds_list:  list of predicted label
+    Outputs
+            Float, means of AUC
+    """
+    auc = []
+    n_bins = labels_list.shape[0] // 2
+    if labels_list.ndim == 1:
+        labels_list = labels_list.reshape(-1, 1)
+        preds_list = preds_list.reshape(-1, 1)
+    for i in range(labels_list.shape[1]):
+        labels = labels_list[:, i]
+        preds = preds_list[:, i]
+        positive_len = labels.sum()
+        negative_len = labels.shape[0] - positive_len
+        total_case = positive_len * negative_len
+        positive_histogram = np.zeros((n_bins))
+        negative_histogram = np.zeros((n_bins))
+        bin_width = 1.0 / n_bins
+
+        for j, _ in enumerate(labels):
+            nth_bin = min(int(preds[j] // bin_width), n_bins - 1)  # clamp preds == 1.0 into the last bin
+            if labels[j]:
+                positive_histogram[nth_bin] = positive_histogram[nth_bin] + 1
+            else:
+                negative_histogram[nth_bin] = negative_histogram[nth_bin] + 1
+
+        accumulated_negative = 0
+        satisfied_pair = 0
+        for k in range(n_bins):
+            satisfied_pair += (
+                positive_histogram[k] * accumulated_negative +
+                positive_histogram[k] * negative_histogram[k] * 0.5)
+            accumulated_negative += negative_histogram[k]
+        auc.append(satisfied_pair / total_case)
+
+    return np.mean(auc)
+
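+# Pair every ground-truth label file with the predicted score of the same
+# sample (output tensor 1, i.e. the "*_1.txt" file) and compute overall AUC.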
+dirs = os.listdir(args.label_Path)
+print('---------- start cal acc ----------')
+gt_list = []
+pred_list = []
+for file in dirs:
+    label = np.fromfile(os.path.join(args.label_Path, file), dtype=np.float32)
+    gt_list.append(label)
+
+    file_name = file.split('.')[0]
+    idx = file_name.split('_')[-1]
+    predict_file_name = "tbnet_item_bs1_" + str(idx) + "_1.txt"
+    predict_file = os.path.join(args.result_Path, predict_file_name)
+
+    predict = np.loadtxt(predict_file, dtype=np.float32).reshape(1)
+    pred_list.append(predict)
+
+res_pred = np.concatenate(pred_list, axis=0)
+res_true = np.concatenate(gt_list, axis=0)
+rst_auc = calculate_auc(res_true, res_pred)
+print('auc:', rst_auc)
diff --git a/official/recommend/tbnet/infer/sdk/run.sh b/official/recommend/tbnet/infer/sdk/run.sh
new file mode 100644
index 000000000..00dcdb1da
--- /dev/null
+++ b/official/recommend/tbnet/infer/sdk/run.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Simple log helper functions
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+# add the MindX SDK python directory to PYTHONPATH so StreamManagerApi can be imported
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+python3 main.py
+exit 0
diff --git a/official/recommend/tbnet/modelarts/train_modelarts.py b/official/recommend/tbnet/modelarts/train_modelarts.py
new file mode 100644
index 000000000..bbb8029e3
--- /dev/null
+++ b/official/recommend/tbnet/modelarts/train_modelarts.py
@@ -0,0 +1,158 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""TB-Net training."""
+
+import os
+import argparse
+
+import numpy as np
+from mindspore import context, Model, Tensor, export
+from mindspore.train.serialization import save_checkpoint
+from mindspore.train.callback import Callback, TimeMonitor
+
+from src import tbnet, config, metrics, dataset
+
+
+class MyLossMonitor(Callback):
+    """My loss monitor definition."""
+
+    def epoch_end(self, run_context):
+        """Print loss at each epoch end."""
+        cb_params = run_context.original_args()
+        loss = cb_params.net_outputs
+
+        if isinstance(loss, (tuple, list)):
+            if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):
+                loss = loss[0]
+
+        if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):
+            loss = np.mean(loss.asnumpy())
+        print('loss:' + str(loss))
+
+
+def get_args():
+    """Parse commandline arguments."""
+    parser = argparse.ArgumentParser(description='Train TBNet.')
+
+    parser.add_argument(
+        '--dataset',
+        type=str,
+        required=False,
+        default='steam',
+        help="'steam' dataset is supported currently"
+    )
+
+    parser.add_argument(
+        '--data_url',
+        type=str,
+        required=False,
+        default='',
+        help="the dataset directory containing train.csv"
+    )
+
+    parser.add_argument(
+        '--train_url',
+        type=str,
+        required=False,
+        default='',
+        help="data to ckpt"
+    )
+
+    parser.add_argument(
+        '--device_id',
+        type=int,
+        required=False,
+        default=0,
+        help="device id"
+    )
+
+    parser.add_argument(
+        '--epochs',
+        type=int,
+        required=False,
+        default=20,
+        help="number of training epochs"
+    )
+
+    parser.add_argument(
+        '--device_target',
+        type=str,
+        required=False,
+        default='GPU',
+        help="run code on GPU"
+    )
+
+    parser.add_argument(
+        '--run_mode',
+        type=str,
+        required=False,
+        default='graph',
+        choices=['graph', 'pynative'],
+        help="run code by GRAPH mode or PYNATIVE mode"
+    )
+
+    return parser.parse_args()
+
+
+def train_tbnet():
+    """Training process."""
+    args = get_args()
+
+    home = os.path.dirname(os.path.realpath(__file__))
+    config_path = os.path.join(home, 'data', args.dataset, 'config.json')
+    train_csv_path = os.path.join(args.data_url, 'train.csv')
+    ckpt_path = args.train_url
+
+    context.set_context(device_id=args.device_id)
+    if args.run_mode == 'graph':
+        context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
+    else:
+        context.set_context(mode=context.PYNATIVE_MODE, device_target=args.device_target)
+
+    if not os.path.exists(ckpt_path):
+        os.makedirs(ckpt_path)
+
+    print(f"creating dataset from {train_csv_path}...")
+    net_config = config.TBNetConfig(config_path)
+    train_ds = dataset.create(train_csv_path, net_config.per_item_num_paths, train=True).batch(net_config.batch_size)
+    print('datasize:', train_ds.get_dataset_size())
+    print("creating TBNet for training...")
+    network = tbnet.TBNet(net_config)
+    loss_net = tbnet.NetWithLossClass(network, net_config)
+    train_net = tbnet.TrainStepWrap(loss_net, net_config.lr)
+    train_net.set_train()
+    eval_net = tbnet.PredictWithSigmoid(network)
+    time_callback = TimeMonitor(data_size=train_ds.get_dataset_size())
+    loss_callback = MyLossMonitor()
+    model = Model(network=train_net, eval_network=eval_net, metrics={'auc': metrics.AUC(), 'acc': metrics.ACC()})
+    print("training...")
+    model.train(epoch=args.epochs, train_dataset=train_ds,
+                callbacks=[time_callback, loss_callback], dataset_sink_mode=False)
+
+    save_checkpoint(network, os.path.join(ckpt_path, 'tbnet.ckpt'))
+
+
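+    # Dummy inputs matching the six-input signature of the network (item, rl1,
+    # ety, rl2, his, rate); only the shapes and dtypes matter for the AIR export.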
+    item = Tensor(np.ones((1,)).astype(np.int64))
+    rl1 = Tensor(np.ones((1, 39)).astype(np.int64))
+    ety = Tensor(np.ones((1, 39)).astype(np.int64))
+    rl2 = Tensor(np.ones((1, 39)).astype(np.int64))
+    his = Tensor(np.ones((1, 39)).astype(np.int64))
+    rate = Tensor(np.ones((1,)).astype(np.float32))
+    inputs = [item, rl1, ety, rl2, his, rate]
+    export(eval_net, *inputs, file_name=os.path.join(ckpt_path, 'tbnet.air'), file_format='AIR')
+
+if __name__ == '__main__':
+    train_tbnet()
diff --git a/official/recommend/tbnet/scripts/docker_start.sh b/official/recommend/tbnet/scripts/docker_start.sh
new file mode 100644
index 000000000..65023c664
--- /dev/null
+++ b/official/recommend/tbnet/scripts/docker_start.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright (c) 2022. Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+docker_image=$1
+data_dir=$2
+model_dir=$3
+
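+# Expose all eight davinci NPUs plus the Ascend driver, add-ons and host log
+# directory to the training container.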
+docker run -it -u root --ipc=host \
+               --device=/dev/davinci0 \
+               --device=/dev/davinci1 \
+               --device=/dev/davinci2 \
+               --device=/dev/davinci3 \
+               --device=/dev/davinci4 \
+               --device=/dev/davinci5 \
+               --device=/dev/davinci6 \
+               --device=/dev/davinci7 \
+               --device=/dev/davinci_manager \
+               --device=/dev/devmm_svm \
+               --device=/dev/hisi_hdc \
+               --privileged \
+               -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+               -v /usr/local/Ascend/add-ons/:/usr/local/Ascend/add-ons \
+               -v ${data_dir}:${data_dir} \
+               -v ${model_dir}:${model_dir} \
+               -v /root/ascend/log:/root/ascend/log ${docker_image} /bin/bash
-- 
GitLab