diff --git a/research/nlp/hypertext/infer/convert/air2om.sh b/research/nlp/hypertext/infer/convert/air2om.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c6df52c8abe67487ff2144f076531438ead19530
--- /dev/null
+++ b/research/nlp/hypertext/infer/convert/air2om.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+air_path=$1
+output_name=$2
+
+atc --model="${air_path}" \
+    --framework=1 \
+    --output="${output_name}" \
+    --soc_version=Ascend310
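+
+# Example usage (hypothetical paths; the AIR file is produced by modelarts/train_start.py):
+#   bash air2om.sh ../data/model/hypertext_tnews.air ../data/model/tnews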
diff --git a/research/nlp/hypertext/infer/data/config/hypertext.pipeline b/research/nlp/hypertext/infer/data/config/hypertext.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..fbb693fa0cb5e7e7b8843d53ae9361a3d75a35f3
--- /dev/null
+++ b/research/nlp/hypertext/infer/data/config/hypertext.pipeline
@@ -0,0 +1,42 @@
+{
+    "hypertext": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0:0"
+        },
+        "appsrc1": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0:1"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource":"appsrc0,appsrc1",
+                "modelPath": "../data/model/tnews.om"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "mxpi_dataserialize0"
+        },
+        "mxpi_dataserialize0": {
+            "props": {
+                "outputDataKeys": "mxpi_tensorinfer0"
+            },
+            "factory": "mxpi_dataserialize",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "props": {
+                "blocksize": "4096000"
+            },
+            "factory": "appsink"
+        }
+    }
+}
diff --git a/research/nlp/hypertext/infer/docker_start_infer.sh b/research/nlp/hypertext/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c76879755acc27849fd8ca75ab0fcd76daea1658
--- /dev/null
+++ b/research/nlp/hypertext/infer/docker_start_infer.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
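+# Usage: bash docker_start_infer.sh <docker_image> <share_dir> <data_dir>
+# e.g. (hypothetical image/paths): bash docker_start_infer.sh mindx/mxvision:latest /home/hypertext /home/data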
+docker_image=$1
+share_dir=$2
+data_dir=$3
+echo "$1"
+echo "$2"
+if [ -z "${docker_image}" ]; then
+    echo "please input docker_image"
+    exit 1
+fi
+
+if [ ! -d "${share_dir}" ]; then
+    echo "please input share directory that contains dataset, models and codes"
+    exit 1
+fi
+
+
+docker run -it -u root \
+    --device=/dev/davinci0 \
+    --device=/dev/davinci_manager \
+    --device=/dev/devmm_svm \
+    --device=/dev/hisi_hdc \
+    --privileged \
+    -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
+    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+    -v ${data_dir}:${data_dir}  \
+    -v ${share_dir}:${share_dir} \
+    ${docker_image} \
+    /bin/bash
diff --git a/research/nlp/hypertext/infer/mxbase/CMakeLists.txt b/research/nlp/hypertext/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..04eea657665d9b10ad6e153cf08de054d33348ca
--- /dev/null
+++ b/research/nlp/hypertext/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,51 @@
+cmake_minimum_required(VERSION 3.10.0)
+project(hypertext)
+
+set(TARGET hypertext)
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_definitions(-Dgoogle=mindxsdk_private)
+add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall)
+add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -s -pie)
+
+# Check environment variable
+if(NOT DEFINED ENV{ASCEND_HOME})
+    message(FATAL_ERROR "please define environment variable: ASCEND_HOME")
+endif()
+if(NOT DEFINED ENV{ASCEND_VERSION})
+    message(WARNING "please define environment variable: ASCEND_VERSION")
+endif()
+if(NOT DEFINED ENV{ARCH_PATTERN})
+    message(WARNING "please define environment variable: ARCH_PATTERN")
+endif()
+set(ACL_INC_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/include)
+set(ACL_LIB_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/lib64)
+
+set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
+set(MXBASE_INC ${MXBASE_ROOT_DIR}/include)
+set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib)
+set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors)
+set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/include/MxBase/postprocess/include)
+if(DEFINED ENV{MXSDK_OPENSOURCE_DIR})
+    set(OPENSOURCE_DIR $ENV{MXSDK_OPENSOURCE_DIR})
+else()
+    set(OPENSOURCE_DIR ${MXBASE_ROOT_DIR}/opensource)
+endif()
+
+include_directories(${ACL_INC_DIR})
+include_directories(${OPENSOURCE_DIR}/include)
+include_directories(${OPENSOURCE_DIR}/include/opencv4)
+
+include_directories(${MXBASE_INC})
+include_directories(${MXBASE_POST_PROCESS_DIR})
+
+link_directories(${ACL_LIB_DIR})
+link_directories(${OPENSOURCE_DIR}/lib)
+link_directories(${MXBASE_LIB_DIR})
+link_directories(${MXBASE_POST_LIB_DIR})
+
+add_executable(${TARGET} src/main.cpp src/Hypertext.cpp)
+target_link_libraries(${TARGET} glog cpprest mxbase opencv_world stdc++fs)
+
+install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/)
diff --git a/research/nlp/hypertext/infer/mxbase/build.sh b/research/nlp/hypertext/infer/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1c895af4571e03c79358cc6158949eb439e2f1f7
--- /dev/null
+++ b/research/nlp/hypertext/infer/mxbase/build.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+path_cur=$(dirname "$0")
+
+# set ASCEND_VERSION to ascend-toolkit/latest when it is not specified by the user
+if [ ! "${ASCEND_VERSION}" ]; then
+    export ASCEND_VERSION=ascend-toolkit/latest
+    echo "Set ASCEND_VERSION to the default value: ${ASCEND_VERSION}"
+else
+    echo "ASCEND_VERSION is set to ${ASCEND_VERSION} by user"
+fi
+
+if [ ! "${ARCH_PATTERN}" ]; then
+    # set ARCH_PATTERN to ./ when it is not specified by the user
+    export ARCH_PATTERN=./
+    echo "ARCH_PATTERN is set to the default value: ${ARCH_PATTERN}"
+else
+    echo "ARCH_PATTERN is set to ${ARCH_PATTERN} by user"
+fi
+
+cd "$path_cur" || exit
+rm -rf build
+mkdir -p build
+cd build || exit
+cmake ..
+make
+ret=$?
+if [ ${ret} -ne 0 ]; then
+    echo "Failed to build bert."
+    exit ${ret}
+fi
+make install
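+# The hypertext executable is installed into this mxbase directory (see the CMake install rule).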
diff --git a/research/nlp/hypertext/infer/mxbase/run.sh b/research/nlp/hypertext/infer/mxbase/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..35f29869d0a7339043843aebd3810c246714b935
--- /dev/null
+++ b/research/nlp/hypertext/infer/mxbase/run.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+model_type=$1
+maxLength=$2
+modelPath=$3
+inferIdsPath=$4
+inferNgradPath=$5
+resultName=$6
+
+# run
+./build/hypertext "${model_type}" "${maxLength}" "${modelPath}" "${inferIdsPath}" "${inferNgradPath}" "${resultName}"
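+
+# Example (paths follow the defaults in src/main.cpp):
+#   bash run.sh tnews 40 ../data/model/tnews.om \
+#       ../data/input/tnews_infer_txt/hypertext_ids_bs1_57404.txt \
+#       ../data/input/tnews_infer_txt/hypertext_ngrad_bs1_57404.txt result_tnews.txt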
diff --git a/research/nlp/hypertext/infer/mxbase/src/Hypertext.cpp b/research/nlp/hypertext/infer/mxbase/src/Hypertext.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9e5b6f5e1f3638edf754fd66a8b06686422789e9
--- /dev/null
+++ b/research/nlp/hypertext/infer/mxbase/src/Hypertext.cpp
@@ -0,0 +1,250 @@
+/**
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Hypertext.h"
+
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <chrono>
+#include <fstream>
+#include <map>
+#include <sstream>
+
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+
+APP_ERROR HypertextNerBase::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    maxLength_ = initParam.maxLength;
+    resultName_ = initParam.resultName;
+    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+    dvppWrapper_ = std::make_shared<MxBase::DvppWrapper>();
+    ret = dvppWrapper_->Init();
+    if (ret != APP_ERR_OK) {
+        LogError << "DvppWrapper init failed, ret=" << ret << ".";
+        return ret;
+    }
+    model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    return APP_ERR_OK;
+}
+
+APP_ERROR HypertextNerBase::DeInit() {
+    dvppWrapper_->DeInit();
+    model_->DeInit();
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+APP_ERROR HypertextNerBase::ReadTensorFromFile(const std::string &file, int32_t *data,
+                                               uint32_t size) {
+    if (data == nullptr) {
+        LogError << "input data is invalid.";
+        return APP_ERR_COMM_INVALID_POINTER;
+    }
+    std::ifstream fp(file);
+    std::string line;
+    while (std::getline(fp, line)) {
+        std::string number;
+        std::istringstream readstr(line);
+        for (uint32_t j = 0; j < size; j++) {
+            std::getline(readstr, number, ' ');
+            data[j] = atoi(number.c_str());
+        }
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR HypertextNerBase::ReadInputTensor(int32_t *data, const std::string &fileName,
+                                            uint32_t index,
+                                            std::vector<MxBase::TensorBase> *inputs,
+                                            const uint32_t size) {
+    const uint32_t dataSize = modelDesc_.inputTensors[index].tensorSize;
+    MxBase::MemoryData memoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE,
+                                     deviceId_);
+    MxBase::MemoryData memoryDataSrc(reinterpret_cast<void *>(data), dataSize,
+                                     MxBase::MemoryData::MEMORY_HOST_MALLOC);
+    APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc and copy failed.";
+        return ret;
+    }
+    std::vector<uint32_t> shape = {1, size};
+    inputs->push_back(MxBase::TensorBase(memoryDataDst, false, shape,
+                                         MxBase::TENSOR_DTYPE_INT32));
+    delete[] data;
+    return APP_ERR_OK;
+}
+
+APP_ERROR HypertextNerBase::Inference(const std::vector<MxBase::TensorBase> &inputs,
+                                      std::vector<MxBase::TensorBase> *outputs) {
+    auto dtypes = model_->GetOutputDataType();
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector<uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        MxBase::TensorBase tensor(shape, dtypes[i],
+                                  MxBase::MemoryData::MemoryType::MEMORY_DEVICE,
+                                  deviceId_);
+        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs->push_back(tensor);
+    }
+    MxBase::DynamicInfo dynamicInfo = {};
+    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+    auto startTime = std::chrono::high_resolution_clock::now();
+    APP_ERROR ret = model_->ModelInference(inputs, *outputs, dynamicInfo);
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs =
+            std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    g_inferCost.push_back(costMs);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR HypertextNerBase::PostProcess(std::vector<MxBase::TensorBase> *outputs,
+                                        std::vector<uint32_t> *predict) {
+    MxBase::TensorBase &tensor = outputs->at(0);
+    APP_ERROR ret = tensor.ToHost();
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Tensor deploy to host failed.";
+        return ret;
+    }
+    // check tensor is available
+    auto outputShape = tensor.GetShape();
+    uint32_t length = outputShape[0];
+    void *data = tensor.GetBuffer();
+    for (uint32_t i = 0; i < length; i++) {
+        int32_t value = *(reinterpret_cast<int32_t *>(data) + i);
+        predict->push_back(value);
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR HypertextNerBase::WriteResult(const std::string &fileName,
+                                        const std::vector<uint32_t> &predict) {
+    std::string resultPathName = "result";
+    // create the result directory when it does not exist
+    if (access(resultPathName.c_str(), F_OK) != 0) {
+        int ret = mkdir(resultPathName.c_str(), S_IRUSR | S_IWUSR | S_IXUSR);
+        if (ret != 0) {
+            LogError << "Failed to create result directory: " << resultPathName
+                     << ", ret = " << ret;
+            return APP_ERR_COMM_OPEN_FAIL;
+        }
+    }
+    // create result file under result directory
+    resultPathName = resultPathName + "/" + resultName_;
+    std::ofstream tfile(resultPathName, std::ofstream::app);
+    if (tfile.fail()) {
+        LogError << "Failed to open result file: " << resultPathName;
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    // write inference result into file
+    LogInfo << "==============================================================";
+    LogInfo << "Infer finished!";
+
+    tfile << predict[0];
+    tfile << std::endl;
+
+    LogInfo << "==============================================================";
+    tfile.close();
+    return APP_ERR_OK;
+}
+
+APP_ERROR HypertextNerBase::Process(const std::string &inferIdsPath, const std::string &inferNgradPath,
+                                    const std::string &fileName) {
+    std::string inputIdsFile = inferIdsPath;
+    std::string inputNgradFile = inferNgradPath;
+
+    APP_ERROR ret;
+    std::ifstream fp1(inputIdsFile);
+    std::string line1;
+    std::ifstream fp2(inputNgradFile);
+    std::string line2;
+    while (std::getline(fp1, line1) && std::getline(fp2, line2)) {
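+        // Each line holds one sample: maxLength_ space-separated token ids,
+        // as produced by utils/preprocess_infer.py.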
+        int32_t *data1 = new int32_t[maxLength_];
+        int32_t *data2 = new int32_t[maxLength_];
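+        // data1/data2 are released inside ReadInputTensor after the host buffer
+        // has been copied to device memory.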
+        std::vector<MxBase::TensorBase> inputs = {};
+        std::vector<MxBase::TensorBase> outputs = {};
+        std::string number1;
+        std::istringstream readstr1(line1);
+        std::string number2;
+        std::istringstream readstr2(line2);
+        for (uint32_t j = 0; j < maxLength_; j++) {
+            std::getline(readstr1, number1, ' ');
+            data1[j] = atoi(number1.c_str());
+            std::getline(readstr2, number2, ' ');
+            data2[j] = atoi(number2.c_str());
+        }
+        ret = ReadInputTensor(data1, inputIdsFile, INPUT_IDS, &inputs, maxLength_);
+        if (ret != APP_ERR_OK) {
+            LogError << "Read input ids failed, ret=" << ret << ".";
+            return ret;
+        }
+        ret = ReadInputTensor(data2, inputNgradFile, INPUT_NGRAD, &inputs,
+                              maxLength_);
+        if (ret != APP_ERR_OK) {
+            LogError << "Read input ngrad failed, ret=" << ret << ".";
+            return ret;
+        }
+
+        ret = Inference(inputs, &outputs);
+        if (ret != APP_ERR_OK) {
+            LogError << "Inference failed, ret=" << ret << ".";
+            return ret;
+        }
+        std::vector<uint32_t> predict;
+        ret = PostProcess(&outputs, &predict);
+        if (ret != APP_ERR_OK) {
+            LogError << "PostProcess failed, ret=" << ret << ".";
+            return ret;
+        }
+        ret = WriteResult(fileName, predict);
+        if (ret != APP_ERR_OK) {
+            LogError << "save result failed, ret=" << ret << ".";
+            return ret;
+        }
+    }
+    return APP_ERR_OK;
+}
diff --git a/research/nlp/hypertext/infer/mxbase/src/Hypertext.h b/research/nlp/hypertext/infer/mxbase/src/Hypertext.h
new file mode 100644
index 0000000000000000000000000000000000000000..61cd5dac1f38eaf2c7b83d8ae281274329720268
--- /dev/null
+++ b/research/nlp/hypertext/infer/mxbase/src/Hypertext.h
@@ -0,0 +1,74 @@
+/**
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MXBASE_HYPERTEXTBASE_H
+#define MXBASE_HYPERTEXTBASE_H
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+
+extern std::vector<double> g_inferCost;
+
+struct InitParam {
+    uint32_t deviceId;
+    uint32_t maxLength;
+    std::string modelType;
+    std::string modelPath;
+    std::string inferIdsPath;
+    std::string inferNgradPath;
+    std::string resultName;
+};
+
+enum DataIndex {
+    INPUT_IDS = 0,
+    INPUT_NGRAD = 1,
+};
+
+class HypertextNerBase {
+ public:
+    APP_ERROR Init(const InitParam &initParam);
+    APP_ERROR DeInit();
+    APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs,
+                        std::vector<MxBase::TensorBase> *outputs);
+    APP_ERROR Process(const std::string &inferIdsPath, const std::string &inferNgradPath, const std::string &fileName);
+    APP_ERROR PostProcess(std::vector<MxBase::TensorBase> *outputs,
+                          std::vector<uint32_t> *predict);
+
+ protected:
+    APP_ERROR ReadTensorFromFile(const std::string &file, int32_t *data,
+                                 uint32_t size);
+    APP_ERROR ReadInputTensor(int32_t *data, const std::string &fileName, uint32_t index,
+                              std::vector<MxBase::TensorBase> *inputs,
+                              const uint32_t size);
+    APP_ERROR WriteResult(const std::string &fileName,
+                          const std::vector<uint32_t> &predict);
+
+ private:
+    std::shared_ptr<MxBase::DvppWrapper> dvppWrapper_;
+    std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
+    MxBase::ModelDesc modelDesc_ = {};
+    uint32_t deviceId_ = 0;
+    uint32_t maxLength_ = 0;
+    std::string resultName_ = "";
+};
+#endif  // MXBASE_HYPERTEXTBASE_H
diff --git a/research/nlp/hypertext/infer/mxbase/src/main.cpp b/research/nlp/hypertext/infer/mxbase/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a55dd0b98d67855ad0e9c2369c15859c592e71b7
--- /dev/null
+++ b/research/nlp/hypertext/infer/mxbase/src/main.cpp
@@ -0,0 +1,79 @@
+/**
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <dirent.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+#include "Hypertext.h"
+#include "MxBase/Log/Log.h"
+
+std::vector<double> g_inferCost;
+
+void InitHypertextParam(InitParam* initParam) {
+    initParam->deviceId = 0;
+    initParam->modelType = "tnews";
+    initParam->maxLength = 40;
+    initParam->modelPath = "../data/model/tnews.om";
+    initParam->inferIdsPath = "../data/input/tnews_infer_txt/hypertext_ids_bs1_57404.txt";
+    initParam->inferNgradPath = "../data/input/tnews_infer_txt/hypertext_ngrad_bs1_57404.txt";
+    initParam->resultName = "result_tnews.txt";
+}
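+
+// The defaults above are documentation only; main() always overrides them with
+// the six required command-line arguments (see the argc check below).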
+
+int main(int argc, char* argv[]) {
+    if (argc < 7) {
+        LogWarn << "Please input model_type, max_length, model_path, infer_ids_path, infer_ngrad_path and result_name.";
+        return APP_ERR_OK;
+    }
+    InitParam initParam;
+    InitHypertextParam(&initParam);
+    initParam.modelType = argv[1];
+    initParam.maxLength = atoi(argv[2]);
+    initParam.modelPath = argv[3];
+    initParam.inferIdsPath = argv[4];
+    initParam.inferNgradPath = argv[5];
+    initParam.resultName = argv[6];
+    auto hypertextBase = std::make_shared<HypertextNerBase>();
+    APP_ERROR ret = hypertextBase->Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "Hypertextbase init failed, ret=" << ret << ".";
+        return ret;
+    }
+    // process
+    ret = hypertextBase->Process(initParam.inferIdsPath, initParam.inferNgradPath, initParam.modelType);
+    if (ret != APP_ERR_OK) {
+        LogError << "Hypertextbase process failed, ret=" << ret << ".";
+        hypertextBase->DeInit();
+        return ret;
+    }
+    hypertextBase->DeInit();
+    double costSum = 0;
+    for (uint32_t i = 0; i < g_inferCost.size(); i++) {
+        costSum += g_inferCost[i];
+    }
+    LogInfo << "Infer texts sum " << g_inferCost.size()
+            << ", cost total time: " << costSum << " ms.";
+    LogInfo << "The throughput: " << g_inferCost.size() * 1000 / costSum
+            << " bin/sec.";
+    return APP_ERR_OK;
+}
diff --git a/research/nlp/hypertext/infer/sdk/main.py b/research/nlp/hypertext/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb70eb9f04a343706ebc6450bcff2a2d6076cb84
--- /dev/null
+++ b/research/nlp/hypertext/infer/sdk/main.py
@@ -0,0 +1,147 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""
+sample script of CLUE infer using SDK run in docker
+"""
+
+import argparse
+import os
+
+import numpy as np
+import MxpiDataType_pb2 as MxpiDataType
+from StreamManagerApi import StreamManagerApi, InProtobufVector, MxProtobufIn, StringVector, MxDataInput
+
+def parse_args():
+    """set and check parameters."""
+    parser = argparse.ArgumentParser(description="Mass process")
+    parser.add_argument("--pipeline", type=str, default="../data/config/hypertext.pipline", help="SDK infer pipeline")
+    parser.add_argument("--data_dir", type=str, default="../data/input",
+                        help="Dataset contain input_ids, input_mask, segment_ids, label_ids")
+    parser.add_argument("--data_type", type=str, default="iflytek", help="Dataset type")
+    parser.add_argument("--output_dir", type=str, default="./result", help="save result to file")
+    args_opt = parser.parse_args()
+    return args_opt
+
+
+def send_source_data(tensor, tensor_bytes, name, manager_api, in_plugin_id):
+    """
+    Construct the input of the stream and send it to the stream specified
+    by name; exits the process if sending fails.
+    """
+    tensorPackageList = MxpiDataType.MxpiTensorPackageList()
+    tensorPackage = tensorPackageList.tensorPackageVec.add()
+    dataInput = MxDataInput()
+    dataInput.data = tensor_bytes
+    tensorVec = tensorPackage.tensorVec.add()
+    tensorVec.deviceId = 0
+    tensorVec.memType = 0
+    for t in tensor.shape:
+        tensorVec.tensorShape.append(t)
+    tensorVec.dataStr = dataInput.data
+    tensorVec.tensorDataSize = len(tensor_bytes)
+    key = "appsrc{}".format(in_plugin_id).encode('utf-8')
+    protobufVec = InProtobufVector()
+    protobuf = MxProtobufIn()
+    protobuf.key = key
+    protobuf.type = b'MxTools.MxpiTensorPackageList'
+    protobuf.protobuf = tensorPackageList.SerializeToString()
+    protobufVec.push_back(protobuf)
+    unique_id = manager_api.SendProtobuf(name, in_plugin_id, protobufVec)
+    if unique_id < 0:
+        print("Failed to send data to stream.")
+        exit()
+
+
+if __name__ == '__main__':
+    args = parse_args()
+
+    # init stream manager
+    stream_manager_api = StreamManagerApi()
+    ret = stream_manager_api.InitManager()
+    if ret != 0:
+        print("Failed to init Stream manager, ret=%s" % str(ret))
+        exit()
+
+    # create streams by pipeline config file
+    with open(args.pipeline, 'rb') as f:
+        pipelineStr = f.read()
+    ret = stream_manager_api.CreateMultipleStreams(pipelineStr)
+    if ret != 0:
+        print("Failed to create Stream, ret=%s" % str(ret))
+        exit()
+
+    # Construct the input of the stream
+    if args.data_type == "tnews":
+        ids_path = os.path.join(args.data_dir, "tnews_infer_txt/hypertext_ids_bs1_57404.txt")
+        ngrad_path = os.path.join(args.data_dir, "tnews_infer_txt/hypertext_ngrad_bs1_57404.txt")
+        ids = np.loadtxt(ids_path, dtype=np.int32).reshape(-1, 40)
+        ngrad = np.loadtxt(ngrad_path, dtype=np.int32).reshape(-1, 40)
+        output_name = "output_tnews.txt"
+    elif args.data_type == "iflytek":
+        ids_path = os.path.join(args.data_dir, "iflytek_infer_txt/hypertext_ids_bs1_3082.txt")
+        ngrad_path = os.path.join(args.data_dir, "iflytek_infer_txt/hypertext_ngrad_bs1_3082.txt")
+        ids = np.loadtxt(ids_path, dtype=np.int32).reshape(-1, 1000)
+        ngrad = np.loadtxt(ngrad_path, dtype=np.int32).reshape(-1, 1000)
+        output_name = "output_iflytek.txt"
+    else:
+        print("Unsupported data type")
+        exit()
+    stream_name = b'hypertext'
+    num = ids.shape[0]
+    res = ""
+    for idx in range(num):
+        tensor0 = ids[idx]
+        tensor0 = np.expand_dims(tensor0, 0)
+        tensor_bytes0 = tensor0.tobytes()
+        send_source_data(tensor0, tensor_bytes0, stream_name, stream_manager_api, 0)
+
+        tensor1 = ngrad[idx]
+        tensor1 = np.expand_dims(tensor1, 0)
+        tensor_bytes1 = tensor1.tobytes()
+        send_source_data(tensor1, tensor_bytes1, stream_name, stream_manager_api, 1)
+
+        # Obtain the inference result by specifying streamName and uniqueId.
+        keyVec = StringVector()
+        keyVec.push_back(b'mxpi_tensorinfer0')
+        infer_result = stream_manager_api.GetProtobuf(stream_name, 0, keyVec)
+        if infer_result.size() == 0:
+            print("inferResult is null")
+            exit()
+        if infer_result[0].errorCode != 0:
+            print("GetProtobuf error. errorCode=%d" % (
+                infer_result[0].errorCode))
+            exit()
+
+        # get infer result
+        result = MxpiDataType.MxpiTensorPackageList()
+        result.ParseFromString(infer_result[0].messageBuf)
+
+        # convert the inference result to Numpy array
+        output = np.frombuffer(result.tensorPackageVec[0].tensorVec[0].dataStr, dtype=np.int32)
+        for x in output:
+            res = res + str(x) + ' '
+        res = res + '\n'
+        print(output)
+
+    # save infer result
+    if not os.path.exists(args.output_dir):
+        os.mkdir(args.output_dir)
+    with open(os.path.join(args.output_dir, output_name), "w") as f:
+        f.write(res)
+
+    # destroy streams
+    stream_manager_api.DestroyAllStreams()
diff --git a/research/nlp/hypertext/infer/sdk/run.sh b/research/nlp/hypertext/infer/sdk/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5c74a99740471a9bf493909c3b5081860f33e119
--- /dev/null
+++ b/research/nlp/hypertext/infer/sdk/run.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Simple log helper functions
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
+export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
+
+# set PYTHONPATH so that StreamManagerApi.py can be imported
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
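+# Usage: bash run.sh <data_type>, e.g. bash run.sh tnews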
+model_type=$1
+python3 main.py --data_type ${model_type}
+exit 0
diff --git a/research/nlp/hypertext/infer/utils/parse_output.sh b/research/nlp/hypertext/infer/utils/parse_output.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ac776e025207f8fef22c17c8d419b30c9506e315
--- /dev/null
+++ b/research/nlp/hypertext/infer/utils/parse_output.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+result_Path=$1
+label_Path=$2
+
+python3 postprocess_infer.py --result_Path "${result_Path}" --label_Path "${label_Path}"
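+
+# Example (hypothetical paths):
+#   bash parse_output.sh ../sdk/result/output_tnews.txt ../data/input/tnews_infer_txt/hypertext_label_bs1_57404.txt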
diff --git a/research/nlp/hypertext/infer/utils/postprocess_infer.py b/research/nlp/hypertext/infer/utils/postprocess_infer.py
new file mode 100644
index 0000000000000000000000000000000000000000..a771cd93d7658a151e7c7572e2ab6345532ab1d2
--- /dev/null
+++ b/research/nlp/hypertext/infer/utils/postprocess_infer.py
@@ -0,0 +1,36 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""postprocess_infer data"""
+
+import argparse
+import numpy as np
+
+parser = argparse.ArgumentParser(description='Postprocess of Hypertext Inference')
+parser.add_argument('--result_Path', type=str, help='result path')
+parser.add_argument('--label_Path', type=str, help='label file path')
+args = parser.parse_args()
+
+cur, total = 0, 0
+
+label = np.loadtxt(args.label_Path, dtype=np.int32).reshape(-1, 1)
+predict = np.loadtxt(args.result_Path, dtype=np.int32).reshape(-1, 1)
+
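+# accuracy = fraction of samples whose predicted class id equals its label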
+for i in range(label.shape[0]):
+    acc = predict[i] == label[i]
+    acc = np.array(acc, dtype=np.float32)
+    cur += (np.sum(acc, -1))
+    total += len(acc)
+print('acc:', cur / total)
diff --git a/research/nlp/hypertext/infer/utils/preprocess_infer.py b/research/nlp/hypertext/infer/utils/preprocess_infer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f800b1c46847905b0d826c3a6a2e5d0b71d943c
--- /dev/null
+++ b/research/nlp/hypertext/infer/utils/preprocess_infer.py
@@ -0,0 +1,80 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""preprocess-infer data"""
+import argparse
+import os
+
+from mindspore import context
+import numpy as np
+from src.config import Config
+from src.dataset import build_dataset, build_dataloader
+
+parser = argparse.ArgumentParser(description='HyperText Text Classification')
+parser.add_argument('--model', type=str, default='HyperText',
+                    help='HyperText')
+parser.add_argument('--datasetdir', default='./data/iflytek_public', type=str,
+                    help='dataset dir iflytek_public tnews_public')
+parser.add_argument('--outputdir', default='./data/iflytek_public', type=str,
+                    help='output dir for the preprocessed txt files')
+parser.add_argument('--batch_size', default=1, type=int, help='batch_size')
+parser.add_argument('--datasetType', default='iflytek', type=str, help='iflytek/tnews')
+parser.add_argument('--device', default='Ascend', type=str, help='device GPU Ascend')
+args = parser.parse_args()
+
+config = Config(args.datasetdir, None, args.device)
+if args.datasetType == 'tnews':
+    config.useTnews()
+else:
+    config.useIflyek()
+if config.device == 'GPU':
+    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
+elif config.device == 'Ascend':
+    context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
+vocab, train_data, dev_data, test_data = build_dataset(config, use_word=True, min_freq=int(config.min_freq))
+test_iter = build_dataloader(test_data, args.batch_size, config.max_length)
+config.n_vocab = len(vocab)
+if not os.path.isdir(args.outputdir):
+    os.makedirs(args.outputdir)
+
+def w2txt(file, data):
+    with open(file, "w") as f:
+        for i in range(data.shape[0]):
+            s = ' '.join(str(num) for num in data[i, 0])
+            f.write(s+"\n")
+
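+# Note: the stacking below yields shape (N, 1, max_length); w2txt's data[i, 0] indexing assumes batch_size == 1.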
+print('----------start preprocessing infer data-------------')
+idx = 0
+
+ids_sents = []
+ngrad_sents = []
+label_sents = []
+for d in test_iter.create_dict_iterator():
+    ids_sents.append(d['ids'].asnumpy().astype(np.int32))
+    ngrad_sents.append(d['ngrad_ids'].asnumpy().astype(np.int32))
+    label_sents.append(d['label'].asnumpy().astype(np.int32))
+    idx += 1
+
+ids_sents = np.array(ids_sents).astype(np.int32)
+ngrad_sents = np.array(ngrad_sents).astype(np.int32)
+label_sents = np.array(label_sents).astype(np.int32)
+
+ids_name = "hypertext_ids_bs" + str(args.batch_size) + "_" + str(idx) + ".txt"
+ngrad_name = "hypertext_ngrad_bs" + str(args.batch_size) + "_" + str(idx) + ".txt"
+label_name = "hypertext_label_bs" + str(args.batch_size) + "_" + str(idx) + ".txt"
+
+w2txt(os.path.join(args.outputdir, ids_name), ids_sents)
+w2txt(os.path.join(args.outputdir, ngrad_name), ngrad_sents)
+w2txt(os.path.join(args.outputdir, label_name), label_sents)
diff --git a/research/nlp/hypertext/modelarts/train_start.py b/research/nlp/hypertext/modelarts/train_start.py
new file mode 100644
index 0000000000000000000000000000000000000000..5abae6cf954fb8fa07cfc0987cadce283cd7aff8
--- /dev/null
+++ b/research/nlp/hypertext/modelarts/train_start.py
@@ -0,0 +1,177 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""train and export file"""
+import argparse
+import os
+import numpy as np
+
+from mindspore import load_checkpoint, load_param_into_net, context, Model, Tensor
+from mindspore.communication import management as MultiDevice
+from mindspore.context import ParallelMode
+from mindspore.communication.management import init, get_rank
+from mindspore.nn import Cell
+from mindspore.ops import ArgMaxWithValue
+from mindspore.train.callback import LossMonitor, TimeMonitor
+from mindspore.train.serialization import export
+
+from src.config import Config
+from src.dataset import build_dataset, build_dataloader
+from src.hypertext import HModel
+from src.hypertext_train import HModelWithLoss, HModelTrainOneStepCell, EvalCallBack
+from src.radam_optimizer import RiemannianAdam
+
+parser = argparse.ArgumentParser(description='HyperText Text Classification')
+parser.add_argument('--data_url', type=str, help='dataset dir iflytek_public tnews_public')
+parser.add_argument('--train_url', type=str, help='output dir')
+parser.add_argument('--batch_size', default=32, type=int, help='batch_size')
+parser.add_argument('--datasetType', type=str, help='iflytek/tnews')
+parser.add_argument('--device', default='Ascend', type=str, help='device GPU Ascend')
+parser.add_argument('--num_epochs', default=2, type=int, help='num_epochs')
+parser.add_argument("--run_distribute", type=str, default=False, help="run_distribute")
+args = parser.parse_args()
+
+if args.datasetType == "tnews":
+    args.data_url = os.path.join(args.data_url, "tnews_public")
+elif args.datasetType == "iflytek":
+    args.data_url = os.path.join(args.data_url, "iflytek_public")
+else:
+    print("Unsupported dataset type....")
+    exit()
+
+config = Config(args.data_url, args.train_url, args.device)
+
+if config.device == 'GPU':
+    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
+elif config.device == 'Ascend':
+    context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
+config.num_epochs = int(args.num_epochs)
+config.batch_size = int(args.batch_size)
+config.outputdir = args.train_url
+if not os.path.exists(config.outputdir):
+    os.mkdir(config.outputdir)
+if args.datasetType == 'tnews':
+    config.useTnews()
+else:
+    config.useIflyek()
+print('start process data ..........')
+vocab, train_dataset, dev_dataset, test_dataset = build_dataset(config, use_word=True, min_freq=int(config.min_freq))
+config.n_vocab = len(vocab)
+
+
+class HyperTextTextInferExportCell(Cell):
+    """
+    HyperText network infer.
+    """
+
+    def __init__(self, network):
+        """init fun"""
+        super(HyperTextTextInferExportCell, self).__init__(auto_prefix=False)
+        self.network = network
+        self.argmax = ArgMaxWithValue(axis=1, keep_dims=True)
+
+    def construct(self, x1, x2):
+        """construct hypertexttext infer cell"""
+        predicted_idx = self.network(x1, x2)
+        predicted_idx = self.argmax(predicted_idx)
+        return predicted_idx
+
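+# Note: ArgMaxWithValue returns an (index, value) pair, so the exported model has
+# two outputs; the C++ and SDK post-processing read the class index from output 0.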
+
+def build_train(dataset, eval_data, lr, save_path=None, run_distribute=False):
+    """build train"""
+    net_with_loss = HModelWithLoss(config)
+    net_with_loss.init_parameters_data()
+    if save_path is not None:
+        parameter_dict = load_checkpoint(save_path)
+        load_param_into_net(net_with_loss, parameter_dict)
+    if dataset is None:
+        raise ValueError("pre-process dataset must be provided")
+    optimizer = RiemannianAdam(learning_rate=lr,
+                               params=filter(lambda x: x.requires_grad, net_with_loss.get_parameters()))
+    net_with_grads = HModelTrainOneStepCell(net_with_loss, optimizer=optimizer)
+    net_with_grads.set_train()
+    model = Model(net_with_grads)
+    print("Prepare to Training....")
+    epoch_size = dataset.get_repeat_count()
+    print("Epoch size ", epoch_size)
+    eval_cb = EvalCallBack(net_with_loss.hmodel, eval_data, config.eval_step,
+                           config.outputdir + '/' + 'hypertext_' + config.datasetType + '.ckpt')
+    callbacks = [LossMonitor(10), eval_cb, TimeMonitor(50)]
+    if run_distribute:
+        print(f" | Rank {MultiDevice.get_rank()} Call model train.")
+    model.train(epoch=config.num_epochs, train_dataset=dataset, callbacks=callbacks, dataset_sink_mode=False)
+
+
+def set_parallel_env():
+    """set parallel env"""
+    context.reset_auto_parallel_context()
+    MultiDevice.init()
+    context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
+                                      device_num=MultiDevice.get_group_size(),
+                                      gradients_mean=True)
+
+
+def train_single(train_data, dev_data, lr):
+    """train single"""
+    print("Starting training on single device.")
+    data_iter = build_dataloader(train_data, config.batch_size, config.max_length)
+    dev_iter = build_dataloader(dev_data, config.batch_size, config.max_length)
+    build_train(data_iter, dev_iter, lr, save_path=None, run_distribute=False)
+
+
+def train_parallel(train_data, dev_data, lr):
+    """train parallel"""
+    set_parallel_env()
+    print("Starting training on multiple devices.")
+    data_iter = build_dataloader(train_data, config.batch_size, config.max_length,
+                                 rank_size=MultiDevice.get_group_size(),
+                                 rank_id=MultiDevice.get_rank(),
+                                 shuffle=False)
+    dev_iter = build_dataloader(dev_data, config.batch_size, config.max_length,
+                                rank_size=MultiDevice.get_group_size(),
+                                rank_id=MultiDevice.get_rank(),
+                                shuffle=False)
+    build_train(data_iter, dev_iter, lr, save_path=None, run_distribute=True)
+
+
+def run_train(train_data, dev_data, lr, run_distribute):
+    """run train"""
+    if config.device == "GPU":
+        init("nccl")
+        config.rank_id = get_rank()
+    if run_distribute:
+        train_parallel(train_data, dev_data, lr)
+    else:
+        train_single(train_data, dev_data, lr)
+
+
+def run_export():
+    hmodel = HModel(config)
+    file_name = 'hypertext_' + config.datasetType
+    param_dict = load_checkpoint(os.path.join(args.train_url, file_name + '.ckpt'))
+    load_param_into_net(hmodel, param_dict)
+    ht_infer = HyperTextTextInferExportCell(hmodel)
+    x1 = Tensor(np.ones((1, config.max_length)).astype(np.int32))
+    x2 = Tensor(np.ones((1, config.max_length)).astype(np.int32))
+    export(ht_infer, x1, x2, file_name=os.path.join(args.train_url, file_name), file_format='AIR')
+
+
+run_train(train_dataset, dev_dataset, config.learning_rate, args.run_distribute)
+run_export()
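+
+# Example launch (hypothetical ModelArts-style paths):
+#   python3 train_start.py --data_url /cache/data --train_url /cache/output --datasetType tnews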