diff --git a/official/nlp/lstm/Dockerfile b/official/nlp/lstm/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..0bbed24751307289ba46f534e191d65edca221c5
--- /dev/null
+++ b/official/nlp/lstm/Dockerfile
@@ -0,0 +1,20 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+ARG FROM_IMAGE_NAME
+FROM $FROM_IMAGE_NAME
+
+COPY requirements.txt .
+RUN pip3.7 install -r requirements.txt
\ No newline at end of file
diff --git a/official/nlp/lstm/infer/docker_start_infer.sh b/official/nlp/lstm/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f501a561c22748e5077f399e3102825bdb44ae78
--- /dev/null
+++ b/official/nlp/lstm/infer/docker_start_infer.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
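+# Usage: bash docker_start_infer.sh <docker_image> <share_dir>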
+docker_image=$1
+share_dir=$2
+echo "$1"
+echo "$2"
+if [ -z "${docker_image}" ]; then
+    echo "please input docker_image"
+    exit 1
+fi
+
+if [ ! -d "${share_dir}" ]; then
+    echo "please input share directory that contains dataset, models and codes"
+    exit 1
+fi
+
+
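+# Mount the Ascend driver, the npu-smi tool and the shared directory into the container.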
+docker run -it \
+    --device=/dev/davinci0 \
+    --device=/dev/davinci_manager \
+    --device=/dev/devmm_svm \
+    --device=/dev/hisi_hdc \
+    --privileged \
+    -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
+    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+    -v ${share_dir}:${share_dir} \
+    ${docker_image} \
+    /bin/bash
diff --git a/official/nlp/lstm/infer/model/model_conversion.sh b/official/nlp/lstm/infer/model/model_conversion.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5acb19a44f663cda415008a912019371fde42f9f
--- /dev/null
+++ b/official/nlp/lstm/infer/model/model_conversion.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# This script converts the AIR model file to an .om model file.
+
+
+# Set environment PATH (Please confirm that the install_path is correct).
+
+export install_path=/usr/local/Ascend
+export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
+export PYTHONPATH=${install_path}/atc/python/site-packages:${install_path}/atc/python/site-packages/auto_tune.egg/auto_tune:${install_path}/atc/python/site-packages/schedule_search.egg
+export LD_LIBRARY_PATH=${install_path}/atc/lib64:$LD_LIBRARY_PATH
+export ASCEND_OPP_PATH=${install_path}/opp
+
+
+# Execute, transform LSTM model.
+
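+# --framework=1 indicates a MindSpore AIR model; --soc_version must match the target Ascend chip.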
+atc --model=./LSTM.air --framework=1 --output=./LSTM --soc_version=Ascend310  --log=error
+
+exit 0
\ No newline at end of file
diff --git a/official/nlp/lstm/infer/mxbase/CMakeLists.txt b/official/nlp/lstm/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..79337195b89cc7e7d0d51d969ccf4442f02e3b7c
--- /dev/null
+++ b/official/nlp/lstm/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,51 @@
+cmake_minimum_required(VERSION 3.10.0)
+project(LSTM)
+
+set(TARGET lstm)
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_definitions(-Dgoogle=mindxsdk_private)
+add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall)
+add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -s -pie)
+
+# Check environment variable
+if(NOT DEFINED ENV{ASCEND_HOME})
+    message(FATAL_ERROR "please define environment variable: ASCEND_HOME")
+endif()
+if(NOT DEFINED ENV{ASCEND_VERSION})
+    message(WARNING "please define environment variable: ASCEND_VERSION")
+endif()
+if(NOT DEFINED ENV{ARCH_PATTERN})
+    message(WARNING "please define environment variable: ARCH_PATTERN")
+endif()
+set(ACL_INC_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/include)
+set(ACL_LIB_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/lib64)
+
+set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
+set(MXBASE_INC ${MXBASE_ROOT_DIR}/include)
+set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib)
+set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors)
+set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/include/MxBase/postprocess/include)
+if(DEFINED ENV{MXSDK_OPENSOURCE_DIR})
+    set(OPENSOURCE_DIR $ENV{MXSDK_OPENSOURCE_DIR})
+else()
+    set(OPENSOURCE_DIR ${MXBASE_ROOT_DIR}/opensource)
+endif()
+
+include_directories(${ACL_INC_DIR})
+include_directories(${OPENSOURCE_DIR}/include)
+include_directories(${OPENSOURCE_DIR}/include/opencv4)
+
+include_directories(${MXBASE_INC})
+include_directories(${MXBASE_POST_PROCESS_DIR})
+
+link_directories(${ACL_LIB_DIR})
+link_directories(${OPENSOURCE_DIR}/lib)
+link_directories(${MXBASE_LIB_DIR})
+link_directories(${MXBASE_POST_LIB_DIR})
+
+add_executable(${TARGET} src/main.cpp src/SentimentNet.cpp)
+target_link_libraries(${TARGET} glog cpprest mxbase opencv_world stdc++fs)
+
+install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/)
diff --git a/official/nlp/lstm/infer/mxbase/build.sh b/official/nlp/lstm/infer/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..33696f2361f1cd8848029f71d576da372347ea40
--- /dev/null
+++ b/official/nlp/lstm/infer/mxbase/build.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+path_cur=$(dirname "$0")
+
+function check_env()
+{
+    # set ASCEND_VERSION to ascend-toolkit/latest when it is not specified by the user
+    if [ ! "${ASCEND_VERSION}" ]; then
+        export ASCEND_VERSION=ascend-toolkit/latest
+        echo "Set ASCEND_VERSION to the default value: ${ASCEND_VERSION}"
+    else
+        echo "ASCEND_VERSION is set to ${ASCEND_VERSION} by user"
+    fi
+
+    if [ ! "${ARCH_PATTERN}" ]; then
+        # set ARCH_PATTERN to ./ when it is not specified by the user
+        export ARCH_PATTERN=./
+        echo "ARCH_PATTERN is set to the default value: ${ARCH_PATTERN}"
+    else
+        echo "ARCH_PATTERN is set to ${ARCH_PATTERN} by user"
+    fi
+}
+
+function build_lstm()
+{
+    cd "$path_cur" || exit
+    rm -rf build
+    mkdir -p build
+    cd build || exit
+    cmake ..
+    make
+    ret=$?
+    if [ ${ret} -ne 0 ]; then
+        echo "Failed to build bert."
+        exit ${ret}
+    fi
+    make install
+}
+
+check_env
+build_lstm
\ No newline at end of file
diff --git a/official/nlp/lstm/infer/mxbase/run.sh b/official/nlp/lstm/infer/mxbase/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..56981aa6398bac5139a98e70c40b8c7d2df7df01
--- /dev/null
+++ b/official/nlp/lstm/infer/mxbase/run.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# curr path
+cur_path=$(
+  cd "$(dirname "$0")" || exit
+  pwd
+)
+# env ready
+env_ready=true
+
+# execute arguments
+input_sentences_dir=''
+max_load_num=''
+evaluate=false
+label_path=''
+
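+# Usage (flags are forwarded to the lstm binary as positional arguments, see execute()):
+#   bash run.sh -i <input_sentences_dir> [-m <max_load_num>] [-e true -l <label_path>]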
+function parse_arguments() {
+  echo "parsing arguments ..."
+  while getopts "i:m:e:l:" opt; do
+    case ${opt} in
+    i)
+      input_sentences_dir=$OPTARG
+      ;;
+    m)
+      max_load_num=$OPTARG
+      ;;
+    e)
+      evaluate=$OPTARG
+      ;;
+    l)
+      label_path=$OPTARG
+      ;;
+    *)
+      echo "*分支:${OPTARG}"
+      ;;
+    esac
+  done
+
+  # print arguments
+  echo "---------------------------"
+  echo "| execute arguments"
+  echo "| input sentences dir: $input_sentences_dir"
+  echo "| max load num: $max_load_num"
+  echo "| evaluate: $evaluate"
+  echo "| input sentences label path: $label_path"
+  echo "---------------------------"
+}
+
+function check_env() {
+  echo "checking env ..."
+  # check MindXSDK env
+  if [ ! "${MX_SDK_HOME}" ]; then
+    env_ready=false
+    echo "please set MX_SDK_HOME path into env."
+  else
+    echo "MX_SDK_HOME set as ${MX_SDK_HOME}, ready."
+  fi
+}
+
+function execute() {
+  if [ "${max_load_num}" == '' ]; then
+    if [ "${evaluate}" == true ]; then
+        ./lstm "$input_sentences_dir" 1 "$label_path"
+    else
+        ./lstm "$input_sentences_dir"
+    fi
+  else
+    if [ "${evaluate}" == true ]; then
+        ./lstm "$input_sentences_dir" "$max_load_num" 1 "$label_path"
+    else
+        ./lstm "$input_sentences_dir" "$max_load_num"
+    fi
+  fi
+}
+
+function run() {
+  echo -e "\ncurrent dir: $cur_path"
+  # parse arguments
+  parse_arguments "$@"
+
+  # check environment
+  check_env
+  if [ "${env_ready}" == false ]; then
+    echo "please set env first."
+    exit 0
+  fi
+
+  # check file
+  if [ ! -f "${cur_path}/lstm" ]; then
+    echo "lstm not exist, please build first."
+    exit 0
+  fi
+
+  echo "---------------------------"
+  echo "prepare to execute program."
+  echo -e "---------------------------\n"
+
+  execute
+}
+
+run "$@"
+exit 0
diff --git a/official/nlp/lstm/infer/mxbase/src/SentimentNet.cpp b/official/nlp/lstm/infer/mxbase/src/SentimentNet.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7646b0227d2dd91a17780b34710f77626594ac1d
--- /dev/null
+++ b/official/nlp/lstm/infer/mxbase/src/SentimentNet.cpp
@@ -0,0 +1,351 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SentimentNet.h"
+#include <unistd.h>
+#include <sys/stat.h>
+#include <map>
+#include <fstream>
+#include <cmath>
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+
+const uint32_t MAX_LENGTH = 128000;
+
+/**
+ * Init SentimentNet with {@link InitParam}
+ * @param initParam const reference to initial param
+ * @return status code of whether initialization is successful
+ */
+APP_ERROR SentimentNet::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    // init device manager
+    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+    // set tensor context
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+    // init model inference processor
+    model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+/**
+ * De-init SentimentNet
+ * @return status code of whether de-initialization is successful
+ */
+APP_ERROR SentimentNet::DeInit() {
+    model_->DeInit();
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+/**
+ * read tensor from file
+ * @param file const reference to file name
+ * @param data the pointer to data which store the read data
+ * @param size data size
+ * @return status code of whether read data is successful
+ */
+APP_ERROR SentimentNet::ReadTensorFromFile(const std::string &file, uint32_t *data, const uint32_t size) {
+    if (data == NULL || size < MAX_LENGTH) {
+        LogError << "input data is invalid.";
+        return APP_ERR_COMM_INVALID_POINTER;
+    }
+    std::ifstream infile;
+    // open sentence file
+    infile.open(file, std::ios_base::in | std::ios_base::binary);
+    // check sentence file validity
+    if (infile.fail()) {
+        LogError << "Failed to open label file: " << file << ".";
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    // read sentence data
+    infile.read(reinterpret_cast<char *>(data), sizeof(uint32_t) * MAX_LENGTH);
+    infile.close();
+    return APP_ERR_OK;
+}
+
+/**
+ * read input tensor from file
+ * @param fileName const reference to file name
+ * @param index index of modelDesc inputTensors
+ * @param inputs reference to input tensor stored
+ * @return status code of whether reading tensor is successful
+ */
+APP_ERROR SentimentNet::ReadInputTensor(const std::string &fileName, uint32_t index,
+                                        const std::shared_ptr<std::vector<MxBase::TensorBase>> &inputs) {
+    // read data from file
+    uint32_t data[MAX_LENGTH] = {0};
+    APP_ERROR ret = ReadTensorFromFile(fileName, data, MAX_LENGTH);
+    if (ret != APP_ERR_OK) {
+        LogError << "ReadTensorFromFile failed.";
+        return ret;
+    }
+
+    // convert memory type
+    const uint32_t dataSize = modelDesc_.inputTensors[index].tensorSize;  // 128000 bytes = batch 64 * 500 word ids * sizeof(int32)
+    MxBase::MemoryData memoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE, deviceId_);
+    MxBase::MemoryData memoryDataSrc(reinterpret_cast<void *>(data), dataSize, MxBase::MemoryData::MEMORY_HOST_MALLOC);
+    ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc and copy failed.";
+        return ret;
+    }
+
+    // construct input tensor
+    std::vector <uint32_t> shape = {1, MAX_LENGTH};
+    inputs->push_back(MxBase::TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_UINT32));
+    return APP_ERR_OK;
+}
+
+/**
+ * Sentence Classification
+ * @param inputs const reference to word vector of sentence
+ * @param outputs reference to the model output tensors
+ * @return status code of whether inference is successful
+ */
+APP_ERROR SentimentNet::Inference(const std::vector<MxBase::TensorBase> &inputs,
+                                  const std::shared_ptr<std::vector<MxBase::TensorBase>> &outputs) {
+    // construct output tensor container
+    auto dtypes = model_->GetOutputDataType();
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector <uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t) modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        MxBase::TensorBase tensor(shape, dtypes[i], MxBase::MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
+        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs->push_back(tensor);
+    }
+
+    MxBase::DynamicInfo dynamicInfo = {};
+    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+
+    // statistic inference delay
+    auto startTime = std::chrono::high_resolution_clock::now();
+    APP_ERROR ret = model_->ModelInference(inputs, *(outputs.get()), dynamicInfo);
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    g_infer_cost.push_back(costMs);
+
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+/**
+ * choose the max probability as the inference result
+ * @param outputs reference to model output tensors
+ * @param argmax reference to the index of max probability
+ * @return status code of whether post-process is successful
+ */
+APP_ERROR SentimentNet::PostProcess(const std::shared_ptr<std::vector<MxBase::TensorBase>> &outputs,
+                                    const std::shared_ptr<std::vector<uint32_t>> &argmax, bool printResult) {
+    MxBase::TensorBase &tensor = outputs->at(0);
+    APP_ERROR ret = tensor.ToHost();
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Tensor deploy to host failed.";
+        return ret;
+    }
+
+    // get output tensor info
+    auto outputShape = tensor.GetShape();
+    uint32_t length = outputShape[0];
+    uint32_t classNum = outputShape[1];
+    void *data = tensor.GetBuffer();
+
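+    // the output tensor holds raw logits laid out as [length, classNum]; softmax is applied below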
+    // get inference result
+    for (uint32_t i = 0; i < length; i++) {
+        std::vector<float> result = {};
+        std::vector<float> softmax_result = {};
+        float sum_softmax = 0.0;
+        std::string inferOutputTensor = "infer output tensor: [";
+        for (uint32_t j = 0; j < classNum; j++) {
+            float value = *(reinterpret_cast<float *>(data) + i * classNum + j);
+            if (j == classNum - 1) {
+                inferOutputTensor = inferOutputTensor + std::to_string(value) + "]";
+            } else {
+                inferOutputTensor = inferOutputTensor + std::to_string(value) + ",";
+            }
+            softmax_result.push_back(std::exp(value));
+            sum_softmax += softmax_result[j];
+        }
+        // softmax
+        std::string softMaxResult = "softmax result: [";
+        for (uint32_t j = 0; j < classNum; j++) {
+            float value = softmax_result[j] / sum_softmax;
+            if (j == classNum - 1) {
+                softMaxResult = softMaxResult + std::to_string(value) + "]";
+            } else {
+                softMaxResult = softMaxResult + std::to_string(value) + ",";
+            }
+            result.push_back(value);
+        }
+        // argmax and get the classification id
+        std::vector<float>::iterator maxElement = std::max_element(std::begin(result), std::end(result));
+        uint32_t argmaxIndex = maxElement - std::begin(result);
+        argmax->push_back(argmaxIndex);
+        std::string infer_result = argmaxIndex == 1 ? "1-pos" : "0-neg";
+
+        if (printResult) {
+            LogDebug << inferOutputTensor;
+            LogDebug << softMaxResult;
+            LogDebug << "infer result: " << infer_result;
+        }
+    }
+    return APP_ERR_OK;
+}
+
+/**
+ * count true positive, false positive, true negative, false negative, calculate real-time accuracy
+ * @param labels const reference to the corresponding real labels of input sentences
+ * @param startIndex the start index of real labels
+ * @param argmax const reference to the model inference result
+ */
+void SentimentNet::CountPredictResult(const std::vector<uint32_t> &labels, uint32_t startIndex,
+                                      const std::vector<uint32_t> &argmax) {
+    uint32_t dataSize = argmax.size();
+
+    // compare with ground truth
+    for (uint32_t i = 0; i < dataSize; i++) {
+        bool positive = false;
+        if (labels[i + startIndex] == 1) {
+            positive = true;
+        }
+
+        if (labels[i + startIndex] == argmax[i]) {
+            if (positive) {
+                g_true_positive += 1;
+            } else {
+                g_true_negative += 1;
+            }
+        } else {
+            if (positive) {
+                // actual label positive but predicted negative
+                g_false_negative += 1;
+            } else {
+                // actual label negative but predicted positive
+                g_false_positive += 1;
+            }
+        }
+    }
+
+    uint32_t total = g_true_positive + g_false_positive + g_true_negative + g_false_negative;
+    LogInfo << "TP: " << g_true_positive << ", FP: " << g_false_positive
+            << ", TN: " << g_true_negative << ", FN: " << g_false_negative;
+    LogInfo << "current accuracy: "
+            << (g_true_positive + g_true_negative) * 1.0 / total;
+}
+
+/**
+ * write model inference result to file
+ * @param fileName result file name
+ * @param argmax const reference of model inference result
+ * @return status code of whether writing file is successful
+ */
+APP_ERROR SentimentNet::WriteResult(const std::string &fileName, const std::vector<uint32_t> &argmax, bool firstInput) {
+    std::string resultPathName = "result";
+    // create result directory when it does not exit
+    if (access(resultPathName.c_str(), 0) != 0) {
+        int ret = mkdir(resultPathName.c_str(), S_IRUSR | S_IWUSR | S_IXUSR);
+        if (ret != 0) {
+            LogError << "Failed to create result directory: " << resultPathName << ", ret = " << ret;
+            return APP_ERR_COMM_OPEN_FAIL;
+        }
+    }
+    // create result file under result directory
+    resultPathName = resultPathName + "/result.txt";
+    std::ofstream resultFile(resultPathName, firstInput ? std::ofstream::ate : std::ofstream::app);
+    if (resultFile.fail()) {
+        LogError << "Failed to open result file: " << resultPathName;
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    // write inference result into file
+    resultFile << "file name is: " << fileName << " review num: " << std::to_string(argmax.size()) << std::endl;
+    for (uint32_t i = 0; i < argmax.size(); i++) {
+        std::string prediction = argmax.at(i) == 1 ? "positive" : "negative";
+        resultFile << std::to_string(i + 1) << "-th review: " << prediction << std::endl;
+    }
+    resultFile.close();
+    return APP_ERR_OK;
+}
+
+/**
+ * Emotional classification of the input sentences, result is positive or negative
+ * @param inferPath const reference to input sentences dir
+ * @param fileName const reference to sentences file name
+ * @param eval whether do evaluation
+ * @param labels const reference to the corresponding real label of input sentences
+ * @param startIndex the start index of real labels in curr inference round
+ * @return status code of whether the workflow is successful
+ */
+APP_ERROR SentimentNet::Process(const std::string &inferPath, const std::string &fileName, bool firstInput,
+                                bool eval, const std::vector<uint32_t> &labels, const uint32_t startIndex) {
+    // read word vector of sentences
+    std::shared_ptr<std::vector<MxBase::TensorBase>> inputs = std::make_shared<std::vector<MxBase::TensorBase>>();
+    std::string inputSentencesFile = inferPath + fileName;
+    APP_ERROR ret = ReadInputTensor(inputSentencesFile, 0, inputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "Read input ids failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    // model inference
+    std::shared_ptr<std::vector<MxBase::TensorBase>> outputs = std::make_shared<std::vector<MxBase::TensorBase>>();
+    ret = Inference(*(inputs.get()), outputs);
+    if (ret != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    // softmax to get final inference result
+    std::shared_ptr<std::vector<uint32_t>> argmax = std::make_shared<std::vector<uint32_t>>();
+    ret = PostProcess(outputs, argmax, false);
+    if (ret != APP_ERR_OK) {
+        LogError << "PostProcess failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    // save inference result
+    ret = WriteResult(fileName, *(argmax.get()), firstInput);
+    if (ret != APP_ERR_OK) {
+        LogError << "save result failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    // evaluation
+    if (eval) {
+        CountPredictResult(labels, startIndex, *(argmax.get()));
+    }
+
+    return APP_ERR_OK;
+}
diff --git a/official/nlp/lstm/infer/mxbase/src/SentimentNet.h b/official/nlp/lstm/infer/mxbase/src/SentimentNet.h
new file mode 100644
index 0000000000000000000000000000000000000000..2126bac1c71ab9edf62a6d6c3c77cb844da1461f
--- /dev/null
+++ b/official/nlp/lstm/infer/mxbase/src/SentimentNet.h
@@ -0,0 +1,73 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SENTIMENTNET_H
+#define SENTIMENTNET_H
+
+#include <memory>
+#include <utility>
+#include <vector>
+#include <string>
+#include <map>
+#include <opencv2/opencv.hpp>
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+
+extern std::vector<double> g_infer_cost;
+extern uint32_t g_true_positive;
+extern uint32_t g_false_positive;
+extern uint32_t g_true_negative;
+extern uint32_t g_false_negative;
+
+struct InitParam {
+    uint32_t deviceId;
+    std::string modelPath;
+};
+
+class SentimentNet {
+ public:
+    APP_ERROR Init(const InitParam &initParam);
+
+    APP_ERROR DeInit();
+
+    APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs,
+                        const std::shared_ptr<std::vector<MxBase::TensorBase>> &outputs);
+
+    APP_ERROR Process(const std::string &inferPath, const std::string &fileName, bool firstInput,
+                      bool eval, const std::vector<uint32_t> &labels, const uint32_t startIndex);
+
+    APP_ERROR PostProcess(const std::shared_ptr<std::vector<MxBase::TensorBase>> &outputs,
+                          const std::shared_ptr<std::vector<uint32_t>> &argmax, bool printResult);
+
+ protected:
+    APP_ERROR ReadTensorFromFile(const std::string &file, uint32_t *data, const uint32_t size);
+
+    APP_ERROR ReadInputTensor(const std::string &fileName, uint32_t index,
+                              const std::shared_ptr<std::vector<MxBase::TensorBase>> &inputs);
+
+    APP_ERROR WriteResult(const std::string &fileName, const std::vector<uint32_t> &argmax, bool firstInput);
+
+    void
+    CountPredictResult(const std::vector<uint32_t> &labels, uint32_t startIndex, const std::vector<uint32_t> &argmax);
+
+ private:
+    std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
+    MxBase::ModelDesc modelDesc_ = {};
+    uint32_t deviceId_ = 0;
+};
+
+#endif
diff --git a/official/nlp/lstm/infer/mxbase/src/main.cpp b/official/nlp/lstm/infer/mxbase/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ddbd34020f37bae92e812c0ce64209947406c938
--- /dev/null
+++ b/official/nlp/lstm/infer/mxbase/src/main.cpp
@@ -0,0 +1,184 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <dirent.h>
+#include <iostream>
+#include <fstream>
+#include <vector>
+#include "SentimentNet.h"
+#include "MxBase/Log/Log.h"
+
+std::vector<double> g_infer_cost;
+uint32_t g_true_positive = 0;
+uint32_t g_false_positive = 0;
+uint32_t g_true_negative = 0;
+uint32_t g_false_negative = 0;
+
+static void InitSentimentNetParam(std::shared_ptr<InitParam> initParam) {
+    initParam->deviceId = 0;
+    initParam->modelPath = "../model/LSTM.om";
+}
+
+static APP_ERROR ReadFilesFromPath(const std::string &path, std::shared_ptr<std::vector<std::string>> files) {
+    DIR *dir = NULL;
+    struct dirent *ptr = NULL;
+
+    if ((dir = opendir(path.c_str())) == NULL) {
+        LogError << "Open dir error: " << path;
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+
+    while ((ptr = readdir(dir)) != NULL) {
+        // d_type == DT_REG (8) means a regular file
+        if (ptr->d_type == 8) {
+            files->push_back(ptr->d_name);
+        }
+    }
+    closedir(dir);
+    // sort ascending order
+    sort(files->begin(), files->end());
+    return APP_ERR_OK;
+}
+
+static void CalculateMetrics() {
+    // calculate metrics
+    LogInfo << "==============================================================";
+    uint32_t total = g_true_positive + g_false_positive + g_true_negative + g_false_negative;
+    float accuracy = (g_true_positive + g_true_negative) * 1.0 / total;
+    float posPrecision = g_true_positive * 1.0 / (g_true_positive + g_false_positive);
+    float negPrecision = g_true_negative * 1.0 / (g_true_negative + g_false_negative);
+    LogInfo << "total accuracy: " << accuracy;
+    LogInfo << "positive precision: " << posPrecision;
+    LogInfo << "negative precision: " << negPrecision;
+    float posRecall = g_true_positive * 1.0 / (g_true_positive + g_false_negative);
+    float negRecall = g_true_negative * 1.0 / (g_true_negative + g_false_positive);
+    LogInfo << "positive recall: " << posRecall;
+    LogInfo << "negative recall: " << negRecall;
+    LogInfo << "positive F1 Score: " << 2 * posPrecision * posRecall / (posPrecision + posRecall);
+    LogInfo << "negative F1 Score: " << 2 * negPrecision * negRecall / (negPrecision + negRecall);
+    LogInfo << "==============================================================";
+}
+
+int main(int argc, char *argv[]) {
+    bool eval = false;
+    uint32_t maxLoadNum = 2;
+    std::string inferPath = "";
+    std::string labelPath = "";
+
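+    // Supported invocations (mirrored by infer/mxbase/run.sh):
+    //   ./lstm <inferPath>
+    //   ./lstm <inferPath> <maxLoadNum>
+    //   ./lstm <inferPath> <eval(0|1)> <labelPath>
+    //   ./lstm <inferPath> <maxLoadNum> <eval(0|1)> <labelPath>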
+    int numArgTwo = 2;
+    int numArgThree = 3;
+    int numArgFour = 4;
+    int numArgFive = 5;
+    if (argc <= 1) {
+        LogWarn << "Please input sentences file path, such as './lstm /input/data 2 true /input/label.txt'.";
+        return APP_ERR_OK;
+    } else if (argc == numArgTwo) {
+        inferPath = argv[1];
+    } else if (argc == numArgThree) {
+        inferPath = argv[1];
+        maxLoadNum = atoi(argv[2]);
+    } else if (argc == numArgFour) {
+        inferPath = argv[1];
+        eval = atoi(argv[2]);
+        labelPath = argv[3];
+    } else if (argc == numArgFive) {
+        inferPath = argv[1];
+        maxLoadNum = atoi(argv[2]);
+        eval = atoi(argv[3]);
+        labelPath = argv[4];
+    }
+
+    if (inferPath == "") {
+        LogWarn << "Input sentences dir is null, use default config";
+        inferPath = "../dataset/aclImdb/preprocess/00_data/";
+    }
+    if (eval && labelPath == "") {
+        LogWarn << "Input sentences label path is null, use default config";
+        labelPath = "../dataset/aclImdb/preprocess/labels.txt";
+    }
+
+    // load sentences files
+    std::shared_ptr<std::vector<std::string>> files = std::make_shared<std::vector<std::string>>();
+    APP_ERROR ret = ReadFilesFromPath(inferPath, files);
+    if (ret != APP_ERR_OK) {
+        LogError << "Read files from path failed, ret=" << ret << ".";
+        return ret;
+    }
+    LogInfo << "test set files: " << std::to_string(files->size());
+
+    // load sentences labels
+    std::vector<uint32_t> labels;
+    if (eval) {
+        std::ifstream labelFile(labelPath);
+        std::string line;
+        while (std::getline(labelFile, line)) {
+            labels.push_back(atoi(line.c_str()));
+        }
+        labelFile.close();
+
+        LogInfo << "test set size: " << std::to_string(labels.size());
+    }
+
+    // init SentimentNet
+    std::shared_ptr<InitParam> initParam = std::make_shared<InitParam>();
+    InitSentimentNetParam(initParam);
+    auto sentimentNet = std::make_shared<SentimentNet>();
+    ret = sentimentNet->Init(*(initParam.get()));
+    if (ret != APP_ERR_OK) {
+        LogError << "SentimentNet init failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    // infer
+    const uint32_t batchSize = 64;
+    LogInfo << "Max test num: " << std::to_string(maxLoadNum) << " data batch size: " << std::to_string(batchSize);
+    bool firstInput = true;
+    for (uint32_t i = 0; i < files->size(); i++) {
+        if (i + 1 > maxLoadNum) {
+            break;
+        }
+        std::string fileName = "LSTM_data_bs64_" + std::to_string(i) + ".bin";
+        LogInfo << "read file name: " << fileName;
+        ret = sentimentNet->Process(inferPath, fileName, firstInput, eval, labels, i * batchSize);
+        firstInput = false;
+        if (ret != APP_ERR_OK) {
+            LogError << "SentimentNet process failed, ret=" << ret << ".";
+            sentimentNet->DeInit();
+            return ret;
+        }
+    }
+
+    // evaluate and statistic delay
+    if (eval) {
+        CalculateMetrics();
+    }
+    double costSum = 0;
+    for (uint32_t i = 0; i < g_infer_cost.size(); i++) {
+        costSum += g_infer_cost[i];
+    }
+    LogInfo << "Infer sentences sum " << g_infer_cost.size() << ", cost total time: " << costSum << " ms.";
+    LogInfo << "The throughput: " << g_infer_cost.size() * 1000 / costSum << " bin/sec.";
+
+    // DeInit SentimentNet
+    ret = sentimentNet->DeInit();
+    if (ret != APP_ERR_OK) {
+        LogError << "SentimentNet DeInit failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    return APP_ERR_OK;
+}
diff --git a/official/nlp/lstm/infer/sdk/main.py b/official/nlp/lstm/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba56f098ec30042502da18d898e2c033c1a4a341
--- /dev/null
+++ b/official/nlp/lstm/infer/sdk/main.py
@@ -0,0 +1,112 @@
+# coding = utf-8
+"""
+Copyright 2021 Huawei Technologies Co., Ltd
+
+Licensed under the BSD 3-Clause License  (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+https://opensource.org/licenses/BSD-3-Clause
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import argparse
+
+from sentiment import sentiment_analysis
+from utils import util
+
+
+def parse_args():
+    """
+    set and check parameters
+    """
+    parser = argparse.ArgumentParser(description="lstm process")
+    parser.add_argument("--pipeline", type=str, default='pipeline/lstm.pipeline',
+                        help="SDK infer pipeline")
+
+    parser.add_argument("--input_sentences_dir", type=str, default="../dataset/data/input",
+                        help="input sentences dir")
+    parser.add_argument("--max_load_num", type=str, default='', help="max num of infer files")
+
+    parser.add_argument("--parse_word_vector", type=bool, default=False,
+                        help="whether need to parse review to word vector")
+    parser.add_argument("--vocab_path", type=str, default="../dataset/data/imdb.vocab",
+                        help="vocabulary table path")
+
+    parser.add_argument("--do_eval", type=bool, default=False,
+                        help="whether evaluate sentiment model accuracy with test_set")
+    parser.add_argument("--test_set_feature_path", type=str, default="../dataset/aclImdb/preprocess/00_data",
+                        help="test dataset feature path, effective when enable do_eval")
+    parser.add_argument("--test_set_label_path", type=str, default="../dataset/aclImdb/preprocess/label_ids.npy",
+                        help="test dataset label path, effective when enable do_eval")
+
+    parser.add_argument("--result_dir", type=str, default="result", help="save result path")
+    parser.add_argument("--infer_result_file", type=str, default="infer.txt", help="infer result file name")
+    parser.add_argument("--eval_result_file", type=str, default="eval.txt", help="evaluate result file name")
+
+    args_opt = parser.parse_args()
+    return args_opt
+
+
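+# Example invocations (these mirror the calls made by infer/sdk/run.sh):
+#   python3.7 main.py --parse_word_vector=true
+#   python3.7 main.py --input_sentences_dir=<dir> --vocab_path=<vocab> --parse_word_vector=true
+#   python3.7 main.py --do_eval=true --test_set_feature_path=<features> --test_set_label_path=<labels.npy>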
+def run():
+    """
+    do infer and evaluate
+    """
+    config = parse_args()
+    print(config)
+
+    batch_size = 64
+    load_preprocess_num = None if config.max_load_num == '' else int(config.max_load_num)
+
+    if config.do_eval:
+        # load test set
+        sequences_word_vector, sequences_label, eval_files = util.load_preprocess_test_set(config.test_set_feature_path,
+                                                                                           config.test_set_label_path,
+                                                                                           load_preprocess_num)
+
+        # model inference
+        infer_result = sentiment_analysis.senti_analysis(sequences_word_vector, config.pipeline, is_batch=True,
+                                                         batch_size=batch_size)
+        if infer_result is None or infer_result.shape[0] < sequences_label.shape[0]:
+            print('Sentiment model infer error.')
+            return
+
+        # calculate evaluation metrics
+        metrics = util.calculate_metrics(sequences_label, infer_result)
+        # write infer result to file
+        util.write_infer_result_to_file(config.result_dir, config.infer_result_file, eval_files, infer_result, False)
+        # write evaluation result to file
+        util.write_eval_result_to_file(config.result_dir, config.eval_result_file, metrics)
+    else:
+        # load model input
+        if config.parse_word_vector:
+            sentence_word_vector, infer_files = util.convert_sentence_to_word_vector(config.input_sentences_dir,
+                                                                                     config.vocab_path)
+        else:
+            sentence_word_vector, infer_files = util.load_infer_input(config.input_sentences_dir, load_preprocess_num)
+
+        # model inference
+        if sentence_word_vector.shape[0] > 1:
+            infer_result = sentiment_analysis.senti_analysis(sentence_word_vector,
+                                                             config.pipeline, is_batch=True, batch_size=batch_size)
+        else:
+            infer_result = sentiment_analysis.senti_analysis(sentence_word_vector, config.pipeline, is_batch=False)
+
+        if infer_result is None:
+            print('Sentiment model infer error.')
+        else:
+            print('sentiment model infer result: ', infer_result.reshape(1, -1).squeeze())
+            # write infer result to file
+            util.write_infer_result_to_file(config.result_dir, config.infer_result_file, infer_files, infer_result,
+                                            config.parse_word_vector)
+
+    return
+
+
+if __name__ == '__main__':
+    run()
diff --git a/official/nlp/lstm/infer/sdk/pipeline/lstm.pipeline b/official/nlp/lstm/infer/sdk/pipeline/lstm.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..d380c933358b4fdc88d9808a09759223851afce2
--- /dev/null
+++ b/official/nlp/lstm/infer/sdk/pipeline/lstm.pipeline
@@ -0,0 +1,32 @@
+{
+    "sentiment": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0:0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "appsrc0",
+                "modelPath": "../model/LSTM.om"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "mxpi_dataserialize0"
+        },
+        "mxpi_dataserialize0": {
+            "props": {
+                "outputDataKeys": "mxpi_tensorinfer0"
+            },
+            "factory": "mxpi_dataserialize",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "factory": "appsink"
+        }
+    }
+}
\ No newline at end of file
diff --git a/official/nlp/lstm/infer/sdk/run.sh b/official/nlp/lstm/infer/sdk/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..aaa44ddee5b723fa2d0a27416b6e5bfa25115dee
--- /dev/null
+++ b/official/nlp/lstm/infer/sdk/run.sh
@@ -0,0 +1,187 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# curr path
+cur_path=$(
+  cd "$(dirname "$0")" || exit
+  pwd
+)
+# env ready
+env_ready=true
+# param check
+infer_param_ready=true
+eval_param_ready=true
+
+# execute arguments
+
+# execute mode opt = {'infer', 'evaluate'}
+mode='infer'
+# input sentence dir, effective in infer mode
+input=''
+# vocab path, effective in infer mode
+vocab=''
+# use preprocess sentences as input, opt = {true, false}
+use_preprocess=false
+# number of loading infer files
+load_preprocess_num=''
+# test set feature path, effective in evaluate mode
+feature=''
+# test set label path, effective in evaluate mode
+label=''
+# use default config
+use_default=false
+
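+# Usage examples (see execute() for how the flags map to main.py arguments):
+#   bash run.sh -m infer -i <sentences_dir> -v <vocab_path>
+#   bash run.sh -m infer -i <preprocessed_dir> -p true -n <max_load_num>
+#   bash run.sh -m evaluate -f <feature_dir> -l <label_path> -n <max_load_num>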
+function parse_arguments() {
+  echo "parsing arguments ..."
+  while getopts "m:i:v:p:n:f:l:u:" opt; do
+    case ${opt} in
+    m)
+      mode=$OPTARG
+      ;;
+    i)
+      input=$OPTARG
+      ;;
+    v)
+      vocab=$OPTARG
+      ;;
+    p)
+      use_preprocess=$OPTARG
+      ;;
+    n)
+      load_preprocess_num=$OPTARG
+      ;;
+    f)
+      feature=$OPTARG
+      ;;
+    l)
+      label=$OPTARG
+      ;;
+    u)
+      use_default=$OPTARG
+      ;;
+    *)
+      echo "*分支:${OPTARG}"
+      ;;
+    esac
+  done
+
+  # print arguments
+  echo "---------------------------"
+  echo "| execute arguments"
+  echo "| mode: $mode"
+  echo "| input sentence dir: $input"
+  echo "| vocab path: $vocab"
+  echo "| use preprocess sentences: $use_preprocess"
+  echo "| load preprocess num: $load_preprocess_num"
+  echo "| test set feature path: $feature"
+  echo "| test set label path: $label"
+  echo "| use default config: $use_default"
+  echo "---------------------------"
+}
+
+function check_env() {
+  echo "checking env ..."
+  # check MindXSDK env
+  if [ ! "${MX_SDK_HOME}" ]; then
+    env_ready=false
+    echo "please set MX_SDK_HOME path into env."
+  else
+    echo "MX_SDK_HOME set as ${MX_SDK_HOME}, ready."
+  fi
+}
+
+function check_infer_param() {
+    if [ "$input" == '' ]; then
+        infer_param_ready=false
+        echo "please config input sentence dir"
+    fi
+    if [ "$use_preprocess" == false ]; then
+      if [ "$vocab" == '' ]; then
+          infer_param_ready=false
+          echo "please config vocab path"
+      fi
+    else
+      if [ "$load_preprocess_num" == '' ]; then
+          infer_param_ready=false
+          echo "please config load preprocess num"
+      fi
+    fi
+}
+
+function check_eval_param() {
+  if [ "$feature" == '' ]; then
+    eval_param_ready=false
+    echo "please config test set feature path"
+  fi
+  if [ "$label" == '' ]; then
+    eval_param_ready=false
+    echo "please config test set label path"
+  fi
+}
+
+function execute() {
+  if [ "${mode}" == 'infer' ]; then
+    if [ "$use_default" == true ]; then
+        python3.7 main.py --parse_word_vector=true
+    else
+      check_infer_param
+      if [ "$infer_param_ready" == false ]; then
+          echo "please check infer parameters"
+          exit 0
+      fi
+      if [ "$use_preprocess" == true ]; then
+        python3.7 main.py --input_sentences_dir="$input" --max_load_num="$load_preprocess_num"
+      else
+        python3.7 main.py --input_sentences_dir="$input" --vocab_path="$vocab" --parse_word_vector=true
+      fi
+    fi
+  elif [ "${mode}" == 'evaluate' ]; then
+     if [ "$use_default" == true ]; then
+        python3.7 main.py --do_eval=true
+    else
+      check_eval_param
+      if [ "$eval_param_ready" == false ]; then
+        echo "please check evaluate parameters"
+        exit 0
+      fi
+      python3.7 main.py --do_eval=true --test_set_feature_path="$feature" --test_set_label_path="$label" --max_load_num="$load_preprocess_num"
+    fi
+  fi
+}
+
+function run() {
+  echo -e "\ncurrent dir: $cur_path"
+  # parse arguments
+  parse_arguments "$@"
+
+  # check environment
+  check_env
+  if [ "${env_ready}" == false ]; then
+    echo "please set env first."
+    exit 0
+  fi
+
+  echo "---------------------------"
+  echo "prepare to execute program."
+  echo -e "---------------------------\n"
+
+  execute
+}
+
+run "$@"
+exit 0
diff --git a/official/nlp/lstm/infer/sdk/sentiment/sentiment_analysis.py b/official/nlp/lstm/infer/sdk/sentiment/sentiment_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ab5d04c8b7302f30a7769dfa1feb4fd38ce423b
--- /dev/null
+++ b/official/nlp/lstm/infer/sdk/sentiment/sentiment_analysis.py
@@ -0,0 +1,208 @@
+# coding = utf-8
+"""
+Copyright 2021 Huawei Technologies Co., Ltd
+
+Licensed under the BSD 3-Clause License  (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+https://opensource.org/licenses/BSD-3-Clause
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+
+import MxpiDataType_pb2 as MxpiDataType
+import numpy as np
+from StreamManagerApi import StreamManagerApi, MxDataInput, InProtobufVector, \
+    MxProtobufIn, StringVector
+
+
+def send_source_data(appsrc_id, raw_data, stream_name, stream_manager):
+    """
+    Construct the input of the stream,
+    send inputs data to a specified stream based on stream name
+    :param appsrc_id: corresponding appsrc id
+    :param raw_data: data which need to send into stream
+    :param stream_name: the name of infer stream that needs to operate
+    :param stream_manager: the manager of infer streams
+    :return bool: send data success or not
+    """
+    tensor = np.array(raw_data).astype(np.int32)
+    # expand dims when the raw data is a single one-dimensional review
+    word_vector_length = 500
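+    # each review is a 500-element word-id vector (matches sentence_word_vector_size in utils/util.py)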
+    if len(raw_data.shape) == 1 and raw_data.shape[0] == word_vector_length:
+        tensor = np.expand_dims(tensor, 0)
+
+    tensor_package_list = MxpiDataType.MxpiTensorPackageList()
+    tensor_package = tensor_package_list.tensorPackageVec.add()
+    array_bytes = tensor.tobytes()
+    data_input = MxDataInput()
+    data_input.data = array_bytes
+    tensor_vec = tensor_package.tensorVec.add()
+    tensor_vec.deviceId = 0
+    tensor_vec.memType = 0
+    for i in tensor.shape:
+        tensor_vec.tensorShape.append(i)
+    tensor_vec.dataStr = data_input.data
+    tensor_vec.tensorDataSize = len(array_bytes)
+    key = "appsrc{}".format(appsrc_id).encode('utf-8')
+    protobuf_vec = InProtobufVector()
+    protobuf = MxProtobufIn()
+    protobuf.key = key
+    protobuf.type = b'MxTools.MxpiTensorPackageList'
+    protobuf.protobuf = tensor_package_list.SerializeToString()
+    protobuf_vec.push_back(protobuf)
+
+    ret = stream_manager.SendProtobuf(stream_name, appsrc_id, protobuf_vec)
+    if ret < 0:
+        print("Failed to send data to stream, ret = {}.".format(ret))
+        return False
+    return True
+
+
+def infer(stream_manager, stream_name, in_plugin_id, data_input):
+    """
+    send data into infer stream and get infer result
+    :param stream_manager: the manager of infer streams
+    :param stream_name: the name of infer stream that needs to operate
+    :param in_plugin_id: ID of the plug-in that needs to send data
+    :param data_input: data that needs to send into infer stream
+    :return: infer results
+    """
+    # Inputs data to a specified stream based on stream name
+    send_success = send_source_data(in_plugin_id, data_input, stream_name, stream_manager)
+    if not send_success:
+        print('Failed to send data to stream')
+        return None
+
+    # construct output plugin vector
+    plugin_names = [b"mxpi_tensorinfer0"]
+    plugin_vec = StringVector()
+    for key in plugin_names:
+        plugin_vec.push_back(key)
+
+    # get plugin output data
+    infer_result = stream_manager.GetProtobuf(stream_name, in_plugin_id, plugin_vec)
+
+    # check whether the inferred results are valid
+    infer_result_valid = True
+    if infer_result.size() == 0:
+        infer_result_valid = False
+        print('unable to get effective infer results, please check the stream log for details.')
+    elif infer_result[0].errorCode != 0:
+        infer_result_valid = False
+        print('GetProtobuf error. errorCode = {}, errorMsg= {}.'.format(
+            infer_result[0].errorCode, infer_result[0].data.decode()))
+
+    if not infer_result_valid:
+        return None
+
+    # get mxpi_tensorinfer0 output data
+    infer_result_list = MxpiDataType.MxpiTensorPackageList()
+    infer_result_list.ParseFromString(infer_result[0].messageBuf)
+    # get infer result data str
+    output_data_str = infer_result_list.tensorPackageVec[0].tensorVec[0].dataStr
+    output_data = np.frombuffer(output_data_str, dtype=np.float32)
+    # get predict probability with softmax function
+    pred_prop = np.exp(output_data) / sum(np.exp(output_data))
+    # get predict result
+    pred_result = np.argmax(pred_prop, axis=0)
+
+    # print result
+    print('origin output: ', output_data)
+    print('pred prob: ', pred_prop)
+    print('pred result: ', pred_result)
+
+    return pred_result
+
+
+def senti_analysis(sentence_word_vector, sentiment_pipeline_path, is_batch=False, batch_size=64):
+    """
+    sentiment analysis of review
+    :param sentence_word_vector: word vectors of reviews
+    :param sentiment_pipeline_path: pipeline path
+    :param is_batch: whether batch data
+    :param batch_size: batch size
+    :return: model inference result
+    """
+    stream_manager = StreamManagerApi()
+
+    # init stream manager
+    ret = stream_manager.InitManager()
+    if ret != 0:
+        print('Failed to init Stream manager, ret = {}.'.format(ret))
+        return None
+
+    if not os.path.exists(sentiment_pipeline_path):
+        print('pipeline {} not exist.'.format(sentiment_pipeline_path))
+        return None
+
+    # create streams by pipeline config file
+    with open(sentiment_pipeline_path, 'rb') as f:
+        pipeline = f.read().replace(b'\r', b'').replace(b'\n', b'')
+    pipeline_str = pipeline
+    ret = stream_manager.CreateMultipleStreams(pipeline_str)
+    if ret != 0:
+        print('Failed to create Stream, ret = {}.'.format(ret))
+        return None
+
+    # config
+    stream_name = b'sentiment'
+    in_plugin_id = 0
+
+    # infer results
+    sentiment_result = np.empty(shape=(0, 1))
+
+    # Construct the input of the stream
+    if not is_batch:
+        # model infer
+        result = infer(stream_manager, stream_name, in_plugin_id, sentence_word_vector)
+
+        # check infer result
+        if result is None:
+            print('sentiment model infer error.')
+            # destroy streams
+            stream_manager.DestroyAllStreams()
+            return None
+        sentiment_result = result
+    else:
+        batch_idx = 0
+        processed = 0
+        total = sentence_word_vector.shape[0]
+        # batch inference is not supported by the stream, so force the batch size to 1
+        batch_size = 1
+
+        percent_mask = 100.
+        for i in range(0, total, batch_size):
+            batch_idx += 1
+            if i + batch_size > total:
+                word_vector = sentence_word_vector[i:]
+            else:
+                word_vector = sentence_word_vector[i:(i + batch_size)]
+
+            # model infer
+            result = infer(stream_manager, stream_name, in_plugin_id, word_vector)
+
+            # check infer result
+            if result is None:
+                print('sentiment model infer error on {}-th batch sentences.'.format(batch_idx))
+                break
+
+            # save infer result
+            processed += word_vector.shape[0]
+            sentiment_result = np.vstack([sentiment_result, [result]])
+            print('batch size: {}, processed {}-th batch sentences, '
+                  '[{}/{} ({:.0f}%)].'.format(batch_size, batch_idx,
+                                              processed, total,
+                                              percent_mask * processed / total))
+
+    # destroy streams
+    stream_manager.DestroyAllStreams()
+
+    return sentiment_result
diff --git a/official/nlp/lstm/infer/sdk/utils/__init__.py b/official/nlp/lstm/infer/sdk/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c31871f9cfe2cab72ba18f2cc5b5ae488672f88
--- /dev/null
+++ b/official/nlp/lstm/infer/sdk/utils/__init__.py
@@ -0,0 +1,16 @@
+# coding = utf-8
+"""
+Copyright 2021 Huawei Technologies Co., Ltd
+
+Licensed under the BSD 3-Clause License  (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+https://opensource.org/licenses/BSD-3-Clause
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
diff --git a/official/nlp/lstm/infer/sdk/utils/util.py b/official/nlp/lstm/infer/sdk/utils/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..1eabc6ea63902399ff4da4216b7529f96185d8f2
--- /dev/null
+++ b/official/nlp/lstm/infer/sdk/utils/util.py
@@ -0,0 +1,192 @@
+# coding = utf-8
+"""
+Copyright 2021 Huawei Technologies Co., Ltd
+
+Licensed under the BSD 3-Clause License  (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+https://opensource.org/licenses/BSD-3-Clause
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+
+import numpy as np
+
+from .word_vector_parser import WordVectorParser
+
+
+def convert_sentence_to_word_vector(sentence_dir, vocab_path):
+    """
+    generate word vectors from sentence txt files
+    :param sentence_dir: directory where the sentence files are stored
+    :param vocab_path: path where the vocab table is stored
+    :return: word vectors and input file names
+    """
+    parser = WordVectorParser(sentence_dir, vocab_path)
+    parser.parse()
+
+    return parser.get_datas()
+
+
+def load_infer_input(input_sentences_dir, max_load_num=None):
+    """
+    load preprocessed data as model input
+    :param input_sentences_dir: directory where the preprocessed files are stored
+    :param max_load_num: the maximum number of files to load
+    :return: sentence word vectors and input file names
+    """
+    input_sentences_files = os.listdir(input_sentences_dir)
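+    # the preprocessed files are assumed to be named 'LSTM_data_bs64_<index>.bin' (default batch size 64),
+    # so characters [15:-4] hold the batch index used for sorting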
+    input_sentences_files.sort(key=lambda x: int(x[15:-4]))
+
+    batch_size = 64
+    sentence_word_vector_size = 500
+    max_load_num = len(input_sentences_files) if max_load_num is None else max_load_num
+
+    # load preprocessed data
+    sentence_word_vectors = np.empty(shape=(0, sentence_word_vector_size))
+    print('number of test sentences to load: {}'.format(max_load_num * batch_size))
+    for i in range(0, max_load_num):
+        feature_path = os.path.join(input_sentences_dir, input_sentences_files[i])
+        print('load ', feature_path)
+        data = np.fromfile(feature_path, dtype=np.int32).reshape(batch_size, sentence_word_vector_size)
+        sentence_word_vectors = np.vstack([sentence_word_vectors, data])
+
+    return sentence_word_vectors, input_sentences_files[:max_load_num]
+
+
+def load_preprocess_test_set(test_set_feature_path, test_set_label_path, max_load_num=None):
+    """
+    load preprocessed test set (feature: .bin label: .npy)
+    :param test_set_feature_path: directory where the feature files are stored
+    :param test_set_label_path: path where the label file is stored
+    :param max_load_num: the maximum number of feature files to load
+    :return: sentence word vectors, corresponding labels and test set file names
+    """
+    test_set_feature_files = os.listdir(test_set_feature_path)
+    test_set_feature_files.sort(key=lambda x: int(x[15:-4]))
+
+    batch_size = 64
+    sentence_word_vector_size = 500
+    max_load_num = len(test_set_feature_files) if max_load_num is None else max_load_num
+
+    # load features
+    sentence_word_vectors = np.empty(shape=(0, sentence_word_vector_size))
+    print('number of test samples to load: {}'.format(max_load_num * batch_size))
+    for i in range(0, max_load_num):
+        feature_path = os.path.join(test_set_feature_path, test_set_feature_files[i])
+        print('load ', feature_path)
+        data = np.fromfile(feature_path, dtype=np.int32).reshape(batch_size, sentence_word_vector_size)
+        sentence_word_vectors = np.vstack([sentence_word_vectors, data])
+
+    # load labels
+    sentence_labels = np.load(test_set_label_path)
+    sentence_labels = sentence_labels[:max_load_num]
+    sentence_labels = sentence_labels.reshape(-1, 1).astype(np.int32)
+
+    return sentence_word_vectors, sentence_labels, test_set_feature_files[:max_load_num]
+
+
+def calculate_metrics(ground_truth, prediction):
+    """
+    calculate metrics including overall accuracy and the precision, recall and
+    F1-score of both the positive and negative classes
+    :param ground_truth: actual labels
+    :param prediction: model inference result
+    :return: calculated metrics
+    """
+    confusion_matrix = np.empty(shape=(2, 2))
+    for i in range(0, 2):
+        for j in range(0, 2):
+            confusion_matrix[i, j] = np.sum(prediction[ground_truth == i] == j)
+
+    total = ground_truth.shape[0]
+    negative_gt_total = ground_truth[ground_truth == 0].shape[0]
+    positive_gt_total = ground_truth[ground_truth == 1].shape[0]
+    negative_pred_total = prediction[prediction == 0].shape[0]
+    positive_pred_total = prediction[prediction == 1].shape[0]
+
+    correct = prediction[prediction == ground_truth].shape[0]
+    negative_correct = confusion_matrix[0, 0]
+    positive_correct = confusion_matrix[1, 1]
+
+    total_accuracy = correct / total
+    negative_precision = negative_correct / negative_pred_total
+    positive_precision = positive_correct / positive_pred_total
+
+    negative_recall = negative_correct / negative_gt_total
+    positive_recall = positive_correct / positive_gt_total
+
+    negative_F1_score = 2 * negative_precision * negative_recall / (negative_precision + negative_recall)
+    positive_F1_score = 2 * positive_precision * positive_recall / (positive_precision + positive_recall)
+
+    print('confusion matrix:\n', confusion_matrix)
+    print('total accuracy: {}/{} ({})'.format(correct, total, total_accuracy))
+    print('negative precision: {}/{} ({})'.format(negative_correct, negative_pred_total, negative_precision))
+    print('positive precision: {}/{} ({})'.format(positive_correct, positive_pred_total, positive_precision))
+    print('negative recall: {}/{} ({})'.format(negative_correct, negative_gt_total, negative_recall))
+    print('positive recall: {}/{} ({})'.format(positive_correct, positive_gt_total, positive_recall))
+    print('negative_F1_score: {}'.format(negative_F1_score))
+    print('positive_F1_score: {}'.format(positive_F1_score))
+
+    metrics = {'confusion_matrix': confusion_matrix,
+               'total_accuracy': total_accuracy,
+               'negative_precision': negative_precision,
+               'positive_precision': positive_precision,
+               'negative_recall': negative_recall,
+               'positive_recall': positive_recall,
+               'negative_F1_score': negative_F1_score,
+               'positive_F1_score': positive_F1_score}
+
+    return metrics
+
+
+def write_infer_result_to_file(file_path, file_name, infer_files, results, is_raw_data):
+    """
+    save infer result to file
+    :param file_path: result dir
+    :param file_name: result file name
+    :param infer_files: infer file names
+    :param results: infer results
+    :param is_raw_data: whether the inputs are raw sentence files (one result per file)
+    :return: no return
+    """
+    if not os.path.exists(file_path):
+        os.makedirs(file_path)
+
+    with open(os.path.join(file_path, file_name), 'w') as f:
+        if is_raw_data:
+            for i in range(0, len(infer_files)):
+                f.writelines('file: {}, result: {}\n'.format(infer_files[i], 'positive' if results[i] else 'negative'))
+        else:
+            f.writelines('files: {}, total reviews num: {}\n'.format(infer_files, results.shape[0]))
+            for i in range(0, results.shape[0]):
+                f.writelines('{}-th review: {}\n'.format(i + 1, 'positive' if results[i] else 'negative'))
+
+
+def write_eval_result_to_file(file_path, file_name, eval_result):
+    """
+    save eval result to file
+    :param file_path: result dir
+    :param file_name: result file name
+    :param eval_result: eval result
+    :return: no return
+    """
+    if not os.path.exists(file_path):
+        os.makedirs(file_path)
+
+    with open(os.path.join(file_path, file_name), 'w') as f:
+        f.writelines('confusion matrix:\n {}\n'.format(eval_result.get('confusion_matrix')))
+        f.writelines('total accuracy: {}\n'.format(eval_result.get('total_accuracy')))
+        f.writelines('negative precision: {}\n'.format(eval_result.get('negative_precision')))
+        f.writelines('positive precision: {}\n'.format(eval_result.get('positive_precision')))
+        f.writelines('negative recall: {}\n'.format(eval_result.get('negative_recall')))
+        f.writelines('positive recall: {}\n'.format(eval_result.get('positive_recall')))
+        f.writelines('negative F1-score: {}\n'.format(eval_result.get('negative_F1_score')))
+        f.writelines('positive F1-score: {}\n'.format(eval_result.get('positive_F1_score')))
diff --git a/official/nlp/lstm/infer/sdk/utils/word_vector_parser.py b/official/nlp/lstm/infer/sdk/utils/word_vector_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..4816e1f7a5ba6536a6bba5031ec67737d6c25e0a
--- /dev/null
+++ b/official/nlp/lstm/infer/sdk/utils/word_vector_parser.py
@@ -0,0 +1,140 @@
+# coding=utf-8
+"""
+Copyright 2021 Huawei Technologies Co., Ltd
+
+Licensed under the BSD 3-Clause License  (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+https://opensource.org/licenses/BSD-3-Clause
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+
+import numpy as np
+
+
+class WordVectorParser:
+    """
+    parse sentence data into features
+    sentence -> tokenized -> encoded -> padded -> features
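+    e.g. "A great movie" -> ["a", "great", "movie"] -> [word indexes] -> padded with 0 to length 500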
+    """
+
+    def __init__(self, sentences_path, vocab_path):
+        self.__sentence_dir = sentences_path
+        self.__vocab_path = vocab_path
+
+        # properties
+        self.__sentence_files = {}
+        self.__sentence_datas = {}
+        self.__features = {}
+        self.__vocab = {}
+        self.__word2idx = {}
+
+    def parse(self):
+        """
+        parse sentences data to memory
+        """
+        self.__read_sentences_data()
+        self.__parse_features()
+
+    def __read_sentences_data(self):
+        """
+        load data from txt
+        """
+        self.__sentence_files = os.listdir(self.__sentence_dir)
+
+        data_lists = []
+        for file in self.__sentence_files:
+            with open(os.path.join(self.__sentence_dir, file), mode='r', encoding='utf8') as f:
+                sentence = f.read().replace('\n', '')
+                data_lists.append(sentence)
+        self.__sentence_datas = data_lists
+
+    def __parse_features(self):
+        """
+        parse features
+        """
+        features = []
+        for sentence in self.__sentence_datas:
+            features.append(sentence)
+
+        self.__features = features
+
+        # update features to tokenized
+        self.__update_features_to_tokenized()
+        # parse vocab
+        self.__parse_vocab()
+        # encode feature
+        self.__encode_features()
+        # padding feature
+        self.__padding_features()
+
+    def __update_features_to_tokenized(self):
+        """
+        split sentence to words
+        """
+        tokenized_features = []
+        for sentence in self.__features:
+            tokenized_sentence = [word.lower() for word in sentence.split(" ")]
+            tokenized_features.append(tokenized_sentence)
+        self.__features = tokenized_features
+
+    def __parse_vocab(self):
+        """
+        load vocab and generate word indexes
+        """
+        vocab = []
+        with open(self.__vocab_path, 'r') as vocab_file:
+            line = vocab_file.readline()
+            while line:
+                line = line.replace('\n', '')
+                vocab.append(line)
+                line = vocab_file.readline()
+        self.__vocab = vocab
+
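+        # reserve index 0 for unknown words; known vocab words are indexed from 1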
+        word_to_idx = {word: i + 1 for i, word in enumerate(vocab)}
+        word_to_idx['<unk>'] = 0
+        self.__word2idx = word_to_idx
+
+    def __encode_features(self):
+        """
+        encode word to index
+        """
+        word_to_idx = self.__word2idx
+        encoded_features = []
+        for tokenized_sentence in self.__features:
+            encoded_sentence = []
+            for word in tokenized_sentence:
+                encoded_sentence.append(word_to_idx.get(word, 0))
+            encoded_features.append(encoded_sentence)
+        self.__features = encoded_features
+
+    def __padding_features(self, max_len=500, pad=0):
+        """
+        pad all features to the same length
+        """
+        padded_features = []
+        for feature in self.__features:
+            if len(feature) >= max_len:
+                padded_feature = feature[:max_len]
+            else:
+                padded_feature = feature
+                while len(padded_feature) < max_len:
+                    padded_feature.append(pad)
+            padded_features.append(padded_feature)
+        self.__features = padded_features
+
+    def get_datas(self):
+        """
+        get features
+        """
+        features = np.array(self.__features).astype(np.int32)
+        files = self.__sentence_files
+        return features, files
diff --git a/official/nlp/lstm/modelarts/data_process.py b/official/nlp/lstm/modelarts/data_process.py
new file mode 100644
index 0000000000000000000000000000000000000000..90317b9bc26fa82cd3d86a147658c029f3d3fdcd
--- /dev/null
+++ b/official/nlp/lstm/modelarts/data_process.py
@@ -0,0 +1,250 @@
+# -*- encoding: utf-8 -*-
+"""
+  @Project: LSTM
+  @File: data_process.py
+  @Author: Joy
+  @Created Time: 2021-11-12 19:59
+"""
+import argparse
+import os
+import time
+
+import moxing as mox
+import numpy as np
+import mindspore.dataset as ds
+from mindspore import context
+
+from src.model_utils.device_adapter import get_device_id, get_device_num, get_rank_id
+
+parser = argparse.ArgumentParser(description='Natural Language Processing')
+
+# ModelArts config
+parser.add_argument("--enable_modelarts", type=bool, default=True, help="whether training on modelarts, default: True")
+parser.add_argument("--data_url", type=str, default="", help="dataset url for obs")
+parser.add_argument("--checkpoint_url", type=str, default="", help="checkpoint url for obs")
+parser.add_argument("--train_url", type=str, default="", help="training output url for obs")
+parser.add_argument("--data_path", type=str, default="/cache/data", help="dataset path for local")
+parser.add_argument("--load_path", type=str, default="/cache/checkpoint", help="dataset path for local")
+parser.add_argument("--output_path", type=str, default="/cache/train", help="training output path for local")
+parser.add_argument("--checkpoint_path", type=str, default="./checkpoint/",
+                    help="the path where pre-trained checkpoint file path")
+parser.add_argument("--checkpoint_file", type=str, default="./checkpoint/lstm-20_390.ckpt",
+                    help="the path where pre-trained checkpoint file name")
+parser.add_argument("--device_target", type=str, default='Ascend', choices=("Ascend", "GPU", "CPU"),
+                    help="Device target, support Ascend, GPU and CPU.")
+parser.add_argument("--enable_profiling", type=bool, default=False, help="whether enable modelarts profiling")
+
+# LSTM config
+parser.add_argument("--num_classes", type=int, default=2, help="output class num")
+parser.add_argument("--num_hiddens", type=int, default=128, help="number of hidden unit per layer")
+parser.add_argument("--num_layers", type=int, default=2, help="number of network layer")
+parser.add_argument("--learning_rate", type=float, default=0.1, help="static learning rate")
+parser.add_argument("--dynamic_lr", type=bool, default=False, help="dynamic learning rate")
+parser.add_argument("--lr_init", type=float, default=0.05,
+                    help="initial learning rate, effective when enable dynamic_lr")
+parser.add_argument("--lr_end", type=float, default=0.01, help="end learning rate, effective when enable dynamic_lr")
+parser.add_argument("--lr_max", type=float, default=0.1, help="maximum learning rate, effective when enable dynamic_lr")
+parser.add_argument("--lr_adjust_epoch", type=int, default=6,
+                    help="the epoch interval of adjusting learning rate, effective when enable dynamic_lr")
+parser.add_argument("--warmup_epochs", type=int, default=1,
+                    help="the epoch interval of warmup, effective when enable dynamic_lr")
+parser.add_argument("--global_step", type=int, default=0, help="global step of train, effective when enable dynamic_lr")
+parser.add_argument("--momentum", type=float, default=0.9, help="")
+parser.add_argument("--num_epochs", type=int, default=20, help="")
+parser.add_argument("--batch_size", type=int, default=64, help="")
+parser.add_argument("--embed_size", type=int, default=300, help="")
+parser.add_argument("--bidirectional", type=bool, default=True, help="whether enable bidirectional LSTM network")
+parser.add_argument("--save_checkpoint_steps", type=int, default=7800, help="")
+parser.add_argument("--keep_checkpoint_max", type=int, default=10, help="")
+
+# train config
+parser.add_argument("--preprocess", type=str, default='false', help="whether to preprocess data")
+parser.add_argument("--preprocess_path", type=str, default="./preprocess",
+                    help="path where the pre-process data is stored, "
+                         "if preprocess set as 'false', you need prepared preprocessed data under data_url")
+parser.add_argument("--aclImdb_zip_path", type=str, default="./aclImdb_v1.tar.gz", help="path where the dataset zip")
+parser.add_argument("--aclImdb_path", type=str, default="./aclImdb", help="path where the dataset is stored")
+parser.add_argument("--glove_path", type=str, default="./glove", help="path where the GloVe is stored")
+parser.add_argument("--ckpt_path", type=str, default="./ckpt_lstm/",
+                    help="the path to save the checkpoint file")
+parser.add_argument("--pre_trained", type=str, default="", help="the pretrained checkpoint file path")
+parser.add_argument("--device_num", type=int, default=1, help="the number of using devices")
+parser.add_argument("--distribute", type=bool, default=False, help="enable when training with multi-devices")
+parser.add_argument("--enable_graph_kernel", type=bool, default=True, help="whether accelerate by graph kernel")
+
+# export config
+parser.add_argument("--ckpt_file", type=str, default="./ckpt_lstm/lstm-20_390.ckpt", help="the export ckpt file name")
+parser.add_argument("--device_id", type=int, default=0, help="")
+parser.add_argument("--file_name", type=str, default="./lstm", help="the export air file name")
+parser.add_argument("--file_format", type=str, default="AIR", help="the export file format")
+
+# LSTM Postprocess config
+parser.add_argument("--label_dir", type=str, default="", help="")
+parser.add_argument("--result_dir", type=str, default="./result_Files", help="")
+
+# Preprocess config
+parser.add_argument("--result_path", type=str, default="./preprocess_Result/", help="")
+
+config = parser.parse_args()
+
+_global_sync_count = 0
+
+
+def sync_data(from_path, to_path):
+    """
+    Download data from remote OBS to a local directory when from_path is a remote URL and to_path is a local path,
+    or upload data from a local directory to remote OBS in the reverse case.
+    :param from_path: source path
+    :param to_path: target path
+    :return: no return
+    """
+    global _global_sync_count
+    sync_lock = "/tmp/copy_sync.lock" + str(_global_sync_count)
+    _global_sync_count += 1
+
+    # Each server contains at most 8 devices.
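+    # only the first device on each server performs the copy; the other devices wait below until the lock file appears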
+    if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
+        print("from path: ", from_path)
+        print("to path: ", to_path)
+        mox.file.copy_parallel(from_path, to_path)
+        print("===finish data synchronization===")
+        try:
+            os.mknod(sync_lock)
+        except IOError:
+            print("Failed to create directory")
+        print("===save flag===")
+
+    while True:
+        if os.path.exists(sync_lock):
+            break
+        time.sleep(1)
+
+    print("Finish sync data from {} to {}.".format(from_path, to_path))
+
+
+def download_data():
+    """
+    sync data from data_url, train_url to data_path, output_path
+    :return: no return
+    """
+    if config.enable_modelarts:
+        if config.data_url:
+            if not os.path.isdir(config.data_path):
+                os.makedirs(config.data_path)
+                sync_data(config.data_url, config.data_path)
+                print("Dataset downloaded: ", os.listdir(config.data_path))
+        if config.checkpoint_url:
+            if not os.path.isdir(config.load_path):
+                os.makedirs(config.load_path)
+                sync_data(config.checkpoint_url, config.load_path)
+                print("Preload downloaded: ", os.listdir(config.load_path))
+        if config.train_url:
+            if not os.path.isdir(config.output_path):
+                os.makedirs(config.output_path)
+            sync_data(config.train_url, config.output_path)
+            print("Workspace downloaded: ", os.listdir(config.output_path))
+
+        context.set_context(save_graphs_path=os.path.join(config.output_path, str(get_rank_id())))
+        config.device_num = get_device_num()
+        config.device_id = get_device_id()
+        # create output dir
+        if not os.path.exists(config.output_path):
+            os.makedirs(config.output_path)
+
+
+def upload_data():
+    """
+    sync data from output_path to train_url
+    :return: no return
+    """
+    if config.enable_modelarts:
+        if config.train_url:
+            print("Start copy data to output directory.")
+            sync_data(config.output_path, config.train_url)
+            print("Copy data to output directory finished.")
+
+
+def modelarts_preprocess():
+    """
+    add path prefix, modify parameter and sync data
+    :return: no return
+    """
+    print("============== Starting ModelArts Preprocess ==============")
+    config.aclImdb_path = os.path.join(config.data_path, config.aclImdb_path)
+    config.aclImdb_zip_path = os.path.join(config.data_path, config.aclImdb_zip_path)
+    config.glove_path = os.path.join(config.data_path, config.glove_path)
+
+    config.file_name = os.path.join(config.output_path, config.file_name)
+    config.result_path = os.path.join(config.output_path, config.result_path)
+
+    if config.preprocess == 'true':
+        config.preprocess_path = os.path.join(config.output_path, config.preprocess_path)
+    else:
+        config.preprocess_path = os.path.join(config.data_path, config.preprocess_path)
+
+    # download data from obs
+    download_data()
+    print("============== ModelArts Preprocess finish ==============")
+
+
+def modelarts_postprocess():
+    """
+    upload output data to OBS
+    :return: no return
+    """
+    print("============== Starting ModelArts Postprocess ==============")
+    # upload data to obs
+    upload_data()
+    print("============== ModelArts Postprocess finish ==============")
+
+
+def lstm_create_dataset(data_home, batch_size, repeat_num=1, training=True, device_num=1, rank=0):
+    """Data operations."""
+    ds.config.set_seed(1)
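+    # fix the dataset seed so the shuffled batch order is reproducible across runs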
+    data_dir = os.path.join(data_home, "aclImdb_train.mindrecord0")
+    if not training:
+        data_dir = os.path.join(data_home, "aclImdb_test.mindrecord0")
+
+    data_set = ds.MindDataset(data_dir, columns_list=["feature", "label"], num_parallel_workers=4,
+                              num_shards=device_num, shard_id=rank)
+
+    # apply shuffle, batch and repeat operations
+    data_set = data_set.shuffle(buffer_size=data_set.get_dataset_size())
+    data_set = data_set.batch(batch_size=batch_size, drop_remainder=True)
+    data_set = data_set.repeat(count=repeat_num)
+
+    return data_set
+
+
+def convert_to_bin():
+    """
+    save the test dataset in bin format
+    :return: no return
+    """
+    dataset = lstm_create_dataset(config.preprocess_path, config.batch_size, training=False)
+    img_path = os.path.join(config.result_path, "00_data")
+    os.makedirs(img_path)
+    label_list = []
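+    # each .bin file stores one batch of padded word vectors with shape [batch_size, 500] (int32 word indexes)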
+    for i, data in enumerate(dataset.create_dict_iterator(output_numpy=True)):
+        file_name = "LSTM_data_bs" + str(config.batch_size) + "_" + str(i) + ".bin"
+        file_path = os.path.join(img_path, file_name)
+        data['feature'].tofile(file_path)
+        label_list.append(data['label'])
+
+        print('processed {}.'.format(file_name))
+
+    # save as npy
+    np.save(os.path.join(config.result_path, "label_ids.npy"), label_list)
+
+    # save as txt
+    sentence_labels = np.array(label_list)
+    sentence_labels = sentence_labels.reshape(-1, 1).astype(np.int32)
+    np.savetxt(os.path.join(config.result_path, "labels.txt"), sentence_labels)
+
+    print("=" * 20, "export bin files finished", "=" * 20)
+
+
+if __name__ == "__main__":
+    modelarts_preprocess()
+    convert_to_bin()
+    modelarts_postprocess()
diff --git a/official/nlp/lstm/modelarts/pip-requirements.txt b/official/nlp/lstm/modelarts/pip-requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fa0a47edd05d49c931796c83cc3492c1aa9c0ca5
--- /dev/null
+++ b/official/nlp/lstm/modelarts/pip-requirements.txt
@@ -0,0 +1,2 @@
+gensim==4.0.1
+pyyaml==5.4.1
diff --git a/official/nlp/lstm/modelarts/train_start.py b/official/nlp/lstm/modelarts/train_start.py
new file mode 100644
index 0000000000000000000000000000000000000000..408f9c9371c512f10b196a43ee31c5cef345b761
--- /dev/null
+++ b/official/nlp/lstm/modelarts/train_start.py
@@ -0,0 +1,453 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""
+######################## train lstm example ########################
+train lstm and get network model files(.ckpt) :
+"""
+
+import argparse
+import glob
+import os
+import tarfile
+import time
+
+import moxing as mox
+import numpy as np
+import mindspore.nn as nn
+from mindspore import Tensor, context, export
+from mindspore.common import set_seed
+from mindspore.communication.management import init, get_rank
+from mindspore.context import ParallelMode
+from mindspore.nn.metrics import Accuracy
+from mindspore.profiler import Profiler
+from mindspore.train import Model
+from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+
+from src.dataset import convert_to_mindrecord, lstm_create_dataset
+from src.lr_schedule import get_lr
+from src.lstm import SentimentNet
+from src.model_utils.device_adapter import get_device_id, get_device_num, get_rank_id, get_job_id
+
+parser = argparse.ArgumentParser(description='Natural Language Processing')
+
+# ModelArts config
+parser.add_argument("--enable_modelarts", type=bool, default=True, help="whether training on modelarts, default: True")
+parser.add_argument("--data_url", type=str, default="", help="dataset url for obs")
+parser.add_argument("--checkpoint_url", type=str, default="", help="checkpoint url for obs")
+parser.add_argument("--train_url", type=str, default="", help="training output url for obs")
+parser.add_argument("--data_path", type=str, default="/cache/data", help="dataset path for local")
+parser.add_argument("--load_path", type=str, default="/cache/checkpoint", help="dataset path for local")
+parser.add_argument("--output_path", type=str, default="/cache/train", help="training output path for local")
+parser.add_argument("--checkpoint_path", type=str, default="./checkpoint/",
+                    help="the path where pre-trained checkpoint file path")
+parser.add_argument("--checkpoint_file", type=str, default="./checkpoint/lstm-20_390.ckpt",
+                    help="the path where pre-trained checkpoint file name")
+parser.add_argument("--device_target", type=str, default='Ascend', choices=("Ascend", "GPU", "CPU"),
+                    help="Device target, support Ascend, GPU and CPU.")
+parser.add_argument("--enable_profiling", type=bool, default=False, help="whether enable modelarts profiling")
+
+# LSTM config
+parser.add_argument("--num_classes", type=int, default=2, help="output class num")
+parser.add_argument("--num_hiddens", type=int, default=128, help="number of hidden unit per layer")
+parser.add_argument("--num_layers", type=int, default=2, help="number of network layer")
+parser.add_argument("--learning_rate", type=float, default=0.1, help="static learning rate")
+parser.add_argument("--dynamic_lr", type=bool, default=False, help="dynamic learning rate")
+parser.add_argument("--lr_init", type=float, default=0.05,
+                    help="initial learning rate, effective when enable dynamic_lr")
+parser.add_argument("--lr_end", type=float, default=0.01, help="end learning rate, effective when enable dynamic_lr")
+parser.add_argument("--lr_max", type=float, default=0.1, help="maximum learning rate, effective when enable dynamic_lr")
+parser.add_argument("--lr_adjust_epoch", type=int, default=6,
+                    help="the epoch interval of adjusting learning rate, effective when enable dynamic_lr")
+parser.add_argument("--warmup_epochs", type=int, default=1,
+                    help="the epoch interval of warmup, effective when enable dynamic_lr")
+parser.add_argument("--global_step", type=int, default=0, help="global step of train, effective when enable dynamic_lr")
+parser.add_argument("--momentum", type=float, default=0.9, help="")
+parser.add_argument("--num_epochs", type=int, default=20, help="")
+parser.add_argument("--batch_size", type=int, default=64, help="")
+parser.add_argument("--embed_size", type=int, default=300, help="")
+parser.add_argument("--bidirectional", type=bool, default=True, help="whether enable bidirectional LSTM network")
+parser.add_argument("--save_checkpoint_steps", type=int, default=7800, help="")
+parser.add_argument("--keep_checkpoint_max", type=int, default=10, help="")
+
+# train config
+parser.add_argument("--preprocess", type=str, default='false', help="whether to preprocess data")
+parser.add_argument("--preprocess_path", type=str, default="./preprocess",
+                    help="path where the pre-process data is stored, "
+                         "if preprocess set as 'false', you need prepared preprocessed data under data_url")
+parser.add_argument("--aclImdb_zip_path", type=str, default="./aclImdb_v1.tar.gz", help="path where the dataset zip")
+parser.add_argument("--aclImdb_path", type=str, default="./aclImdb", help="path where the dataset is stored")
+parser.add_argument("--glove_path", type=str, default="./glove", help="path where the GloVe is stored")
+parser.add_argument("--ckpt_path", type=str, default="./ckpt_lstm/",
+                    help="the path to save the checkpoint file")
+parser.add_argument("--pre_trained", type=str, default="", help="the pretrained checkpoint file path")
+parser.add_argument("--device_num", type=int, default=1, help="the number of using devices")
+parser.add_argument("--distribute", type=bool, default=False, help="enable when training with multi-devices")
+parser.add_argument("--enable_graph_kernel", type=bool, default=True, help="whether accelerate by graph kernel")
+
+# export config
+parser.add_argument("--ckpt_file", type=str, default="./ckpt_lstm/lstm-20_390.ckpt", help="the export ckpt file name")
+parser.add_argument("--device_id", type=int, default=0, help="")
+parser.add_argument("--file_name", type=str, default="./lstm", help="the export air file name")
+parser.add_argument("--file_format", type=str, default="AIR", help="the export file format")
+
+# LSTM Postprocess config
+parser.add_argument("--label_dir", type=str, default="", help="")
+parser.add_argument("--result_dir", type=str, default="./result_Files", help="")
+
+# Preprocess config
+parser.add_argument("--result_path", type=str, default="./preprocess_Result/", help="")
+
+config = parser.parse_args()
+
+set_seed(1)
+_global_sync_count = 0
+profiler = None
+ckpt_save_dir = ''
+embedding_table = None
+
+
+def unzip(file_name, dirs):
+    """
+    extract the dataset archive in tar.gz format
+    :param file_name: file to be unzipped
+    :param dirs: unzip path
+    :return: no return
+    """
+    if not os.path.exists(file_name):
+        raise FileNotFoundError('{} not exist'.format(file_name))
+
+    print('unzip {} start.'.format(file_name))
+    t = tarfile.open(file_name)
+    t.extractall(path=dirs)
+    print('unzip {} end.'.format(file_name))
+
+
+def frozen_to_air(net, args):
+    """
+    export trained model with specific format
+    :param net: model object
+    :param args: frozen arguments
+    :return: no return
+    """
+    param_dict = load_checkpoint(args.get("ckpt_file"))
+    load_param_into_net(net, param_dict)
+    input_arr = args.get('input_arr')
+    export(net, input_arr, file_name=args.get("file_name"), file_format=args.get("file_format"))
+
+
+def sync_data(from_path, to_path):
+    """
+    Download data from remote OBS to a local directory when from_path is a remote URL and to_path is a local path,
+    or upload data from a local directory to remote OBS in the reverse case.
+    :param from_path: source path
+    :param to_path: target path
+    :return: no return
+    """
+    global _global_sync_count
+    sync_lock = "/tmp/copy_sync.lock" + str(_global_sync_count)
+    _global_sync_count += 1
+
+    # Each server contains at most 8 devices.
+    if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
+        print("from path: ", from_path)
+        print("to path: ", to_path)
+        mox.file.copy_parallel(from_path, to_path)
+        print("===finish data synchronization===")
+        try:
+            os.mknod(sync_lock)
+        except IOError:
+            print("Failed to create directory")
+        print("===save flag===")
+
+    while True:
+        if os.path.exists(sync_lock):
+            break
+        time.sleep(1)
+
+    print("Finish sync data from {} to {}.".format(from_path, to_path))
+
+
+def download_data():
+    """
+    sync data from data_url, train_url to data_path, output_path
+    :return: no return
+    """
+    if config.enable_modelarts:
+        if config.data_url:
+            if not os.path.isdir(config.data_path):
+                os.makedirs(config.data_path)
+                sync_data(config.data_url, config.data_path)
+                print("Dataset downloaded: ", os.listdir(config.data_path))
+        if config.checkpoint_url:
+            if not os.path.isdir(config.load_path):
+                os.makedirs(config.load_path)
+                sync_data(config.checkpoint_url, config.load_path)
+                print("Preload downloaded: ", os.listdir(config.load_path))
+        if config.train_url:
+            if not os.path.isdir(config.output_path):
+                os.makedirs(config.output_path)
+            sync_data(config.train_url, config.output_path)
+            print("Workspace downloaded: ", os.listdir(config.output_path))
+
+        context.set_context(save_graphs_path=os.path.join(config.output_path, str(get_rank_id())))
+        config.device_num = get_device_num()
+        config.device_id = get_device_id()
+        # create output dir
+        if not os.path.exists(config.output_path):
+            os.makedirs(config.output_path)
+
+
+def upload_data():
+    """
+    sync data from output_path to train_url
+    :return: no return
+    """
+    if config.enable_modelarts:
+        if config.train_url:
+            print("Start copy data to output directory.")
+            sync_data(config.output_path, config.train_url)
+            print("Copy data to output directory finished.")
+
+
+def modelarts_preprocess():
+    """
+    add path prefix, modify parameter and sync data
+    :return: no return
+    """
+    print("============== Starting ModelArts Preprocess ==============")
+    config.aclImdb_path = os.path.join(config.data_path, config.aclImdb_path)
+    config.aclImdb_zip_path = os.path.join(config.data_path, config.aclImdb_zip_path)
+    config.glove_path = os.path.join(config.data_path, config.glove_path)
+
+    config.ckpt_path = os.path.join(config.output_path, config.ckpt_path)
+    config.ckpt_file = os.path.join(config.output_path, config.ckpt_file)
+    config.file_name = os.path.join(config.output_path, config.file_name)
+
+    if config.preprocess == 'true':
+        config.preprocess_path = os.path.join(config.output_path, config.preprocess_path)
+    else:
+        config.preprocess_path = os.path.join(config.data_path, config.preprocess_path)
+
+    # create profiler
+    global profiler
+    if config.enable_profiling:
+        profiler = Profiler()
+
+    # download data from obs
+    download_data()
+
+    # unzip dataset zip
+    if config.preprocess == 'true':
+        unzip(config.aclImdb_zip_path, config.data_path)
+    print("============== ModelArts Preprocess finish ==============")
+
+
+def modelarts_postprocess():
+    """
+    convert lstm model to AIR format, sync data
+    :return: no return
+    """
+    print("============== Starting ModelArts Postprocess ==============")
+    # get trained lstm checkpoint
+    ckpt_list = glob.glob(str(ckpt_save_dir) + "/*lstm*.ckpt")
+    if not ckpt_list:
+        print("ckpt file not generated, skip model export.")
+    else:
+        ckpt_list.sort(key=os.path.getmtime)
+        ckpt_model = ckpt_list[-1]
+
+        # export the latest LSTM checkpoint in AIR format
+        export_lstm(ckpt_model)
+
+    # analyse
+    if config.enable_profiling and profiler is not None:
+        profiler.analyse()
+
+    # upload data to obs
+    upload_data()
+    print("============== ModelArts Postprocess finish ==============")
+
+
+def export_lstm(ckpt_model):
+    """
+    convert ckpt to AIR format and export the lstm model
+    :param ckpt_model: trained checkpoint
+    :return: no return
+    """
+    print("start frozen model to AIR.")
+    global embedding_table
+    if embedding_table is None:
+        print('embedding_table is None, re-generate')
+        embedding_table = np.loadtxt(os.path.join(config.preprocess_path, "weight.txt")).astype(np.float32)
+
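+        # DynamicRNN on Ascend requires the embedding size to be a multiple of 16,
+        # so pad the embedding table the same way as in train_lstm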
+        if config.device_target == 'Ascend':
+            pad_num = int(np.ceil(config.embed_size / 16) * 16 - config.embed_size)
+            if pad_num > 0:
+                embedding_table = np.pad(embedding_table, [(0, 0), (0, pad_num)], 'constant')
+            config.embed_size = int(np.ceil(config.embed_size / 16) * 16)
+
+    net = SentimentNet(vocab_size=embedding_table.shape[0],
+                       embed_size=config.embed_size,
+                       num_hiddens=config.num_hiddens,
+                       num_layers=config.num_layers,
+                       bidirectional=config.bidirectional,
+                       num_classes=config.num_classes,
+                       weight=Tensor(embedding_table),
+                       batch_size=config.batch_size)
+
+    frozen_to_air_args = {"ckpt_file": ckpt_model,
+                          "batch_size": config.batch_size,
+                          "input_arr": Tensor(
+                              np.random.uniform(0.0, 1e5, size=[config.batch_size, 500]).astype(np.int32)),
+                          "file_name": config.file_name,
+                          "file_format": config.file_format}
+
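+    # input_arr is a dummy [batch_size, 500] int32 tensor matching the padded word-vector input of the network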
+    # convert model to AIR format
+    frozen_to_air(net, frozen_to_air_args)
+    print("Frozen model to AIR finish.")
+
+
+def train_lstm():
+    """
+    train lstm
+    :return:  no return
+    """
+    # print train work info
+    print(config)
+    print('device id:', get_device_id())
+    print('device num:', get_device_num())
+    print('rank id:', get_rank_id())
+    print('job id:', get_job_id())
+
+    # set context
+    device_target = config.device_target
+    _enable_graph_kernel = config.enable_graph_kernel and device_target == "GPU"
+    context.set_context(
+        mode=context.GRAPH_MODE,
+        save_graphs=False,
+        enable_graph_kernel=_enable_graph_kernel,
+        graph_kernel_flags="--enable_cluster_ops=MatMul",
+        device_target=config.device_target)
+
+    # enable parallel train
+    device_num = config.device_num
+    rank = 0
+    if device_num > 1 or config.distribute:
+        context.reset_auto_parallel_context()
+        context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
+                                          gradients_mean=True)
+        if device_target == "Ascend":
+            context.set_context(device_id=get_device_id())
+            init()
+            rank = get_rank()
+
+        elif device_target == "GPU":
+            init()
+    else:
+        context.set_context(device_id=get_device_id())
+
+    # dataset preprocess
+    if config.preprocess == 'true':
+        import shutil
+        if os.path.exists(config.preprocess_path):
+            shutil.rmtree(config.preprocess_path)
+        print("============== Starting Data Pre-processing ==============")
+        convert_to_mindrecord(config.embed_size, config.aclImdb_path, config.preprocess_path, config.glove_path)
+        print("==============    Data Pre-processing End   ==============")
+
+    # prepare train dataset
+    print('prepare train dataset with preprocessed data in {}, batch size: {}.'.
+          format(config.preprocess_path, config.batch_size))
+    ds_train = lstm_create_dataset(config.preprocess_path, config.batch_size, 1, device_num=device_num, rank=rank)
+    if ds_train.get_dataset_size() == 0:
+        raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")
+    print('train dataset prepare finished.')
+
+    # init embedding_table
+    print('init embedding table from {}.'.format(os.path.join(config.preprocess_path, "weight.txt")))
+    global embedding_table
+    embedding_table = np.loadtxt(os.path.join(config.preprocess_path, "weight.txt")).astype(np.float32)
+    # DynamicRNN in this network on Ascend platform only support the condition that the shape of input_size
+    # and hidden_size is multiples of 16, this problem will be solved later.
+    if config.device_target == 'Ascend':
+        pad_num = int(np.ceil(config.embed_size / 16) * 16 - config.embed_size)
+        if pad_num > 0:
+            embedding_table = np.pad(embedding_table, [(0, 0), (0, pad_num)], 'constant')
+        config.embed_size = int(np.ceil(config.embed_size / 16) * 16)
+    print('init embedding table finished.')
+
+    # set loss function
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
+
+    # init learning rate
+    if config.dynamic_lr:
+        print('generate dynamic learning rate start.')
+        lr = Tensor(get_lr(global_step=config.global_step,
+                           lr_init=config.lr_init, lr_end=config.lr_end, lr_max=config.lr_max,
+                           warmup_epochs=config.warmup_epochs,
+                           total_epochs=config.num_epochs,
+                           steps_per_epoch=ds_train.get_dataset_size(),
+                           lr_adjust_epoch=config.lr_adjust_epoch))
+        print('generate dynamic learning rate finished.')
+    else:
+        lr = config.learning_rate
+
+    # init LSTM network
+    network = SentimentNet(vocab_size=embedding_table.shape[0],
+                           embed_size=config.embed_size,
+                           num_hiddens=config.num_hiddens,
+                           num_layers=config.num_layers,
+                           bidirectional=config.bidirectional,
+                           num_classes=config.num_classes,
+                           weight=Tensor(embedding_table),
+                           batch_size=config.batch_size)
+
+    # load pre-trained model parameter
+    if config.pre_trained:
+        print('load pre-trained checkpoint from {}.'.format(config.pre_trained))
+        load_param_into_net(network, load_checkpoint(config.pre_trained))
+        print('load pre-trained checkpoint finished.')
+
+    # init optimizer
+    opt = nn.Momentum(network.trainable_params(), lr, config.momentum)
+
+    # wrap LSTM network
+    model = Model(network, loss, opt, {'acc': Accuracy()})
+
+    global ckpt_save_dir
+    if device_num > 1:
+        ckpt_save_dir = os.path.join(config.ckpt_path + "_" + str(get_rank()))
+    else:
+        ckpt_save_dir = config.ckpt_path
+
+    config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_steps,
+                                 keep_checkpoint_max=config.keep_checkpoint_max)
+    ckpoint_cb = ModelCheckpoint(prefix="lstm", directory=ckpt_save_dir, config=config_ck)
+    time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())
+    loss_cb = LossMonitor()
+
+    print("============== Starting Training ==============")
+    if config.device_target == "CPU":
+        model.train(config.num_epochs, ds_train, callbacks=[time_cb, ckpoint_cb, loss_cb], dataset_sink_mode=False)
+    else:
+        model.train(config.num_epochs, ds_train, callbacks=[time_cb, ckpoint_cb, loss_cb])
+    print("============== Training Success ==============")
+
+
+if __name__ == "__main__":
+    modelarts_preprocess()
+    train_lstm()
+    modelarts_postprocess()
diff --git a/official/nlp/lstm/scripts/docker_start.sh b/official/nlp/lstm/scripts/docker_start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..20c8298824f1c66b50d1911006aaa17faf7ef1ef
--- /dev/null
+++ b/official/nlp/lstm/scripts/docker_start.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+data_dir=$2
+model_dir=$3
+
+docker run -it --ipc=host \
+               --device=/dev/davinci0 \
+               --device=/dev/davinci1 \
+               --device=/dev/davinci2 \
+               --device=/dev/davinci3 \
+               --device=/dev/davinci4 \
+               --device=/dev/davinci5 \
+               --device=/dev/davinci6 \
+               --device=/dev/davinci7 \
+               --device=/dev/davinci_manager \
+               --device=/dev/devmm_svm --device=/dev/hisi_hdc \
+               -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+               -v /usr/local/Ascend/add-ons/:/usr/local/Ascend/add-ons/ \
+               -v ${model_dir}:${model_dir} \
+               -v ${data_dir}:${data_dir} \
+               -v /root/ascend/log:/root/ascend/log ${docker_image} \
+               /bin/bash