diff --git a/.jenkins/check/config/filter_cpplint.txt b/.jenkins/check/config/filter_cpplint.txt
index 0d315f8631c7f6189e8ef2797306b662dab3fb70..73bbdf7aa8cb268c3c98507d37d766261ff5bf80 100644
--- a/.jenkins/check/config/filter_cpplint.txt
+++ b/.jenkins/check/config/filter_cpplint.txt
@@ -33,6 +33,8 @@
 "models/research/cv/squeezenet1_1/infer/mxbase/Squeezenet1_1ClassifyOpencv.cpp"  "runtime/references"
 "models/official/cv/vgg16/infer/mxbase/src/Vgg16Classify.h"  "runtime/references"
 "models/official/cv/resnext/infer/mxbase/src/resnext50Classify.h" "runtime/references"
+"models/official/cv/cnn_direction_model/infer/mxbase/src/Cnndirection.h" "runtime/references"
+"models/official/cv/cnn_direction_model/infer/mxbase/src/main.cpp" "runtime/references"
 
 "models/research/cv/SE-Net/infer/mxbase/src/Senet_resnet50_opencv.h"                 "runtime/references"
 "models/research/cv/SE-Net/infer/mxbase/src/main.cpp"                                "runtime/references"
diff --git a/official/cv/cnn_direction_model/infer/convert/air2om.sh b/official/cv/cnn_direction_model/infer/convert/air2om.sh
new file mode 100644
index 0000000000000000000000000000000000000000..40bd0dc3b6d0d9506da790ed7500f5c25887e488
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/convert/air2om.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+model_path=$1
+output_model_name=$2
+
+/usr/local/Ascend/atc/bin/atc --model="$model_path" \
+        --framework=1 \
+        --output="$output_model_name" \
+        --input_format=NCHW \
+        --soc_version=Ascend310 \
+        --output_type=FP32
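+
+# Example invocation (paths and names are illustrative):
+#   bash air2om.sh ../data/models/cnn.air cnn
+# This writes cnn.om for Ascend 310 into the current directory.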
diff --git a/official/cv/cnn_direction_model/infer/data/config/cnndirection.pipeline b/official/cv/cnn_direction_model/infer/data/config/cnndirection.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..90cad4f7391815f3aefea6849743766537926b88
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/data/config/cnndirection.pipeline
@@ -0,0 +1,37 @@
+{
+    "cnn_direction": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "appsrc0",
+                "modelPath": "../data/models/cnn.om",
+                "waitingTime": "3000",
+                "outputDeviceId": "-1"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "mxpi_dataserialize0"
+        },
+        "mxpi_dataserialize0": {
+            "props": {
+                "outputDataKeys": "mxpi_tensorinfer0"
+            },
+            "factory": "mxpi_dataserialize",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "props": {
+                "blocksize": "4096000"
+            },
+            "factory": "appsink"
+        }
+    }
+}
diff --git a/official/cv/cnn_direction_model/infer/dataprocess/create_mindrecord.py b/official/cv/cnn_direction_model/infer/dataprocess/create_mindrecord.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab6b928fc583c6acfd6881cf1b3d2bd15042de4d
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/dataprocess/create_mindrecord.py
@@ -0,0 +1,110 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import os
+from mindspore.mindrecord import FileWriter
+from model_utils.config import config
+
+FAIL = 1
+SUCCESS = 0
+
+def get_images(image_dir, annot_files):
+    """
+    Get file paths that are in image_dir, annotation file is used to get the file names.
+
+    Args:
+        image_dir(string): images directory.
+        annot_files(list(string)) : annotation files.
+
+    Returns:
+        status code(int), status of process(string), image ids(list(int)), image paths(dict(int,string))
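+
+    Example (paths are illustrative):
+        status, msg, ids, path_dict = get_images("./data/train", ["./annotation_train.txt"])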
+    """
+    print("Process [Get Images] started")
+    if not os.path.isdir(image_dir):
+        return FAIL, "{} is not a directory. Please check the src/config.py file.".format(image_dir), [], {}
+    image_files_dict = {}
+    images = []
+    img_id = 0
+    # create a dictionary of image file paths
+    for annot_file in annot_files:
+        if not os.path.exists(annot_file):
+            return FAIL, "{} was not found.".format(annot_file), [], {}
+        with open(annot_file, 'r') as f:
+            lines = f.readlines()
+        for line in lines:
+            # extract file name
+            file_name = line.split('\t')[0]
+            if file_name == '\n' or len(file_name) == 1:
+                continue
+            image_path = os.path.join(image_dir, file_name.replace("\\", '/'))
+            if not os.path.isfile(image_path):
+                return FAIL, "{} is not a file.".format(image_path), [], {}
+            # add path to dictionary
+            images.append(img_id)
+            image_files_dict[img_id] = image_path
+            img_id += 1
+    return SUCCESS, "Successfully retrieved {} images.".format(str(len(images))), images, image_files_dict
+
+def write_mindrecord_images(image_ids, image_dict, mindrecord_dir, data_schema, file_num=1):
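+    """Write the images listed in image_dict to a MindRecord file under mindrecord_dir."""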
+    writer = FileWriter(os.path.join(mindrecord_dir, config.dataset_name + ".mindrecord"), shard_num=file_num)
+    writer.add_schema(data_schema, config.dataset_name)
+    len_image_dict = len(image_dict)
+    sample_count = 0
+    for img_id in image_ids:
+        image_path = image_dict[img_id]
+        with open(image_path, 'rb') as f:
+            img = f.read()
+        row = {"image": img}
+        sample_count += 1
+        writer.write_raw_data([row])
+        print("Progress {} / {}".format(str(sample_count), str(len_image_dict)), end='\r')
+    writer.commit()
+
+def create_mindrecord():
+    annot_files_train = [config.train_annotation_file]
+    annot_files_test = [config.test_annotation_file]
+    ret_code, ret_message, images_train, image_path_dict_train = get_images(image_dir=config.data_root_train,
+                                                                            annot_files=annot_files_train)
+    if ret_code != SUCCESS:
+        return ret_code, ret_message, "", ""
+    ret_code, ret_message, images_test, image_path_dict_test = get_images(image_dir=config.data_root_test,
+                                                                          annot_files=annot_files_test)
+    if ret_code != SUCCESS:
+        return ret_code, ret_message, "", ""
+    data_schema = {"image": {"type": "bytes"}}
+    train_target = os.path.join(config.mindrecord_dir, "train")
+    test_target = os.path.join(config.mindrecord_dir, "test")
+    os.makedirs(train_target, exist_ok=True)
+    os.makedirs(test_target, exist_ok=True)
+    print("Creating training mindrecords: ")
+    write_mindrecord_images(images_train, image_path_dict_train, train_target, data_schema)
+    print("Creating test mindrecords: ")
+    write_mindrecord_images(images_test, image_path_dict_test, test_target, data_schema)
+    return SUCCESS, "Successful mindrecord creation.", train_target, test_target
+
+
+if __name__ == "__main__":
+    # start creating mindrecords from raw images and annots
+    # provide root path to raw data in the config file
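+    # Required config keys (defined in model_utils/config): data_root_train,
+    # data_root_test, train_annotation_file, test_annotation_file,
+    # mindrecord_dir and dataset_name.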
+    code, message, train_target_dir, test_target_dir = create_mindrecord()
+    if code != SUCCESS:
+        print("Process done with status code: {}. Error: {}".format(code, message))
+    else:
+        print("Process done with status: {}. Training and testing data are saved to {} and {} respectively."
+              .format(message, train_target_dir, test_target_dir))
diff --git a/official/cv/cnn_direction_model/infer/dataprocess/fsns_char_map.json b/official/cv/cnn_direction_model/infer/dataprocess/fsns_char_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..564c73b8fd058bb2d2fae4fefc552bcdaf3a4049
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/dataprocess/fsns_char_map.json
@@ -0,0 +1,136 @@
+{
+    "0": 9250,
+    "1": 108,
+    "2": 8216,
+    "3": 233,
+    "4": 116,
+    "5": 101,
+    "6": 105,
+    "7": 110,
+    "8": 115,
+    "9": 120,
+    "10": 103,
+    "11": 117,
+    "12": 111,
+    "13": 49,
+    "14": 56,
+    "15": 55,
+    "16": 48,
+    "17": 8212,
+    "18": 46,
+    "19": 112,
+    "20": 97,
+    "21": 114,
+    "22": 232,
+    "23": 100,
+    "24": 99,
+    "25": 86,
+    "26": 118,
+    "27": 98,
+    "28": 109,
+    "29": 41,
+    "30": 67,
+    "31": 122,
+    "32": 83,
+    "33": 121,
+    "34": 44,
+    "35": 107,
+    "36": 201,
+    "37": 65,
+    "38": 104,
+    "39": 69,
+    "40": 187,
+    "41": 68,
+    "42": 47,
+    "43": 72,
+    "44": 77,
+    "45": 40,
+    "46": 71,
+    "47": 80,
+    "48": 231,
+    "49": 82,
+    "50": 102,
+    "51": 8221,
+    "52": 50,
+    "53": 106,
+    "54": 124,
+    "55": 78,
+    "56": 54,
+    "57": 176,
+    "58": 53,
+    "59": 84,
+    "60": 79,
+    "61": 85,
+    "62": 51,
+    "63": 37,
+    "64": 57,
+    "65": 113,
+    "66": 90,
+    "67": 66,
+    "68": 75,
+    "69": 119,
+    "70": 87,
+    "71": 58,
+    "72": 52,
+    "73": 76,
+    "74": 70,
+    "75": 93,
+    "76": 239,
+    "77": 73,
+    "78": 74,
+    "79": 228,
+    "80": 238,
+    "81": 59,
+    "82": 224,
+    "83": 234,
+    "84": 88,
+    "85": 252,
+    "86": 89,
+    "87": 244,
+    "88": 61,
+    "89": 43,
+    "90": 92,
+    "91": 123,
+    "92": 125,
+    "93": 95,
+    "94": 81,
+    "95": 339,
+    "96": 241,
+    "97": 42,
+    "98": 33,
+    "99": 220,
+    "100": 226,
+    "101": 199,
+    "102": 338,
+    "103": 251,
+    "104": 63,
+    "105": 36,
+    "106": 235,
+    "107": 171,
+    "108": 8364,
+    "109": 38,
+    "110": 60,
+    "111": 230,
+    "112": 35,
+    "113": 174,
+    "114": 194,
+    "115": 200,
+    "116": 62,
+    "117": 91,
+    "118": 198,
+    "119": 249,
+    "120": 206,
+    "121": 212,
+    "122": 255,
+    "123": 192,
+    "124": 202,
+    "125": 64,
+    "126": 207,
+    "127": 169,
+    "128": 203,
+    "129": 217,
+    "130": 163,
+    "131": 376,
+    "132": 219,
+    "133": 32
+}
diff --git a/official/cv/cnn_direction_model/infer/dataprocess/tfcord_to_image_sdk.py b/official/cv/cnn_direction_model/infer/dataprocess/tfcord_to_image_sdk.py
new file mode 100644
index 0000000000000000000000000000000000000000..37bf0339d9d2624fd1bff3ba7fa8c8844490e88a
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/dataprocess/tfcord_to_image_sdk.py
@@ -0,0 +1,156 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import argparse
+import csv
+
+import os
+import re
+from io import BytesIO
+import json
+import itertools
+import numpy as np
+import tensorflow as tf
+from PIL import Image
+import cv2
+from tqdm import tqdm
+
+
+image_height = 64
+image_width = 512
+
+def resize_image(pic):
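+    """Scale to 64 px height, crop or white-pad to 512 px width, map pixels to [-1, 1].
+
+    Matches the preprocessing in infer/sdk/main.py (minus the final NHWC -> NCHW
+    transpose); it is defined here for reference and is not called below.
+    """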
+    color_fill = 255
+    scale = image_height / pic.shape[0]
+    pic = cv2.resize(pic, None, fx=scale, fy=scale)
+    if pic.shape[1] > image_width:
+        pic = pic[:, 0:image_width]
+    else:
+        blank_img = np.zeros((image_height, image_width, 3), np.uint8)
+        # fill the image with white
+        blank_img.fill(color_fill)
+        blank_img[:image_height, :pic.shape[1]] = pic
+        pic = blank_img
+    data = np.array([pic[...]], np.float32)
+    data = data / 127.5 - 1
+    return data
+
+
+FILENAME_PATTERN = re.compile(r'.+-(\d+)-of-(\d+)')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='tool that takes tfrecord files and \
+                                     extracts all images + labels from it')
+    parser.add_argument('tfrecord_dir', default='./data/val', help='path to directory containing tfrecord files')
+    parser.add_argument('destination_dir', default='./data', help='path to dir where resulting images shall be saved')
+    parser.add_argument('stage', default='train', help='stage of training these files are for [e.g. train]')
+    parser.add_argument('char_map', help='path to fsns char map')
+    parser.add_argument('destination', help='path to destination gt file')
+    parser.add_argument('--max-words', type=int, default=6, help='max words per image')
+    parser.add_argument('--min-words', type=int, default=1, help='min words per image')
+    parser.add_argument('--max-chars', type=int, default=21, help='max characters per word')
+    parser.add_argument('--word-gt', action='store_true', default=False, help='input gt is word level gt')
+    parser.add_argument('--blank-label', default='133', help='class number of blank label')
+
+    args = parser.parse_args()
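+    # Example invocation (paths are illustrative):
+    #   python3 tfcord_to_image_sdk.py ./fsns_tfrecords ./data train \
+    #       fsns_char_map.json ./data/annotation_train.txt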
+
+    os.makedirs(args.destination_dir, exist_ok=True)
+
+    tfrecord_files = os.listdir(args.tfrecord_dir)
+    tfrecord_files = sorted(tfrecord_files, key=lambda x: int(FILENAME_PATTERN.match(x).group(1)))
+    fsns_gt = os.path.join(args.destination_dir, '{}.csv'.format(args.stage))
+    with open(fsns_gt, 'w') as label_file:
+        writer = csv.writer(label_file, delimiter='\t')
+        idx_tmp = 0
+        for tfrecord_file in tfrecord_files:
+            tfrecord_filename = os.path.join(args.tfrecord_dir, tfrecord_file)
+
+            file_id = '00000'
+            dest_dir = os.path.join(args.destination_dir, args.stage, file_id)
+            os.makedirs(dest_dir, exist_ok=True)
+
+            record_iterator = tf.compat.v1.python_io.tf_record_iterator(path=tfrecord_filename)
+
+            for idx, string_record in enumerate(record_iterator):
+                idx_tmp += 1
+                example = tf.train.Example()
+                example.ParseFromString(string_record)
+
+                labels = example.features.feature['image/class'].int64_list.value
+                img_string = example.features.feature['image/encoded'].bytes_list.value[0]
+
+                image = Image.open(BytesIO(img_string))
+                img = np.array(image)
+
+                img = img[:150, :150, :]
+                im = Image.fromarray(img)
+                if np.random.rand() > 0.5:
+                    file_name = os.path.join(dest_dir, '{}_1.jpg'.format(idx_tmp))
+                    im.save(file_name)
+
+                    label_file_data = [os.path.join(args.stage, file_id, '{}_1.jpg'.format(idx_tmp))]
+                    label_file_data.extend(labels)
+                    writer.writerow(label_file_data)
+                else:
+                    # rotate the image by 180 degrees (two 90-degree rotations)
+                    img_rotate = np.rot90(img)
+                    img = np.rot90(img_rotate)
+                    im = Image.fromarray(img)
+                    file_name = os.path.join(dest_dir, '{}_0.jpg'.format(idx_tmp))
+                    im.save(file_name)
+
+                    label_file_data = [os.path.join(args.stage, file_id, '{}_0.jpg'.format(idx_tmp))]
+                    label_file_data.extend(labels)
+                    writer.writerow(label_file_data)
+                print("recovered {:0>6} files".format(idx), end='\r')
+
+    with open(args.char_map) as c_map:
+        char_map = json.load(c_map)
+        reverse_char_map = {v: k for k, v in char_map.items()}
+
+    with open(fsns_gt) as fsns_gt_f:
+        reader = csv.reader(fsns_gt_f, delimiter='\t')
+        lines = [l for l in reader]
+
+    text_lines = []
+    for line in tqdm(lines):
+        if not line:
+            continue
+        text = ''.join(map(lambda x: chr(char_map[x]), line[1:]))
+        if args.word_gt:
+            text = text.split(chr(char_map[args.blank_label]))
+            text = filter(lambda x: x != '', text)
+        else:
+            text = text.strip(chr(char_map[args.blank_label]))
+            text = text.split()
+
+        words = []
+        for t in text:
+            t = list(map(lambda x: reverse_char_map[ord(x)], t))
+            t.extend([args.blank_label] * (args.max_chars - len(t)))
+            words.append(t)
+
+        words.extend([[args.blank_label] * args.max_chars for _ in range(args.max_words - len(words))])
+
+        text_lines.append([line[0]] + list(itertools.chain(*words)))
+
+    with open(args.destination, 'w') as dest:
+        writer = csv.writer(dest, delimiter='\t')
+        writer.writerow([args.max_words, args.max_chars])
+        writer.writerows(text_lines)
diff --git a/official/cv/cnn_direction_model/infer/docker_start_infer.sh b/official/cv/cnn_direction_model/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..072b0819ae7edbe63d62c98a039c43469157f792
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/docker_start_infer.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+data_dir=$2
+
+function show_help() {
+    echo "Usage: docker_start.sh docker_image data_dir"
+}
+
+function param_check() {
+    if [ -z "${docker_image}" ]; then
+        echo "please input docker_image"
+        show_help
+        exit 1
+    fi
+
+    if [ -z "${data_dir}" ]; then
+        echo "please input data_dir"
+        show_help
+        exit 1
+    fi
+}
+
+param_check
+
+docker run -it \
+  --device=/dev/davinci0 \
+  --device=/dev/davinci_manager \
+  --device=/dev/devmm_svm \
+  --device=/dev/hisi_hdc \
+  -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+  -v ${data_dir}:${data_dir} \
+  ${docker_image} \
+  /bin/bash
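+
+# Example (image name and mounted data directory are illustrative):
+#   bash docker_start_infer.sh mindx-sdk:latest /home/data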
diff --git a/official/cv/cnn_direction_model/infer/mxbase/CMakeLists.txt b/official/cv/cnn_direction_model/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e97ecdd5f11d566d9f4be1d6b3da585d7d4a5caa
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,35 @@
+cmake_minimum_required(VERSION 3.5.2)
+project(cnndirection)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+
+
+set(TARGET_MAIN cnndirection)
+
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/nnrt/latest/acllib)
+
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+include_directories($ENV{MX_SDK_HOME}/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/opencv4)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/gstreamer-1.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/include/glib-2.0)
+include_directories($ENV{MX_SDK_HOME}/opensource/lib/glib-2.0/include)
+
+link_directories($ENV{MX_SDK_HOME}/lib)
+link_directories($ENV{MX_SDK_HOME}/opensource/lib/)
+
+
+add_compile_options(-std=c++11 -fPIC -fstack-protector-all -pie -Wno-deprecated-declarations)
+add_compile_options("-DPLUGIN_NAME=${PLUGIN_NAME}")
+add_compile_options("-Dgoogle=mindxsdk_private")
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+
+include_directories(${ACL_LIB_PATH}/include)
+link_directories(${ACL_LIB_PATH}/lib64/)
+
+add_executable(${TARGET_MAIN} src/main.cpp src/Cnndirection.cpp)
+target_link_libraries(${TARGET_MAIN} ${TARGET_LIBRARY} glog cpprest mxbase libascendcl.so opencv_world)
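+
+# Typical build (assumes ASCEND_HOME and MX_SDK_HOME are exported):
+#   mkdir -p build && cd build && cmake .. && make
+# or simply run the provided build.sh.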
diff --git a/official/cv/cnn_direction_model/infer/mxbase/build.sh b/official/cv/cnn_direction_model/infer/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fe63664bef66c96ba4504752b18af48fa78a0463
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/mxbase/build.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+export ASCEND_VERSION=nnrt/latest
+export ARCH_PATTERN=.
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib/modelpostprocessors:${LD_LIBRARY_PATH}
+
+mkdir -p build
+cd build || exit
+
+function make_plugin() {
+    if ! cmake ..;
+    then
+      echo "cmake failed."
+      return 1
+    fi
+
+    if ! (make);
+    then
+      echo "make failed."
+      return 1
+    fi
+
+    return 0
+}
+
+if make_plugin;
+then
+  echo "INFO: Build successfully."
+else
+  echo "ERROR: Build failed."
+fi
+
+cd - || exit
+
diff --git a/official/cv/cnn_direction_model/infer/mxbase/src/Cnndirection.cpp b/official/cv/cnn_direction_model/infer/mxbase/src/Cnndirection.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..563b49afc105fcdca0e65064221f6140276bfc4a
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/mxbase/src/Cnndirection.cpp
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Cnndirection.h"
+#include <cstdlib>
+#include <memory>
+#include <string>
+#include <cmath>
+#include <vector>
+#include <algorithm>
+#include <queue>
+#include <utility>
+#include <fstream>
+#include <map>
+#include <iostream>
+#include "acl/acl.h"
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+
+namespace {
+    uint16_t READ_IMAGE_HEIGHT = 64;
+    uint16_t READ_IMAGE_WIDTH = 64;
+    uint16_t RESIZE_IMAGE_HEIGHT = 64;
+    uint16_t RESIZE_IMAGE_WIDTH = 512;
+    uint16_t FLOAT_SIZE = 4;
+}  // namespace
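+
+// The OM model expects a 1x3x64x512 NCHW float input: images are read and
+// resized to 64x64, then padded on the right to a width of 512 with white
+// pixels (value 1.0 after the x / 127.5 - 1 normalization).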
+
+APP_ERROR cnndirection::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+    model_cnndirection = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_cnndirection->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    return APP_ERR_OK;
+}
+
+APP_ERROR cnndirection::DeInit() {
+    model_cnndirection->DeInit();
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+APP_ERROR cnndirection::ReadImage(const std::string &imgPath, std::vector<std::vector<std::vector<float>>> &im) {
+    cv::Mat imageMat = cv::imread(imgPath, cv::IMREAD_COLOR);
+    // cv::Size takes (width, height); both are 64 here
+    cv::resize(imageMat, imageMat, cv::Size(READ_IMAGE_WIDTH, READ_IMAGE_HEIGHT));
+
+    int w = imageMat.cols;
+    int h = imageMat.rows;
+
+    std::vector<float> rgb;
+    for (int i = 0; i < h; i++) {
+        std::vector<std::vector<float>> t;
+        im.push_back(t);
+        for (int j = 0; j < w; j++) {
+            float r = imageMat.at<cv::Vec3b>(i, j)[2];
+            float g =  imageMat.at<cv::Vec3b>(i, j)[1];
+            float b =  imageMat.at<cv::Vec3b>(i, j)[0];
+            rgb = {r, g, b};
+
+            im[i].push_back(rgb);
+        }
+    }
+
+    return APP_ERR_OK;
+}
+
+APP_ERROR cnndirection::ReSize(const std::vector<std::vector<std::vector<float>>> &image,
+                               std::vector<std::vector<std::vector<float>>> &image_after) {
+    std::vector<std::vector<std::vector<float>>> image_resize;
+
+    for (int x = 0; x < RESIZE_IMAGE_HEIGHT; x++) {
+        std::vector<std::vector<float>> tt;
+        image_resize.push_back(tt);
+        for (int y = 0; y < RESIZE_IMAGE_WIDTH; y++) {
+            std::vector<float> tmp;
+            if (x < READ_IMAGE_HEIGHT && y < READ_IMAGE_WIDTH) {
+                std::vector<float> t = {image[x][y][0] / 127.5 - 1.0,
+                                        image[x][y][1] / 127.5 - 1.0,
+                                        image[x][y][2] / 127.5 - 1.0};
+                tmp = t;
+            } else {
+                std::vector<float> t = {1, 1, 1};
+                tmp = t;
+            }
+            image_resize[x].push_back(tmp);
+        }
+    }
+    image_after = image_resize;
+    return APP_ERR_OK;
+}
+
+APP_ERROR cnndirection::VectorToTensorBase_float(const std::vector<std::vector<std::vector<float>>> &batchFeatureVector,
+                                    MxBase::TensorBase &tensorBase) {
+    uint32_t dataSize = 1;
+    std::vector<uint32_t> shape = {};
+    shape.push_back(1);
+    shape.push_back(batchFeatureVector[0][0].size());
+    shape.push_back(batchFeatureVector.size());
+    shape.push_back(batchFeatureVector[0].size());
+
+    for (uint32_t s = 0; s < shape.size(); ++s) {
+        dataSize *= shape[s];
+    }
+    float *metaFeatureData = new float[dataSize];
+    uint32_t idx = 0;
+    for (size_t ch = 0; ch < batchFeatureVector[0][0].size(); ch++) {
+        for (size_t h = 0; h < batchFeatureVector.size(); h++) {
+              for (size_t w = 0; w < batchFeatureVector[0].size(); w++) {
+                metaFeatureData[idx++] = batchFeatureVector[h][w][ch];
+                }
+        }
+    }
+    MxBase::MemoryData memoryDataDst(dataSize * FLOAT_SIZE, MxBase::MemoryData::MEMORY_DEVICE, deviceId_);
+    MxBase::MemoryData memoryDataSrc(metaFeatureData, dataSize * FLOAT_SIZE, MxBase::MemoryData::MEMORY_HOST_MALLOC);
+    APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    delete[] metaFeatureData;  // host staging buffer is no longer needed after the copy
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc failed.";
+        return ret;
+    }
+    tensorBase = MxBase::TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_FLOAT32);
+    return APP_ERR_OK;
+}
+
+
+APP_ERROR cnndirection::Inference(const std::vector<MxBase::TensorBase> &inputs,
+                                      std::vector<MxBase::TensorBase> &outputs) {
+    auto dtypes = model_cnndirection->GetOutputDataType();
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector<uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        MxBase::TensorBase tensor(shape, dtypes[i], MxBase::MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
+        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs.push_back(tensor);
+    }
+    MxBase::DynamicInfo dynamicInfo = {};
+    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+    auto startTime = std::chrono::high_resolution_clock::now();
+    APP_ERROR ret = model_cnndirection->ModelInference(inputs, outputs, dynamicInfo);
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    inferCostTimeMilliSec += costMs;
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference cnndirection failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
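+// Full pipeline for a single image: read and resize, normalize, pack into an
+// NCHW tensor, run inference, append the two logits to ./infer_results.txt and
+// push the predicted class (0 or 1) to outputs.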
+APP_ERROR cnndirection::Process(const std::string &image_path, const InitParam &initParam, std::vector<int> &outputs) {
+    std::vector<MxBase::TensorBase> inputs = {};
+    std::vector<MxBase::TensorBase> outputs_tb = {};
+    std::string infer_result_path = "./infer_results.txt";
+
+    std::vector<std::vector<std::vector<float>>> image;
+    ReadImage(image_path, image);
+    ReSize(image, image);
+    MxBase::TensorBase tensorBase;
+    APP_ERROR ret = VectorToTensorBase_float(image, tensorBase);
+    if (ret != APP_ERR_OK) {
+        LogError << "ToTensorBase failed, ret=" << ret << ".";
+        return ret;
+    }
+    inputs.push_back(tensorBase);
+    auto startTime = std::chrono::high_resolution_clock::now();
+    APP_ERROR ret1 = Inference(inputs, outputs_tb);
+
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    inferCostTimeMilliSec += costMs;
+    if (ret1 != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret1 << ".";
+        return ret1;
+    }
+
+    if (!outputs_tb[0].IsHost()) {
+        outputs_tb[0].ToHost();
+    }
+    float *value = reinterpret_cast<float *>(outputs_tb[0].GetBuffer());
+
+    float res0 = value[0];
+    float res1 = value[1];
+
+    std::ofstream outfile(infer_result_path, std::ios::app);
+
+    if (outfile.fail()) {
+        LogError << "Failed to open result file: " << infer_result_path;
+        return APP_ERR_COMM_FAILURE;
+    }
+    outfile << res0 << "\t" << res1 << "\n";
+    outfile.close();
+
+    if (res0 > res1) {
+        outputs.push_back(0);
+    } else {
+        outputs.push_back(1);
+    }
+    return APP_ERR_OK;
+}
diff --git a/official/cv/cnn_direction_model/infer/mxbase/src/Cnndirection.h b/official/cv/cnn_direction_model/infer/mxbase/src/Cnndirection.h
new file mode 100644
index 0000000000000000000000000000000000000000..c0eee706d7c360762dd8296b81f5ed83c58342d3
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/mxbase/src/Cnndirection.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MXBASE_CNNDIRECTION_H
+#define MXBASE_CNNDIRECTION_H
+#include <memory>
+#include <string>
+#include <vector>
+#include "opencv2/opencv.hpp"
+#include "acl/acl.h"
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+#include "MxBase/CV/Core/DataType.h"
+
+struct InitParam {
+    uint32_t deviceId;
+    bool checkTensor;
+    std::string modelPath;
+};
+
+class cnndirection {
+ public:
+  APP_ERROR Init(const InitParam &initParam);
+  APP_ERROR DeInit();
+  APP_ERROR ReadImage(const std::string &imgPath, std::vector<std::vector<std::vector<float>>> &image);
+  APP_ERROR ReSize(const std::vector<std::vector<std::vector<float>>> &image,
+                   std::vector<std::vector<std::vector<float>>> &image_after);
+  APP_ERROR VectorToTensorBase_float(const std::vector<std::vector<std::vector<float>>> &batchFeatureVector,
+                                     MxBase::TensorBase &tensorBase);
+  APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> &outputs);
+  APP_ERROR Process(const std::string &image_path, const InitParam &initParam, std::vector<int> &outputs);
+  // get infer time
+  double GetInferCostMilliSec() const {return inferCostTimeMilliSec;}
+
+
+ private:
+  std::shared_ptr<MxBase::ModelInferenceProcessor> model_cnndirection;
+  MxBase::ModelDesc modelDesc_;
+  uint32_t deviceId_ = 0;
+  // infer time
+  double inferCostTimeMilliSec = 0.0;
+};
+
+#endif  // MXBASE_CNNDIRECTION_H
diff --git a/official/cv/cnn_direction_model/infer/mxbase/src/main.cpp b/official/cv/cnn_direction_model/infer/mxbase/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1003b44936fe4ca7e57e7bb95dd4b5f8dcd2fce8
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/mxbase/src/main.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <dirent.h>
+#include <fstream>
+#include "MxBase/Log/Log.h"
+#include "Cnndirection.h"
+
+namespace {
+    const uint32_t CLASS_NUM = 2;
+    const uint32_t BATCH_SIZE = 1;
+    const std::string resFileName = "../results/eval_mxbase.log";
+}  // namespace
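+
+// NOTE: resFileName and the data paths in main() are relative; the binary is
+// expected to run from a directory directly under infer/mxbase (e.g. build/).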
+
+void SplitString(const std::string &s, std::vector<std::string> *v, const std::string &c) {
+    std::string::size_type pos1, pos2;
+    pos2 = s.find(c);
+    pos1 = 0;
+    while (std::string::npos != pos2) {
+        v->push_back(s.substr(pos1, pos2 - pos1));
+
+        pos1 = pos2 + c.size();
+        pos2 = s.find(c, pos1);
+    }
+
+    if (pos1 != s.length()) {
+        v->push_back(s.substr(pos1));
+    }
+}
+
+APP_ERROR ReadImagesPath(const std::string &path, std::vector<std::string> *imagesPath, std::vector<int> *imageslabel) {
+    std::ifstream inFile;
+    inFile.open(path, std::ios_base::in);
+    std::string line;
+    // Check images path file validity
+    if (inFile.fail()) {
+        LogError << "Failed to open annotation file: " << path;
+        return APP_ERR_COMM_OPEN_FAIL;
+    }
+    std::vector<std::string> vectorStr_path;
+    std::vector<std::string> vectorStr_label;
+    std::string splitStr_path = "\t";
+    std::string splitStr_label = "_";
+    // construct label map
+    while (std::getline(inFile, line)) {
+        if (line.size() < 10) {
+            continue;
+        }
+        vectorStr_path.clear();
+        SplitString(line, &vectorStr_path, splitStr_path);
+        std::string str_path = vectorStr_path[0];
+        for (std::string::size_type pos = str_path.find('\\'); pos != std::string::npos; pos = str_path.find('\\')) {
+            str_path.replace(pos, 1, "/");
+        }
+        imagesPath->push_back(str_path);
+        vectorStr_label.clear();
+        SplitString(vectorStr_path[0], &vectorStr_label, splitStr_label);
+        int label = vectorStr_label[1][0] - '0';
+        imageslabel->push_back(label);
+    }
+
+    inFile.close();
+    return APP_ERR_OK;
+}
+
+int main(int argc, char* argv[]) {
+    InitParam initParam = {};
+    initParam.deviceId = 0;
+    initParam.checkTensor = true;
+    initParam.modelPath = "../data/models/cnn.om";
+    std::string dataPath = "../data/image/";
+    std::string annoPath = "../data/image/annotation_test.txt";
+
+    auto model_cnndirection = std::make_shared<cnndirection>();
+    APP_ERROR ret = model_cnndirection->Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "Tagging init failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    std::vector<std::string> imagesPath;
+    std::vector<int> imageslabel;
+    ret = ReadImagesPath(annoPath, &imagesPath, &imageslabel);
+
+    if (ret != APP_ERR_OK) {
+        model_cnndirection->DeInit();
+        return ret;
+    }
+
+    int img_size = imagesPath.size();
+    LogInfo << "test image size:" << img_size;
+
+    std::vector<int> outputs;
+    for (int i = 0; i < img_size; i++) {
+        ret = model_cnndirection->Process(dataPath + imagesPath[i], initParam, outputs);
+        if (ret != APP_ERR_OK) {
+            LogError << "cnndirection process failed, ret=" << ret << ".";
+            model_cnndirection->DeInit();
+            return ret;
+        }
+    }
+    float num_0 = 0;
+    float num_1 = 0;
+    float cor_0 = 0;
+    float cor_1 = 0;
+    for (int i = 0; i < img_size; i++) {
+        int label_now = imageslabel[i];
+        if (label_now == 0) {
+            num_0++;
+            if (outputs[i] == 0) {
+                cor_0++;
+            }
+        } else {
+            num_1++;
+            if (outputs[i] == 1) {
+                cor_1++;
+            }
+        }
+    }
+
+    model_cnndirection->DeInit();
+
+    double total_time = model_cnndirection->GetInferCostMilliSec() / 1000;
+
+    LogInfo<< "num1: "<< num_1<< ",acc1: "<< static_cast<float>(cor_1/num_1);
+    LogInfo<< "num0: "<< num_0<< ",acc0: "<< static_cast<float>(cor_0/num_0);
+    LogInfo<< "total num: "<< img_size<< ",acc total: "<< static_cast<float>(cor_1+cor_0)/img_size;
+    LogInfo<< "inferance total cost time: "<< total_time<< ", FPS: "<< img_size/total_time;
+
+    std::ofstream outfile(resFileName);
+    if (outfile.fail()) {
+        LogError << "Failed to open result file: " << resFileName;
+        return APP_ERR_COMM_FAILURE;
+    }
+    outfile << "num1: " << num_1 << ", acc1: " << static_cast<float>(cor_1 / num_1) << "\n";
+    outfile << "num0: " << num_0 << ", acc0: " << static_cast<float>(cor_0 / num_0) << "\n";
+    outfile << "total num: " << img_size << ", acc total: " << static_cast<float>(cor_1 + cor_0) / img_size << "\n";
+    outfile << "inference total cost time(s): " << total_time << ", FPS: " << img_size / total_time;
+    outfile.close();
+
+    return APP_ERR_OK;
+}
diff --git a/official/cv/cnn_direction_model/infer/sdk/main.py b/official/cv/cnn_direction_model/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..64093a8883dc87730a9865c1dffd70109bfc1164
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/sdk/main.py
@@ -0,0 +1,186 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+""" main.py """
+import os
+import argparse
+from StreamManagerApi import StreamManagerApi, StringVector
+from StreamManagerApi import MxDataInput, InProtobufVector, MxProtobufIn
+import MxpiDataType_pb2 as MxpiDataType
+
+import numpy as np
+from PIL import Image
+import cv2
+
+
+def parse_args(parser_):
+    """
+    Parse commandline arguments.
+    """
+    parser_.add_argument('--images_txt_path', type=str, default="../data/image/images.txt",
+                         help='image text')
+    parser_.add_argument('--labels_txt_path', type=str, default="../data/image/labels.txt",
+                         help='label')
+    return parser_
+
+
+def read_file_list(input_file):
+    """
+    :param infer file content:
+        0 xxx/xxx/a.jpg 1920 1080 0 453 369 473 391 1 588 245 608 268
+        1 xxx/xxx/b.jpg 1920 1080 1 466 403 485 422 2 793 300 809 320
+        ...
+    :return image path list
+    """
+    image_file_list = []
+    if not os.path.exists(input_file):
+        print('input file does not exist.')
+    with open(input_file, "r") as fs:
+        for line in fs.readlines():
+            if len(line) > 10:
+                line = line.strip('\n').split('\t')[0].replace('\\', '/')
+                image_file_list.append(line)
+    return image_file_list
+
+
+image_height = 64
+image_width = 512
+
+
+def resize_image(img):
+    color_fill = 255
+    scale = image_height / img.shape[0]
+    img = cv2.resize(img, None, fx=scale, fy=scale)
+    if img.shape[1] > image_width:
+        img = img[:, 0:image_width]
+    else:
+        blank_img = np.zeros((image_height, image_width, 3), np.uint8)
+        # fill the image with white
+        blank_img.fill(color_fill)
+        blank_img[:image_height, :img.shape[1]] = img
+        img = blank_img
+    data = np.array([img[...]], np.float32)
+    data = data / 127.5 - 1  # scale pixels from [0, 255] to [-1, 1]
+    return data.transpose((0, 3, 1, 2))  # NHWC -> NCHW, shape [1, 3, 64, 512]
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description='Om CNN Direction Inference')
+    parser = parse_args(parser)
+    args, _ = parser.parse_known_args()
+    # init stream manager
+    stream_manager = StreamManagerApi()
+    ret = stream_manager.InitManager()
+    if ret != 0:
+        print("Failed to init Stream manager, ret=%s" % str(ret))
+        exit()
+
+    # create streams by pipeline config file
+    with open("../data/config/cnndirection.pipeline", 'rb') as f:
+        pipeline = f.read()
+    ret = stream_manager.CreateMultipleStreams(pipeline)
+    if ret != 0:
+        print("Failed to create Stream, ret=%s" % str(ret))
+        exit()
+
+    # Construct the input of the stream
+
+    res_dir_name = 'result'
+    if not os.path.exists(res_dir_name):
+        os.makedirs(res_dir_name)
+
+    acc = 0
+    acc_1 = 0
+    acc_0 = 0
+    num_1 = 0
+    num_0 = 0
+    infer_file = '../data/image/annotation_test.txt'
+    file_list = read_file_list(infer_file)
+
+    img_size = len(file_list)
+    results = []
+
+    for file in file_list:
+        image = Image.open(os.path.join('../data/image/', file))
+        image = np.array(image)
+        image = resize_image(image)
+        label = int(file.split('_')[-1].split('.')[0])
+
+        # Construct the input of the stream
+        data_input1 = MxDataInput()
+        data_input1.data = image.tobytes()
+        tensorPackageList1 = MxpiDataType.MxpiTensorPackageList()
+        tensorPackage1 = tensorPackageList1.tensorPackageVec.add()
+        tensorVec1 = tensorPackage1.tensorVec.add()
+        tensorVec1.deviceId = 0
+        tensorVec1.memType = 0
+        for t in image.shape:
+            tensorVec1.tensorShape.append(t)
+        tensorVec1.dataStr = data_input1.data
+        tensorVec1.tensorDataSize = len(image.tobytes())
+        protobufVec1 = InProtobufVector()
+        protobuf1 = MxProtobufIn()
+        protobuf1.key = b'appsrc0'
+        protobuf1.type = b'MxTools.MxpiTensorPackageList'
+        protobuf1.protobuf = tensorPackageList1.SerializeToString()
+        protobufVec1.push_back(protobuf1)
+
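+        # Send the tensor package to the 'appsrc0' element of the 'cnn_direction'
+        # stream defined in ../data/config/cnndirection.pipeline.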
+        unique_id = stream_manager.SendProtobuf(b'cnn_direction', b'appsrc0', protobufVec1)
+
+        # Obtain the inference result by specifying streamName and uniqueId.
+        keyVec = StringVector()
+        keyVec.push_back(b'mxpi_tensorinfer0')
+        infer_result = stream_manager.GetProtobuf(b'cnn_direction', 0, keyVec)
+
+        if infer_result.size() == 0:
+            print("inferResult is null")
+            exit()
+        if infer_result[0].errorCode != 0:
+            print("GetProtobuf error. errorCode=%d" % (
+                infer_result[0].errorCode))
+            exit()
+        # get infer result
+        result = MxpiDataType.MxpiTensorPackageList()
+        result.ParseFromString(infer_result[0].messageBuf)
+        res = np.frombuffer(result.tensorPackageVec[0].tensorVec[0].dataStr, dtype=np.float32)
+
+        results.append(res)
+
+        if label == 0:
+            num_0 += 1
+            if res[0] > res[1]:
+                acc += 1
+                acc_0 += 1
+        else:
+            num_1 += 1
+            if res[0] <= res[1]:
+                acc += 1
+                acc_1 += 1
+
+    results = np.vstack(results)
+    np.savetxt("./result/infer_results.txt", results, fmt='%.06f')
+
+    # destroy streams
+    stream_manager.DestroyAllStreams()
+    print('Eval size:', img_size)
+    print('num of label 1:', num_1, 'total acc1:', acc_1 / num_1)
+    print('num of label 0:', num_0, 'total acc0:', acc_0 / num_0)
+    print('total acc:', acc / img_size)
+
+    with open("../results/eval_sdk.log", 'w') as f:
+        f.write('Eval size: {} \n'.format(img_size))
+        f.write('num of label 1: {}, total acc1: {} \n'.format(num_1, acc_1 / num_1))
+        f.write('num of label 0: {}, total acc0: {} \n'.format(num_0, acc_0 / num_0))
+        f.write('total acc: {} \n'.format(acc / img_size))
diff --git a/official/cv/cnn_direction_model/infer/sdk/prec/eval_sdk.py b/official/cv/cnn_direction_model/infer/sdk/prec/eval_sdk.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b6286ca11240b0bf53f78e520602997a590e403
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/sdk/prec/eval_sdk.py
@@ -0,0 +1,61 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+""" eval_sdk.py """
+import os
+import numpy as np
+
+
+def read_file_list(input_file):
+    """
+    :param infer file content:
+        0 xxx/xxx/a.jpg 1920 1080 0 453 369 473 391 1 588 245 608 268
+        1 xxx/xxx/b.jpg 1920 1080 1 466 403 485 422 2 793 300 809 320
+        ...
+    :return image path list
+    """
+    image_file_l = []
+    if not os.path.exists(input_file):
+        print('input file does not exist.')
+    with open(input_file, "r") as fs:
+        for line in fs.readlines():
+            if len(line) > 10:
+                line = line.strip('\n').split('\t')[0].replace('\\', '/')
+                image_file_l.append(line)
+    return image_file_l
+
+
+def cal_acc(result, gt_classes):
+    img_total = len(gt_classes)
+    top1_output = np.argmax(result, axis=-1)
+    top1_correct = np.equal(top1_output, gt_classes).sum()
+    acc1 = 100.0 * top1_correct / img_total
+    print('top1_correct={}, total={}, acc={:.2f}%'.format(top1_correct, img_total, acc1))
+
+
+if __name__ == "__main__":
+    results_path = '../result/infer_results.txt'
+    infer_file = '../../data/image/annotation_test.txt'
+    results = np.loadtxt(results_path)
+
+    file_list = read_file_list(infer_file)
+    labels = [int(file.split('_')[-1].split('.')[0]) for file in file_list]
+    labels = np.array(labels)
+
+    cal_acc(results, labels)
diff --git a/official/cv/cnn_direction_model/infer/sdk/run.sh b/official/cv/cnn_direction_model/infer/sdk/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..86a725bc7626b4bd3ac44f279cc30dc89734a681
--- /dev/null
+++ b/official/cv/cnn_direction_model/infer/sdk/run.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Simple log helper functions
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+# extend PYTHONPATH so that StreamManagerApi.py can be imported
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+python3.7 main.py
+exit 0
\ No newline at end of file
diff --git a/official/cv/cnn_direction_model/modelarts/train_modelart.py b/official/cv/cnn_direction_model/modelarts/train_modelart.py
new file mode 100644
index 0000000000000000000000000000000000000000..be0f3dfcc633f4edfa395073e2b41c9c8ae2a17f
--- /dev/null
+++ b/official/cv/cnn_direction_model/modelarts/train_modelart.py
@@ -0,0 +1,185 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""train CNN direction model."""
+
+import os
+import time
+import random
+from ast import literal_eval as liter
+import numpy as np
+import mindspore as ms
+from mindspore import Tensor, export
+from mindspore import context
+from mindspore import dataset as de
+from mindspore.communication.management import init, get_rank
+from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
+from mindspore.nn.metrics import Accuracy
+from mindspore.nn.optim.adam import Adam
+from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
+from mindspore.train.model import Model
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from mindspore.context import ParallelMode
+from src.cnn_direction_model import CNNDirectionModel
+from src.dataset import create_dataset_train
+from src.model_utils.config import config
+from src.model_utils.moxing_adapter import moxing_wrapper
+from src.model_utils.device_adapter import get_device_id, get_device_num, get_rank_id
+
+
+random.seed(11)
+np.random.seed(11)
+de.config.set_seed(11)
+ms.common.set_seed(11)
+
+
+def modelarts_pre_process():
+    '''modelarts pre process function.'''
+    def unzip(zip_file, save_dir):
+        import zipfile
+        s_time = time.time()
+        if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)):
+            zip_isexist = zipfile.is_zipfile(zip_file)
+            if zip_isexist:
+                fz = zipfile.ZipFile(zip_file, 'r')
+                data_num = len(fz.namelist())
+                print("Extract Start...")
+                print("unzip file num: {}".format(data_num))
+                data_print = int(data_num / 100) if data_num > 100 else 1
+                i = 0
+                for file in fz.namelist():
+                    if i % data_print == 0:
+                        print("unzip percent: {}%".format(int(i * 100 / data_num)), flush=True)
+                    i += 1
+                    fz.extract(file, save_dir)
+                print("cost time: {}min:{}s.".format(int((time.time() - s_time) / 60),
+                                                     int(int(time.time() - s_time) % 60)))
+                print("Extract Done.")
+            else:
+                print("This is not zip.")
+        else:
+            print("Zip has been extracted.")
+
+    if config.need_modelarts_dataset_unzip:
+        zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + ".zip")
+        save_dir_1 = os.path.join(config.data_path)
+
+        sync_lock = "/tmp/unzip_sync.lock"
+
+        # Each server contains at most 8 devices.
+        if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
+            print("Zip file path: ", zip_file_1)
+            print("Unzip file save dir: ", save_dir_1)
+            unzip(zip_file_1, save_dir_1)
+            print("===Finish extract data synchronization===")
+            try:
+                os.mknod(sync_lock)
+            except IOError:
+                pass
+
+        while True:
+            if os.path.exists(sync_lock):
+                break
+            time.sleep(1)
+
+        print("Device: {}, Finish sync unzip data from {} to {}.".format(get_device_id(), zip_file_1, save_dir_1))
+
+
+@moxing_wrapper(pre_process=modelarts_pre_process)
+def train():
+    config.lr = liter(config.lr)
+    target = config.device_target
+    ckpt_save_dir = config.train_url
+
+    context.set_context(mode=context.GRAPH_MODE,
+                        device_target=target,
+                        save_graphs=False)
+    rank_size = get_device_num()
+    run_distribute = rank_size > 1
+    device_id = get_device_id()
+
+    if target == "Ascend":
+        # init context
+        rank_id = get_rank_id()
+        context.set_context(device_id=device_id)
+
+        if run_distribute:
+            context.set_auto_parallel_context(device_num=rank_size, parallel_mode=ParallelMode.DATA_PARALLEL)
+            init()
+    elif target == "GPU":
+        rank_id = 0
+        if run_distribute:
+            context.set_auto_parallel_context(device_num=rank_size, parallel_mode=ParallelMode.DATA_PARALLEL)
+            init()
+            rank_id = get_rank()
+    print("train args: ", config, "\ncfg: ", config,
+          "\nparallel args: rank_id {}, device_id {}, rank_size {}".format(rank_id, device_id, rank_size))
+
+
+    config.rank_save_ckpt_flag = 0
+    if config.is_save_on_master:
+        if rank_id == 0:
+            config.rank_save_ckpt_flag = 1
+    else:
+        config.rank_save_ckpt_flag = 1
+
+    # create dataset
+    dataset_name = config.dataset_name
+    dataset = create_dataset_train(config.data_url + "/" + dataset_name +
+                                   ".mindrecord", config=config, dataset_name=dataset_name)
+    step_size = dataset.get_dataset_size()
+
+    print("step_size ", step_size, flush=True)
+
+    # define net
+    net = CNNDirectionModel([3, 64, 48, 48, 64], [64, 48, 48, 64, 64], [256, 64], [64, 512])
+
+    # init weight
+    if config.pre_trained:
+        param_dict = load_checkpoint(config.pre_trained)
+        load_param_into_net(net, param_dict)
+
+    lr = config.lr
+    lr = Tensor(lr, ms.float32)
+
+    # define opt
+    opt = Adam(params=net.trainable_params(), learning_rate=lr, eps=1e-07)
+
+    # define loss, model
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="sum")
+
+    model = Model(net, loss_fn=loss, optimizer=opt, metrics={"Accuracy": Accuracy()})
+
+    # define callbacks
+    time_cb = TimeMonitor(data_size=step_size)
+    loss_cb = LossMonitor()
+    cb = [time_cb, loss_cb]
+    if config.save_checkpoint:
+        config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_steps,
+                                     keep_checkpoint_max=config.keep_checkpoint_max)
+        ckpt_cb = ModelCheckpoint(prefix="cnn_direction_model", directory=ckpt_save_dir, config=config_ck)
+        cb += [ckpt_cb]
+
+    # train model
+    model.train(config.epoch_size, dataset, callbacks=cb, dataset_sink_mode=False)
+
+    net.set_train(False)
+
+    input_data = Tensor(np.zeros([1, 3, config.im_size_h, config.im_size_w]), ms.float32)
+
+    export(net, input_data, file_name=os.path.join(ckpt_save_dir, config.file_name), file_format="AIR")
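+    # The exported AIR model can be converted to OM for Ascend 310 inference
+    # with infer/convert/air2om.sh.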
+
+
+if __name__ == "__main__":
+    train()