From 750074674eaa7755bfbe9683646de4906483124d Mon Sep 17 00:00:00 2001
From: PEETA <645517163@qq.com>
Date: Fri, 6 May 2022 18:48:34 +0800
Subject: [PATCH] 310

---
 official/cv/MCNN/README.md                    |   6 +-
 .../cv/MCNN/ascend310_infer/CMakeLists.txt    |  16 ++
 official/cv/MCNN/ascend310_infer/build.sh     |  23 +++
 official/cv/MCNN/ascend310_infer/inc/utils.h  |  33 ++++
 official/cv/MCNN/ascend310_infer/src/main.cc  | 155 ++++++++++++++++++
 official/cv/MCNN/ascend310_infer/src/utils.cc | 141 ++++++++++++++++
 official/cv/MCNN/postprocess.py               |  53 ++++++
 official/cv/MCNN/preprocess.py                |  48 ++++++
 official/cv/MCNN/scripts/run_infer_310.sh     | 117 +++++++++++++
 9 files changed, 591 insertions(+), 1 deletion(-)
 create mode 100644 official/cv/MCNN/ascend310_infer/CMakeLists.txt
 create mode 100644 official/cv/MCNN/ascend310_infer/build.sh
 create mode 100644 official/cv/MCNN/ascend310_infer/inc/utils.h
 create mode 100644 official/cv/MCNN/ascend310_infer/src/main.cc
 create mode 100644 official/cv/MCNN/ascend310_infer/src/utils.cc
 create mode 100644 official/cv/MCNN/postprocess.py
 create mode 100644 official/cv/MCNN/preprocess.py
 create mode 100644 official/cv/MCNN/scripts/run_infer_310.sh

diff --git a/official/cv/MCNN/README.md b/official/cv/MCNN/README.md
index 4c6510d91..628c2fff7 100644
--- a/official/cv/MCNN/README.md
+++ b/official/cv/MCNN/README.md
@@ -114,9 +114,13 @@ bash run_infer_310.sh ../mcnn.mindir ../test_data/images ../test_data/ground_tru
 ├── cv
     ├── MCNN
         ├── README.md                     // descriptions about MCNN
+        ├── ascend310_infer               // Ascend 310 inference source code
+        ├── infer
+        ├── modelarts
         ├── scripts
         │   ├── run_distribute_train.sh   // train in distribute
-        │   ├── run_eval.sh               // eval in ascend
+        │   ├── run_eval.sh               // eval in ascend
+        │   ├── run_infer_310.sh          // infer on Ascend 310
         │   ├── run_standalone_train.sh   // train in standalone
         │   ├── run_train_gpu.sh          // train on GPU
         ├── src
diff --git a/official/cv/MCNN/ascend310_infer/CMakeLists.txt b/official/cv/MCNN/ascend310_infer/CMakeLists.txt
new file mode 100644
index 000000000..d1ea8e790
--- /dev/null
+++ b/official/cv/MCNN/ascend310_infer/CMakeLists.txt
@@ -0,0 +1,16 @@
+cmake_minimum_required(VERSION 3.14.1)
+project(Ascend310Infer)
+add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -std=c++17 -Werror -Wall -fPIE -Wl,--allow-shlib-undefined")
+set(PROJECT_SRC_ROOT ${CMAKE_CURRENT_LIST_DIR}/)
+option(MINDSPORE_PATH "mindspore install path" "")
+include_directories(${MINDSPORE_PATH})
+include_directories(${MINDSPORE_PATH}/include)
+include_directories(${PROJECT_SRC_ROOT})
+
+find_library(MS_LIB libmindspore.so ${MINDSPORE_PATH}/lib)
+file(GLOB_RECURSE MD_LIB ${MINDSPORE_PATH}/_c_dataengine*)
+find_package(gflags REQUIRED)
+
+add_executable(main src/main.cc src/utils.cc)
+target_link_libraries(main ${MS_LIB} ${MD_LIB} gflags)
diff --git a/official/cv/MCNN/ascend310_infer/build.sh b/official/cv/MCNN/ascend310_infer/build.sh
new file mode 100644
index 000000000..770a8851e
--- /dev/null
+++ b/official/cv/MCNN/ascend310_infer/build.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +if [ ! -d out ]; then + mkdir out +fi +cd out || exit +cmake .. \ + -DMINDSPORE_PATH="`pip show mindspore-ascend | grep Location | awk '{print $2"/mindspore"}' | xargs realpath`" +make diff --git a/official/cv/MCNN/ascend310_infer/inc/utils.h b/official/cv/MCNN/ascend310_infer/inc/utils.h new file mode 100644 index 000000000..59862dcf9 --- /dev/null +++ b/official/cv/MCNN/ascend310_infer/inc/utils.h @@ -0,0 +1,33 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_INFERENCE_UTILS_H_ +#define MINDSPORE_INFERENCE_UTILS_H_ + +#include <sys/stat.h> +#include <dirent.h> +#include <vector> +#include <string> +#include <memory> + +#include "include/api/types.h" + +std::vector<std::string> GetAllFiles(std::string_view dirName); +DIR *OpenDir(std::string_view dirName); +std::string RealPath(std::string_view path); +mindspore::MSTensor ReadFileToTensor(const std::string &file); +int WriteResult(const std::string& imageFile, const std::vector<mindspore::MSTensor> &outputs); +#endif diff --git a/official/cv/MCNN/ascend310_infer/src/main.cc b/official/cv/MCNN/ascend310_infer/src/main.cc new file mode 100644 index 000000000..4f199697c --- /dev/null +++ b/official/cv/MCNN/ascend310_infer/src/main.cc @@ -0,0 +1,155 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include <sys/time.h>
+#include <gflags/gflags.h>
+#include <dirent.h>
+#include <iostream>
+#include <string>
+#include <algorithm>
+#include <iosfwd>
+#include <map>
+#include <vector>
+#include <fstream>
+#include <sstream>
+
+#include "../inc/utils.h"
+#include "include/dataset/execute.h"
+#include "include/dataset/transforms.h"
+#include "include/dataset/vision.h"
+#include "include/dataset/vision_ascend.h"
+#include "include/api/types.h"
+#include "include/api/model.h"
+#include "include/api/serialization.h"
+#include "include/api/context.h"
+
+
+using mindspore::Context;
+using mindspore::Serialization;
+using mindspore::Model;
+using mindspore::Status;
+using mindspore::dataset::Execute;
+using mindspore::MSTensor;
+using mindspore::ModelType;
+using mindspore::GraphCell;
+using mindspore::kSuccess;
+using mindspore::Graph;
+using mindspore::dataset::TensorTransform;
+using mindspore::dataset::vision::Decode;
+using mindspore::dataset::vision::Resize;
+using mindspore::dataset::vision::Normalize;
+using mindspore::dataset::vision::HWC2CHW;
+
+DEFINE_string(model_path, "../mcnn.mindir", "model path");
+DEFINE_string(dataset_path, "../test_data/preprocess_data", "dataset path");
+DEFINE_string(label_path, "../test_data/ground_truth_csv", "label path");
+DEFINE_int32(input_width, 960, "input width");
+DEFINE_int32(input_height, 576, "input height");
+DEFINE_int32(device_id, 0, "device id");
+
+
+int main(int argc, char **argv) {
+  gflags::ParseCommandLineFlags(&argc, &argv, true);
+  if (RealPath(FLAGS_model_path).empty()) {
+    std::cout << "Invalid mindir" << std::endl;
+    return 1;
+  }
+
+  auto context = std::make_shared<Context>();
+  auto ascend310_info = std::make_shared<mindspore::Ascend310DeviceInfo>();
+  ascend310_info->SetDeviceID(FLAGS_device_id);
+  context->MutableDeviceInfo().push_back(ascend310_info);
+
+  Graph graph;
+  Status ret = Serialization::Load(FLAGS_model_path, ModelType::kMindIR, &graph);
+  if (ret != kSuccess) {
+    std::cout << "Load model failed." << std::endl;
+    return 1;
+  }
+
+  Model model;
+  ret = model.Build(GraphCell(graph), context);
+  if (ret != kSuccess) {
+    std::cout << "ERROR: Build failed." << std::endl;
+    return 1;
+  }
+
+  std::vector<MSTensor> modelInputs = model.GetInputs();
+
+  auto all_files = GetAllFiles(FLAGS_dataset_path);
+  if (all_files.empty()) {
+    std::cout << "ERROR: no input data." << std::endl;
+    return 1;
+  }
+
+  auto all_labels = GetAllFiles(FLAGS_label_path);
+
+  std::map<double, double> costTime_map;
+  size_t size = all_files.size();
+
+  for (size_t i = 0; i < size; ++i) {
+    struct timeval start;
+    struct timeval end;
+    double startTime_ms;
+    double endTime_ms;
+    std::vector<MSTensor> inputs;
+    std::vector<MSTensor> outputs;
+
+    std::cout << "Start predict input file: " << all_files[i] << std::endl;
+    std::cout << " Its label is: " << (i < all_labels.size() ? all_labels[i] : "(no label file)") << std::endl;
+
+    mindspore::MSTensor image = ReadFileToTensor(all_files[i]);
+
+    inputs.emplace_back(modelInputs[0].Name(), modelInputs[0].DataType(), modelInputs[0].Shape(),
+                        image.Data().get(), image.DataSize());
+
+    gettimeofday(&start, NULL);
+    ret = model.Predict(inputs, &outputs);
+    gettimeofday(&end, NULL);
+    if (ret != kSuccess) {
+      std::cout << "Predict " << all_files[i] << " failed."
<< std::endl; + return 1; + } + startTime_ms = (1.0 * start.tv_sec * 1000000 + start.tv_usec) / 1000; + endTime_ms = (1.0 * end.tv_sec * 1000000 + end.tv_usec) / 1000; + costTime_map.insert(std::pair<double, double>(startTime_ms, endTime_ms)); + int rst = WriteResult(all_files[i], outputs); + if (rst != 0) { + std::cout << "write result failed." << std::endl; + return rst; + } + } + double average = 0.0; + int infer_cnt = 0; + + for (auto iter = costTime_map.begin(); iter != costTime_map.end(); iter++) { + double diff = 0.0; + diff = iter->second - iter->first; + average += diff; + infer_cnt++; + } + + average = average / infer_cnt; + std::stringstream timeCost; + timeCost << "NN inference cost average time: "<< average << " ms of infer_count " << infer_cnt << std::endl; + std::cout << "NN inference cost average time: "<< average << "ms of infer_count " << infer_cnt << std::endl; + std::string file_name = "./time_Result" + std::string("/test_perform_static.txt"); + std::ofstream file_stream(file_name.c_str(), std::ios::trunc); + file_stream << timeCost.str(); + file_stream.close(); + costTime_map.clear(); + return 0; +} diff --git a/official/cv/MCNN/ascend310_infer/src/utils.cc b/official/cv/MCNN/ascend310_infer/src/utils.cc new file mode 100644 index 000000000..04e3588d5 --- /dev/null +++ b/official/cv/MCNN/ascend310_infer/src/utils.cc @@ -0,0 +1,141 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "inc/utils.h" + +#include <fstream> +#include <algorithm> +#include <iostream> + +using mindspore::MSTensor; +using mindspore::DataType; + + +std::vector<std::string> GetAllFiles(std::string_view dirName) { + struct dirent *filename; + DIR *dir = OpenDir(dirName); + if (dir == nullptr) { + return {}; + } + std::vector<std::string> res; + while ((filename = readdir(dir)) != nullptr) { + std::string dName = std::string(filename->d_name); + if (dName == "." || dName == ".." 
|| filename->d_type != DT_REG) { + continue; + } + res.emplace_back(std::string(dirName) + "/" + filename->d_name); + } + std::sort(res.begin(), res.end()); + for (auto &f : res) { + std::cout << "image file: " << f << std::endl; + } + return res; +} + +int WriteResult(const std::string& imageFile, const std::vector<MSTensor> &outputs) { + std::string homePath = "./result_Files"; + const int INVALID_POINTER = -1; + const int ERROR = -2; + for (size_t i = 0; i < outputs.size(); ++i) { + size_t outputSize; + std::shared_ptr<const void> netOutput = outputs[i].Data(); + outputSize = outputs[i].DataSize(); + int pos = imageFile.rfind('/'); + std::string fileName(imageFile, pos + 1); + fileName.replace(fileName.find('.'), fileName.size() - fileName.find('.'), '_' + std::to_string(i) + ".bin"); + std::string outFileName = homePath + "/" + fileName; + FILE *outputFile = fopen(outFileName.c_str(), "wb"); + if (outputFile == nullptr) { + std::cout << "open result file " << outFileName << " failed" << std::endl; + return INVALID_POINTER; + } + size_t size = fwrite(netOutput.get(), sizeof(char), outputSize, outputFile); + if (size != outputSize) { + fclose(outputFile); + outputFile = nullptr; + std::cout << "write result file " << outFileName << " failed, write size[" << size << + "] is smaller than output size[" << outputSize << "], maybe the disk is full." << std::endl; + return ERROR; + } + fclose(outputFile); + outputFile = nullptr; + } + return 0; +} + +mindspore::MSTensor ReadFileToTensor(const std::string &file) { + if (file.empty()) { + std::cout << "Pointer file is nullptr" << std::endl; + return mindspore::MSTensor(); + } + + std::ifstream ifs(file); + if (!ifs.good()) { + std::cout << "File: " << file << " is not exist" << std::endl; + return mindspore::MSTensor(); + } + + if (!ifs.is_open()) { + std::cout << "File: " << file << "open failed" << std::endl; + return mindspore::MSTensor(); + } + + ifs.seekg(0, std::ios::end); + size_t size = ifs.tellg(); + mindspore::MSTensor buffer(file, mindspore::DataType::kNumberTypeUInt8, {static_cast<int64_t>(size)}, nullptr, size); + + ifs.seekg(0, std::ios::beg); + ifs.read(reinterpret_cast<char *>(buffer.MutableData()), size); + ifs.close(); + + return buffer; +} + + +DIR *OpenDir(std::string_view dirName) { + if (dirName.empty()) { + std::cout << " dirName is null ! " << std::endl; + return nullptr; + } + std::string realPath = RealPath(dirName); + struct stat s; + lstat(realPath.c_str(), &s); + if (!S_ISDIR(s.st_mode)) { + std::cout << "dirName is not a valid directory !" 
<< std::endl;
+    return nullptr;
+  }
+  DIR *dir = opendir(realPath.c_str());
+  if (dir == nullptr) {
+    std::cout << "Can not open dir " << dirName << std::endl;
+    return nullptr;
+  }
+  std::cout << "Successfully opened the dir " << dirName << std::endl;
+  return dir;
+}
+
+std::string RealPath(std::string_view path) {
+  char realPathMem[PATH_MAX] = {0};
+  char *realPathRet = nullptr;
+  realPathRet = realpath(path.data(), realPathMem);
+  if (realPathRet == nullptr) {
+    std::cout << "File: " << path << " does not exist.";
+    return "";
+  }
+
+  std::string realPath(realPathMem);
+  std::cout << path << " realpath is: " << realPath << std::endl;
+  return realPath;
+}
diff --git a/official/cv/MCNN/postprocess.py b/official/cv/MCNN/postprocess.py
new file mode 100644
index 000000000..6e56eeefa
--- /dev/null
+++ b/official/cv/MCNN/postprocess.py
@@ -0,0 +1,53 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""postprocess for 310 inference"""
+import os
+import argparse
+import numpy as np
+import pandas as pd
+
+
+parser = argparse.ArgumentParser(description="postprocess")
+label_path = "../test_data/ground_truth_csv/"
+parser.add_argument("--result_dir", type=str, default="./ascend310_infer/result_Files", help="result files path.")
+parser.add_argument("--label_dir", type=str, default=label_path, help="ground truth csv path.")
+args = parser.parse_args()
+
+
+if __name__ == '__main__':
+
+    rst_path = args.result_dir
+    # one ground-truth density map (CSV) is expected per test image
+    label_files = [filename for filename in os.listdir(args.label_dir) \
+                   if os.path.isfile(os.path.join(args.label_dir, filename))]
+    label_files.sort()
+    mae = 0
+    mse = 0
+    for idx, label_file in enumerate(label_files):
+        # compare ground truth IMG_<n>.csv with the prediction IMG_<n>_0.bin
+        den = pd.read_csv(os.path.join(args.label_dir, "IMG_"+str(idx+1)+".csv"), sep=',', header=None).values
+        den = den.astype(np.float32, copy=False)
+
+        f_name = os.path.join(rst_path, "IMG" + "_" + str(idx+1) + "_0.bin")
+        pred = np.fromfile(f_name, np.float32)
+        gt_count = np.sum(den)
+        et_count = np.sum(pred)
+        mae += abs(gt_count - et_count)
+        mse += ((gt_count - et_count) * (gt_count - et_count))
+        print(os.path.join(args.label_dir, "IMG_"+str(idx+1)+".csv"), np.sum(den))
+        print(f_name, np.sum(pred))
+    mae = mae / len(label_files)
+    mse = np.sqrt(mse / len(label_files))
+    print('MAE:', mae, ' MSE:', mse)
diff --git a/official/cv/MCNN/preprocess.py b/official/cv/MCNN/preprocess.py
new file mode 100644
index 000000000..b213380ca
--- /dev/null
+++ b/official/cv/MCNN/preprocess.py
@@ -0,0 +1,48 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""preprocess for 310 inference"""
+import os
+import argparse
+import cv2
+import numpy as np
+
+parser = argparse.ArgumentParser(description="mcnn preprocess data")
+parser.add_argument("--dataset_path", type=str, default="./test_data/images/", help="dataset path.")
+parser.add_argument("--output_path", type=str, default="./test_data/preprocess_data/", help="output path.")
+args = parser.parse_args()
+
+
+def preprocess_images(data_path, output_path):
+    """Resize each grayscale test image to a multiple of 4, center-pad it to 1024 x 1024 and dump it as .bin."""
+    data_files = [filename for filename in os.listdir(data_path) \
+                  if os.path.isfile(os.path.join(data_path, filename))]
+    if not os.path.exists(output_path):
+        os.makedirs(output_path)
+    for fname in data_files:
+        img = cv2.imread(os.path.join(data_path, fname), 0)
+        img = img.astype(np.float32, copy=False)
+        ht = img.shape[0]
+        wd = img.shape[1]
+        ht_1 = (ht // 4) * 4
+        wd_1 = (wd // 4) * 4
+        img = cv2.resize(img, (wd_1, ht_1))
+        hang = (1024 - ht_1) // 2
+        lie = (1024 - wd_1) // 2
+        img = np.pad(img, ((hang, hang), (lie, lie)), 'constant')
+        img.tofile(os.path.join(output_path, fname+'.bin'))
+
+
+if __name__ == '__main__':
+    preprocess_images(args.dataset_path, args.output_path)
diff --git a/official/cv/MCNN/scripts/run_infer_310.sh b/official/cv/MCNN/scripts/run_infer_310.sh
new file mode 100644
index 000000000..fca7103c8
--- /dev/null
+++ b/official/cv/MCNN/scripts/run_infer_310.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [[ $# -lt 3 || $# -gt 4 ]]; then
+    echo "Usage: bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [LABEL_PATH] [DEVICE_ID]
+    DEVICE_ID is optional, it can be set by environment variable device_id, otherwise the value is zero"
+exit 1
+fi
+
+get_real_path(){
+    if [ "${1:0:1}" == "/" ]; then
+        echo "$1"
+    else
+        echo "$(realpath -m $PWD/$1)"
+    fi
+}
+
+model=$(get_real_path $1)
+data_path=$(get_real_path $2)
+label_path=$(get_real_path $3)
+if [ $# == 4 ]; then
+    device_id=$4
+elif [ $# == 3 ]; then
+    if [ -z $device_id ]; then
+        device_id=0
+    else
+        device_id=$device_id
+    fi
+fi
+
+echo $model
+echo $data_path
+echo $label_path
+echo $device_id
+
+export ASCEND_HOME=/usr/local/Ascend/
+if [ -d ${ASCEND_HOME}/ascend-toolkit ]; then
+    export PATH=$ASCEND_HOME/fwkacllib/bin:$ASCEND_HOME/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/ascend-toolkit/latest/atc/bin:$PATH
+    export LD_LIBRARY_PATH=$ASCEND_HOME/fwkacllib/lib64:/usr/local/lib:$ASCEND_HOME/ascend-toolkit/latest/atc/lib64:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH
+    export TBE_IMPL_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe
+    export PYTHONPATH=$ASCEND_HOME/fwkacllib/python/site-packages:${TBE_IMPL_PATH}:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/python/site-packages:$PYTHONPATH
+    export ASCEND_OPP_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp
+else
+    export PATH=$ASCEND_HOME/fwkacllib/bin:$ASCEND_HOME/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/atc/ccec_compiler/bin:$ASCEND_HOME/atc/bin:$PATH
+    export LD_LIBRARY_PATH=$ASCEND_HOME/fwkacllib/lib64:/usr/local/lib:$ASCEND_HOME/atc/lib64:$ASCEND_HOME/acllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH
+    export PYTHONPATH=$ASCEND_HOME/fwkacllib/python/site-packages:$ASCEND_HOME/atc/python/site-packages:$PYTHONPATH
+    export ASCEND_OPP_PATH=$ASCEND_HOME/opp
+fi
+
+function preprocess_data()
+{
+    if [ -d preprocess_Result ]; then
+        rm -rf ./preprocess_Result
+    fi
+    mkdir preprocess_Result
+    python ../preprocess.py --dataset_path=$data_path --output_path=./preprocess_Result &> preprocess.log
+    data_path=./preprocess_Result
+}
+
+function compile_app()
+{
+    cd ../ascend310_infer || exit
+    if [ -f "Makefile" ]; then
+        make clean
+    fi
+    bash build.sh &> build.log
+
+    if [ $? -ne 0 ]; then
+        echo "compile app code failed"
+        exit 1
+    fi
+    cd - || exit
+}
+
+function infer()
+{
+    if [ -d result_Files ]; then
+        rm -rf ./result_Files
+    fi
+    if [ -d time_Result ]; then
+        rm -rf ./time_Result
+    fi
+    mkdir result_Files
+    mkdir time_Result
+    ../ascend310_infer/out/main --model_path=$model --dataset_path=$data_path --label_path=$label_path --device_id=$device_id &> infer.log
+
+    if [ $? -ne 0 ]; then
+        echo "execute inference failed"
+        exit 1
+    fi
+}
+
+function cal_acc()
+{
+    python ../postprocess.py --label_dir=$label_path --result_dir=result_Files &> acc.log
+    if [ $? -ne 0 ]; then
+        echo "calculate accuracy failed"
+        exit 1
+    fi
+}
+preprocess_data
+compile_app
+infer
+cal_acc
\ No newline at end of file
--
GitLab
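For readers trying the patch out, a minimal sketch of the intended call sequence follows. It is pieced together from the README line shown in the hunk above and from the defaults baked into `main.cc` and `postprocess.py`; the directory layout is an assumption, and exporting `mcnn.mindir` is not covered by this patch.

```bash
# Assumed layout: run from official/cv/MCNN/scripts, with the exported
# mcnn.mindir and the test set one directory above (paths are illustrative).
cd official/cv/MCNN/scripts

# Preprocess the images, build ascend310_infer/out/main, run inference on the
# Ascend 310 device and compute MAE/MSE, all driven by the new script
# (DEVICE_ID is optional and defaults to 0).
bash run_infer_310.sh ../mcnn.mindir ../test_data/images ../test_data/ground_truth_csv 0

# Outputs written into the scripts directory by the run:
ls result_Files                           # IMG_<n>_0.bin density predictions
cat time_Result/test_perform_static.txt   # average NN inference time
cat acc.log                               # MAE / MSE printed by postprocess.py
```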