Unverified Commit 2adcbb19 authored by i-robot, committed by Gitee

!1388 [Xi'an Jiaotong University][University Contribution][Mindspore][MCNN] High-performance pretrained model submission + SDK inference + mxbase inference + ModelArts scripts

Merge pull request !1388 from 188******86/mindx_pr
parents 5fd01cbc c0bdf1b0
Showing files with 1767 additions and 0 deletions
ARG FROM_IMAGE_NAME
FROM ${FROM_IMAGE_NAME}
# Configure the image proxy
ENV http_proxy="http://192.168.88.254:8080"
ENV https_proxy="http://192.168.88.254:8080"
# Add users and groups; the user ID should match the current user
RUN useradd -d /home/hwMindX -u 9000 -m -s /bin/bash hwMindX && \
useradd -d /home/HwHiAiUser -u 1000 -m -s /bin/bash HwHiAiUser && \
useradd -d /home/sjtu_liu -u 1001 -m -s /bin/bash sjtu_liu -g HwHiAiUser && \
usermod -a -G HwHiAiUser hwMindX
# Add a symbolic link for Python
RUN ln -s /usr/local/python3.7.5/bin/python3.7 /usr/bin/python
# Install required system packages; adjust to the actual model dependencies.
# Group the fallback branch explicitly so a successful first install does not
# fall through into the dpkg cleanup chain.
RUN apt-get update && \
(apt-get install libglib2.0-dev dos2unix -y || \
(rm -rf /var/lib/dpkg/info && \
mkdir /var/lib/dpkg/info && \
apt-get install libglib2.0-dev dos2unix -y)) && \
pip install pytest-runner==5.3.0
# Install Python dependencies; adjust to the actual model dependencies
COPY requirements.txt .
RUN pip3.7 install -r requirements.txt
\ No newline at end of file
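For reference, a minimal build sketch for this Dockerfile; the image tag and base image below are placeholders, not part of the original submission:

# Hypothetical build command; requirements.txt must sit next to the Dockerfile.
docker build -t mcnn-infer:latest \
    --build-arg FROM_IMAGE_NAME=<ascend_base_image:tag> \
    .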
This diff is collapsed.
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
soc_version=Ascend310
input_shape="x:1,1,1024,1024"
# Help message; no modification needed
if [[ $1 == --help || $1 == -h ]];then
echo"usage:bash ./ATC_AIR_2_OM.sh <args>"
echo " "
echo "parameter explain:"
echo "--model set model place, e.g. --model=/home/mcnn.air"
echo "--output set the name and place of OM model, e.g. --output=/home/HwHiAiUser/fixmatch310_tune4"
echo "--soc_version set the soc_version, default: --soc_version=Ascend310"
echo "--input_shape set the input node and shape, default: --input_shape=x:1,1,1024,1024"
echo "-h/--help show help message"
exit 1
fi
for para in "$@"
do
if [[ $para == --model* ]];then
model=`echo ${para#*=}`
elif [[ $para == --output* ]];then
output=`echo ${para#*=}`
elif [[ $para == --soc_version* ]];then
soc_version=`echo ${para#*=}`
elif [[ $para == --input_shape* ]];then
input_shape=`echo ${para#*=}`
fi
done
if [[ -z $model ]];then
echo "[Error] parameter \"--model\" must be specified"
exit 1
fi
if [[ -z $output ]];then
echo "[Error] parameter \"--output\" must be specified"
exit 1
fi
atc \
--model=${model} \
--output=${output} \
--soc_version=${soc_version} \
--input_shape=${input_shape} \
--framework=1 \
--input_format=NCHW
\ No newline at end of file
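A sample invocation of the conversion script, with placeholder paths:

# Hypothetical example; only --model and --output are mandatory.
bash ./ATC_AIR_2_OM.sh \
    --model=/home/user/mcnn.air \
    --output=/home/user/mcnn \
    --soc_version=Ascend310 \
    --input_shape="x:1,1,1024,1024"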
#!/usr/bin/env bash
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
docker_image=$1
data_path=$2
function show_help() {
echo "Usage: docker_start.sh docker_image data_path"
}
function param_check() {
if [ -z "${docker_image}" ]; then
echo "please input docker_image"
show_help
exit 1
fi
if [ -z "${data_path}" ]; then
echo "please input data_path"
show_help
exit 1
fi
}
param_check
docker run -it \
--device=/dev/davinci0 \
--device=/dev/davinci_manager \
--device=/dev/devmm_svm \
--device=/dev/hisi_hdc \
-v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
-v ${data_path}:${data_path} \
${docker_image} \
/bin/bash
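A sample invocation; the image name and data path are placeholders:

# Hypothetical example; data_path is mounted into the container at the same path.
bash docker_start.sh <docker_image:tag> /home/user/dataset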
cmake_minimum_required(VERSION 3.14.0)
project(mcnn)
set(TARGET_MAIN Mcnn)
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
add_definitions(-Dgoogle=mindxsdk_private)
add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall)
add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -s -pie)
if(NOT DEFINED ENV{ASCEND_HOME})
message(FATAL_ERROR "please define environment variable:ASCEND_HOME")
endif()
if(NOT DEFINED ENV{ARCH_PATTERN})
message(WARNING "please define environment variable:ARCH_PATTERN")
endif()
set(ACL_LIB_PATH $ENV{ASCEND_HOME}/nnrt/latest/acllib)
# ACL include and library directories referenced below
set(ACL_INC_DIR ${ACL_LIB_PATH}/include)
set(ACL_LIB_DIR ${ACL_LIB_PATH}/lib64)
set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
set(MXBASE_INC ${MXBASE_ROOT_DIR}/include)
set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib)
set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors)
set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/include/MxBase/postprocess/include)
if(NOT DEFINED ENV{MXSDK_OPENSOURCE_DIR})
message(WARNING "please define environment variable:MXSDK_OPENSOURCE_DIR")
endif()
set(OPENSOURCE_DIR $ENV{MXSDK_OPENSOURCE_DIR})
include_directories(src)
include_directories(${ACL_INC_DIR})
include_directories(${OPENSOURCE_DIR}/include)
include_directories(${OPENSOURCE_DIR}/include/opencv4)
include_directories(${MXBASE_INC})
include_directories(${MXBASE_POST_PROCESS_DIR})
link_directories(${ACL_LIB_DIR})
link_directories(${OPENSOURCE_DIR}/lib)
link_directories(${MXBASE_LIB_DIR})
link_directories(${MXBASE_POST_LIB_DIR})
add_executable(${TARGET_MAIN} src/main.cpp src/Mcnn.cpp)
target_link_libraries(${TARGET_MAIN} glog cpprest mxbase opencv_world)
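This CMake file reads ASCEND_HOME, MX_SDK_HOME and MXSDK_OPENSOURCE_DIR from the environment. A minimal sketch of a manual build, assuming a default Ascend install; all paths are placeholders:

# Hypothetical environment setup for a manual build.
export ASCEND_HOME=/usr/local/Ascend
export MX_SDK_HOME=/usr/local/mxVision
export MXSDK_OPENSOURCE_DIR=${MX_SDK_HOME}/opensource
export ARCH_PATTERN=.
mkdir -p build && cd build && cmake .. && make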
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
path_cur=$(dirname $0)
function check_env()
{
# set ASCEND_VERSION to ascend-toolkit/latest when it was not specified by user
if [ ! "${ASCEND_VERSION}" ]; then
export ASCEND_VERSION=ascend-toolkit/latest
echo "Set ASCEND_VERSION to the default value: ${ASCEND_VERSION}"
else
echo "ASCEND_VERSION is set to ${ASCEND_VERSION} by user"
fi
if [ ! "${ARCH_PATTERN}" ]; then
# set ARCH_PATTERN to ./ when it was not specified by user
export ARCH_PATTERN=./
echo "ARCH_PATTERN is set to the default value: ${ARCH_PATTERN}"
else
echo "ARCH_PATTERN is set to ${ARCH_PATTERN} by user"
fi
}
function build_mcnn()
{
cd $path_cur
rm -rf build
mkdir -p build
cd build
cmake ..
make
ret=$?
if [ ${ret} -ne 0 ]; then
echo "Failed to build srgan."
exit ${ret}
fi
mv Mcnn ../
}
export ASCEND_VERSION=nnrt/latest
export ARCH_PATTERN=.
export MXSDK_OPENSOURCE_DIR=${MX_SDK_HOME}/opensource
export LD_LIBRARY_PATH="${MX_SDK_HOME}/lib/plugins:${MX_SDK_HOME}/opensource/lib64:${MX_SDK_HOME}/lib:${MX_SDK_HOME}/lib/modelpostprocessors:${MX_SDK_HOME}/opensource/lib:${LD_LIBRARY_PATH}"
check_env
build_mcnn
\ No newline at end of file
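Since the script exports ASCEND_VERSION, ARCH_PATTERN and MXSDK_OPENSOURCE_DIR itself, a run only needs MX_SDK_HOME set, e.g. (placeholder install path):

export MX_SDK_HOME=/usr/local/mxVision
bash build.sh  # on success, the Mcnn binary is moved one directory up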
/*
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ============================================================================
*/
#include "Mcnn.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <cstdlib>
#include "MxBase/DeviceManager/DeviceManager.h"
#include "MxBase/Log/Log.h"
#include <opencv2/opencv.hpp>
using MxBase::TensorDesc;
using MxBase::TensorBase;
using MxBase::MemoryData;
using MxBase::MemoryHelper;
using MxBase::TENSOR_DTYPE_FLOAT32;
using MxBase::DynamicInfo;
using MxBase::DynamicType;
void PrintTensorShape(const std::vector<MxBase::TensorDesc> &tensorDescVec, const std::string &tensorName) {
LogInfo << "The shape of " << tensorName << " is as follows:";
for (size_t i = 0; i < tensorDescVec.size(); ++i) {
LogInfo << " Tensor " << i << ":";
for (size_t j = 0; j < tensorDescVec[i].tensorDims.size(); ++j) {
LogInfo << " dim: " << j << ": " << tensorDescVec[i].tensorDims[j];
}
}
}
void PrintInputShape(const std::vector<MxBase::TensorBase> &input) {
MxBase::TensorBase img = input[0];
LogInfo << " -------------------------input0 ";
LogInfo << img.GetDataType();
LogInfo << img.GetShape()[0] << ", " << img.GetShape()[1] \
<< ", " << img.GetShape()[2] << ", " << img.GetShape()[3];
LogInfo << img.GetSize();
}
APP_ERROR Mcnn::Init(const InitParam &initParam) {
deviceId_ = initParam.deviceId;
APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
if (ret != APP_ERR_OK) {
LogError << "Init devices failed, ret=" << ret << ".";
return ret;
}
ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
if (ret != APP_ERR_OK) {
LogError << "Set context failed, ret=" << ret << ".";
return ret;
}
dvppWrapper_ = std::make_shared<MxBase::DvppWrapper>();
ret = dvppWrapper_->Init();
if (ret != APP_ERR_OK) {
LogError << "DvppWrapper init failed, ret=" << ret << ".";
return ret;
}
model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
ret = model_->Init(initParam.modelPath, modelDesc_);
if (ret != APP_ERR_OK) {
LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
return ret;
}
srPath_ = initParam.srPath;
gtPath_ = initParam.gtPath;
PrintTensorShape(modelDesc_.inputTensors, "Model Input Tensors");
PrintTensorShape(modelDesc_.outputTensors, "Model Output Tensors");
return APP_ERR_OK;
}
APP_ERROR Mcnn::DeInit() {
dvppWrapper_->DeInit();
model_->DeInit();
MxBase::DeviceManager::GetInstance()->DestroyDevices();
return APP_ERR_OK;
}
APP_ERROR Mcnn::ReadImage(const std::string &imgPath, cv::Mat *imageMat) {
*imageMat = cv::imread(imgPath, 0);  // 0 = cv::IMREAD_GRAYSCALE (single channel)
return APP_ERR_OK;
}
std::string Trim(const std::string &str) {
std::string str_new = str;
str_new.erase(0, str.find_first_not_of(" \t\r\n"));
str_new.erase(str.find_last_not_of(" \t\r\n") + 1);
return str_new;
}
// Sum every value in a ground-truth density-map CSV; the total is the ground-truth crowd count.
float ReadCsv(const std::string &csvName) {
std::ifstream fin(csvName);
std::string line;
float num = 0;
while (getline(fin, line)) {
std::istringstream sin(line);
std::vector<std::string> fields;
std::string field;
int len = 0;
while (getline(sin, field, ',')) {
len++;
fields.push_back(field);
}
for (int i = 0; i < len; i++) {
std::string name = Trim(fields[i]);
float num_float = std::stof(name);
num = num + num_float;
}
}
return num;
}
APP_ERROR Mcnn::PadImage(const cv::Mat &imageMat, cv::Mat *imgPad) {
// Center the original image inside a 1024 x 1024 canvas and zero-fill the border
// (assumes the input is at most 1024 pixels in each dimension).
size_t W_o = imageMat.cols, H_o = imageMat.rows;
size_t W_b = 512 - W_o / 2;
size_t H_b = 512 - H_o / 2;
size_t W_e = W_b + W_o;
size_t H_e = H_b + H_o;
for (size_t h = 0; h < 1024; h++) {
for (size_t w = 0; w < 1024; w++) {
if (H_b <= h && h < H_e && W_b <= w && w < W_e) {
imgPad->at<uchar>(h, w) = imageMat.at<uchar>(h - H_b, w - W_b);
} else {
imgPad->at<uchar>(h, w) = 0;
}
}
}
return APP_ERR_OK;
}
APP_ERROR Mcnn::CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase *tensorBase) {
uint32_t dataSize = 1;
for (size_t i = 0; i < modelDesc_.inputTensors.size(); ++i) {
std::vector <uint32_t> shape = {};
for (size_t j = 0; j < modelDesc_.inputTensors[i].tensorDims.size(); ++j) {
shape.push_back((uint32_t) modelDesc_.inputTensors[i].tensorDims[j]);
}
for (uint32_t s = 0; s < shape.size(); ++s) {
dataSize *= shape[s];
}
}
APP_ERROR ret = PadImage(imageMat, &imgPad_);
if (ret != APP_ERR_OK) {
LogError << GetError(ret) << "Img pad error";
return ret;
}
// mat NHWC to NCHW
size_t H = 1024, W = 1024, C = 1;
LogInfo << "dataSize:" << dataSize;
dataSize = dataSize * 4;
int id;
for (size_t c = 0; c < C; c++) {
for (size_t h = 0; h < H; h++) {
for (size_t w = 0; w < W; w++) {
id = (C - c - 1) * (H * W) + h * W + w;
mat_data_[id] = static_cast<float>(imgPad_.at<uchar>(h, w));
}
}
}
MemoryData memoryDataDst(dataSize, MemoryData::MEMORY_DEVICE, deviceId_);
MemoryData memoryDataSrc(reinterpret_cast<void *>(&mat_data_[0]), dataSize, MemoryData::MEMORY_HOST_MALLOC);
ret = MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
if (ret != APP_ERR_OK) {
LogError << GetError(ret) << "Memory malloc failed.";
return ret;
}
std::vector <uint32_t> shape = {1, 1, 1024, 1024};
*tensorBase = MxBase::TensorBase(memoryDataDst, false, shape, TENSOR_DTYPE_FLOAT32);
return APP_ERR_OK;
}
APP_ERROR Mcnn::Inference(const std::vector<MxBase::TensorBase> &inputs,
std::vector<MxBase::TensorBase> *outputs) {
auto dtypes = model_->GetOutputDataType();
for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
std::vector<uint32_t> shape = {};
for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
}
MxBase::TensorBase tensor(shape, dtypes[i], MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
APP_ERROR ret = TensorBase::TensorBaseMalloc(tensor);
if (ret != APP_ERR_OK) {
LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
return ret;
}
outputs->push_back(tensor);
}
MxBase::DynamicInfo dynamicInfo = {};
dynamicInfo.dynamicType = DynamicType::STATIC_BATCH;
dynamicInfo.batchSize = 1;
APP_ERROR ret = model_->ModelInference(inputs, *outputs, dynamicInfo);
if (ret != APP_ERR_OK) {
LogError << "ModelInference failed, ret=" << ret << ".";
return ret;
}
return APP_ERR_OK;
}
APP_ERROR Mcnn::PostProcess(std::vector<MxBase::TensorBase> outputs, const std::string &imgName) {
LogInfo << "output_size:" << outputs.size();
LogInfo << "output0_datatype:" << outputs[0].GetDataType();
LogInfo << "output0_shape:" << outputs[0].GetShape()[0] << ", " \
<< outputs[0].GetShape()[1] << ", " << outputs[0].GetShape()[2] << ", " << outputs[0].GetShape()[3];
LogInfo << "output0_bytesize:" << outputs[0].GetByteSize();
APP_ERROR ret = outputs[0].ToHost();
if (ret != APP_ERR_OK) {
LogError << GetError(ret) << "tohost fail.";
return ret;
}
float *outputPtr = reinterpret_cast<float *>(outputs[0].GetBuffer());
size_t H = 1024 / 4, W = 1024 / 4, C = 1;  // MCNN outputs a density map at 1/4 of the input resolution
float tmpNum;
float pre = 0;
for (size_t c = 0; c < C; c++) {
for (size_t h = 0; h < H; h++) {
for (size_t w = 0; w < W; w++) {
tmpNum = static_cast<float>(*(outputPtr + (C - c - 1) * (H * W) + h * W + w));
pre = pre + tmpNum;
}
}
}
// Derive the ground-truth CSV name by replacing the 3-character image extension with "csv".
std::string imgName2 = imgName;
int len = imgName.length();
imgName2[len - 3] = 'c';
imgName2[len - 2] = 's';
imgName2[len - 1] = 'v';
std::string gtName = gtPath_;
gtName.append(imgName2);
LogInfo << gtName;
LogInfo << "pre:" << pre;
float gt_count = ReadCsv(gtName);
LogInfo << "gt:" << gt_count;
mae += fabs(gt_count - pre);                      // accumulate absolute error
mse = mse + (gt_count - pre) * (gt_count - pre);  // accumulate squared error
LogInfo << "mae:" << fabs(gt_count - pre);
return APP_ERR_OK;
}
APP_ERROR Mcnn::Process(const std::string &imgPath, const std::string &imgName) {
LogInfo << imgName;
cv::Mat imageMat;
APP_ERROR ret = ReadImage(imgPath, &imageMat);
if (ret != APP_ERR_OK) {
LogError << "ReadImage failed, ret=" << ret << ".";
return ret;
}
size_t o_img_W = imageMat.cols, o_img_H = imageMat.rows, o_img_C = imageMat.channels();
LogInfo << o_img_C << "," << o_img_H << "," << o_img_W;
std::vector<MxBase::TensorBase> inputs = {};
std::vector<MxBase::TensorBase> outputs = {};
MxBase::TensorBase tensorBase;
ret = CVMatToTensorBase(imageMat, &tensorBase);
cv::imwrite(srPath_ + "/" + imgName, imgPad_);
if (ret != APP_ERR_OK) {
LogError << "CVMatToTensorBase failed, ret=" << ret << ".";
return ret;
}
inputs.push_back(tensorBase);
auto startTime = std::chrono::high_resolution_clock::now();
ret = Inference(inputs, &outputs);
auto endTime = std::chrono::high_resolution_clock::now();
double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count(); // save time
inferCostTimeMilliSec += costMs;
if (ret != APP_ERR_OK) {
LogError << "Inference failed, ret=" << ret << ".";
return ret;
}
ret = PostProcess(outputs, imgName);
if (ret != APP_ERR_OK) {
LogError << "PostProcess failed, ret=" << ret << ".";
return ret;
}
return APP_ERR_OK;
}
/*
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ============================================================================
*/
#ifndef MCNN_H
#define MCNN_H
#include <dirent.h>
#include <memory>
#include <vector>
#include <map>
#include <string>
#include <fstream>
#include <iostream>
#include <opencv2/opencv.hpp>
#include "MxBase/Log/Log.h"
#include "MxBase/DvppWrapper/DvppWrapper.h"
#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
#include "MxBase/Tensor/TensorContext/TensorContext.h"
#include "MxBase/DeviceManager/DeviceManager.h"
struct InitParam {
uint32_t deviceId;
bool checkTensor;
std::string modelPath;
std::string srPath;
std::string gtPath;
};
class Mcnn {
public:
APP_ERROR Init(const InitParam &initParam);
APP_ERROR DeInit();
APP_ERROR ReadImage(const std::string &imgPath, cv::Mat *imageMat);
APP_ERROR PadImage(const cv::Mat &img, cv::Mat *imgPad);
APP_ERROR CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase *tensorBase);
APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> *outputs);
APP_ERROR PostProcess(std::vector<MxBase::TensorBase> outputs, const std::string &imgName);
APP_ERROR Process(const std::string &imgPath, const std::string &imgName);
// get infer time
double GetInferCostMilliSec() const {return inferCostTimeMilliSec;}
double GetPSNR() const {return psnr_;}
float getmae() const {return mae;}
float getmse() const {return mse;}
private:
std::shared_ptr<MxBase::DvppWrapper> dvppWrapper_;
std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
std::string srPath_;
std::string gtPath_;
MxBase::ModelDesc modelDesc_;
double_t psnr_ = 0;
uint32_t deviceId_ = 0;
// infer time
double inferCostTimeMilliSec = 0.0;
float *mat_data_ = new float[1048576];  // 1024 * 1024 float input buffer
cv::Mat imgPad_ = cv::Mat(1024, 1024, CV_8UC1);  // padded grayscale input, accessed via at<uchar>
float mae = 0.0;
float mse = 0.0;
};
#endif
/*
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ============================================================================
*/
#include "Mcnn.h"
APP_ERROR ScanImages(const std::string &path, std::vector<std::string> *imgFiles) {
DIR *dirPtr = opendir(path.c_str());
if (dirPtr == nullptr) {
LogError << "opendir failed. dir:" << path;
return APP_ERR_INTERNAL_ERROR;
}
dirent *direntPtr = nullptr;
while ((direntPtr = readdir(dirPtr)) != nullptr) {
std::string fileName = direntPtr->d_name;
if (fileName == "." || fileName == "..")
continue;
imgFiles->emplace_back(fileName);
}
closedir(dirPtr);
return APP_ERR_OK;
}
int main(int argc, char* argv[]) {
if (argc <= 4) {
LogWarn << "Wrong number of arguments. Usage: './Mcnn [model_path] [data_path] [label_path] [output_path]'.";
return APP_ERR_OK;
}
const std::string modelPath = argv[1];
std::string inputPath = argv[2];
std::string gtPath = argv[3];
std::string srPath = argv[4];
InitParam initParam = {};
initParam.deviceId = 0;
initParam.checkTensor = true;
initParam.modelPath = modelPath;
initParam.srPath = srPath;
initParam.gtPath = gtPath;
auto mcnn = std::make_shared<Mcnn>();
APP_ERROR ret = mcnn->Init(initParam);
if (ret != APP_ERR_OK) {
LogError << "mcnn init failed, ret=" << ret << ".";
return ret;
}
std::vector<std::string> imgFilePaths;
ret = ScanImages(inputPath, &imgFilePaths);
if (ret != APP_ERR_OK) {
LogError << "mcnn lq img scan error, ret=" << ret << ".";
return ret;
}
auto startTime = std::chrono::high_resolution_clock::now();
int totalNum = 0;
sort(imgFilePaths.begin(), imgFilePaths.end());
int imgFilePaths_size = imgFilePaths.size();
for (int i = 0; i < imgFilePaths_size; i++) {
LogInfo << imgFilePaths[i];
}
for (auto &imgFile : imgFilePaths) {
LogInfo << totalNum;
ret = mcnn->Process(inputPath+'/'+imgFile, imgFile);
++totalNum;
if (ret != APP_ERR_OK) {
LogError << "mcnn process failed, ret=" << ret << ".";
mcnn->DeInit();
return ret;
}
}
float mae = mcnn->getmae()/totalNum;
float mse = sqrt(mcnn->getmse()/totalNum);
LogInfo << "mae:" << mae;
LogInfo << "mse:" << mse;
auto endTime = std::chrono::high_resolution_clock::now();
mcnn->DeInit();
double costMilliSecs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
double fps = 1000.0 * imgFilePaths.size() / mcnn->GetInferCostMilliSec();
LogInfo << "[Process Delay] cost: " << costMilliSecs << " ms\tfps: " << fps << " imgs/sec";
return APP_ERR_OK;
}
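Matching the argument order checked in main(), a hypothetical run of the mxbase executable (all paths are placeholders):

# ./Mcnn [model_path] [data_path] [label_path] [output_path]
./Mcnn ../model/mcnn.om ./dataset/images ./dataset/ground_truth ./output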
# coding=utf-8
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import datetime
import os
import sys
import numpy as np
import cv2
from StreamManagerApi import StreamManagerApi
from StreamManagerApi import MxDataInput
from StreamManagerApi import StringVector
from StreamManagerApi import MxProtobufIn
from StreamManagerApi import InProtobufVector
import MxpiDataType_pb2 as MxpiDataType
if __name__ == '__main__':
# init stream manager
stream_manager_api = StreamManagerApi()
ret = stream_manager_api.InitManager()
if ret != 0:
print("Failed to init Stream manager, ret=%s" % str(ret))
exit()
# create streams by pipeline config file
with open("./mcnn.pipeline", 'rb') as f:
pipelineStr = f.read()
ret = stream_manager_api.CreateMultipleStreams(pipelineStr)
if ret != 0:
print("Failed to create Stream, ret=%s" % str(ret))
exit()
# Construct the input of the stream
data_input = MxDataInput()
dir_name = sys.argv[1]
gt_name = sys.argv[2]
file_list = os.listdir(dir_name)
file_list.sort()
mae = 0
mse = 0
start_time = datetime.datetime.now()
for file_name in file_list:
file_path = os.path.join(dir_name, file_name)
gt_path = os.path.join(gt_name, file_name[:-3] + 'csv')
if not (file_name.lower().endswith(".jpg") or file_name.lower().endswith(".jpeg") \
or file_name.lower().endswith(".png")):
continue
stream_name = b'mcnn_opencv'
in_plugin_id = 0
input_key = 'appsrc0'
img = cv2.imread(file_path, 0)
img = img.astype(np.float32, copy=False)
ht = img.shape[0]
wd = img.shape[1]
hang_left = 512 - int(ht / 2)
hang_right = 1024 - hang_left - ht
lie_left = 512 - int(wd / 2)
lie_right = 1024 - lie_left - wd
img = np.pad(img, ((hang_left, hang_right), (lie_left, lie_right)), 'constant')
img = img.reshape((1, 1, 1024, 1024))
tensor_list = MxpiDataType.MxpiTensorPackageList()
tensor_pkg = tensor_list.tensorPackageVec.add()
tensor_vec = tensor_pkg.tensorVec.add()
tensor_vec.memType = 0
tensor_vec.tensorShape.extend(img.shape)
tensor_vec.tensorDataType = 0
tensor_vec.dataStr = img.tobytes()
tensor_vec.tensorDataSize = len(tensor_vec.dataStr)  # byte size of the tensor, not the array length
buf_type = b"MxTools.MxpiTensorPackageList"
protobuf = MxProtobufIn()
protobuf.key = input_key.encode("utf-8")
protobuf.type = buf_type
protobuf.protobuf = tensor_list.SerializeToString()
protobuf_vec = InProtobufVector()
protobuf_vec.push_back(protobuf)
err_code = stream_manager_api.SendProtobuf(stream_name, in_plugin_id, protobuf_vec)
if err_code != 0:
print("Failed to send data to stream, stream_name(%s), plugin_id(%s), element_name(%s), "
"buf_type(%s), err_code(%s)." % (stream_name, in_plugin_id, input_key, buf_type, err_code))
keys = [b"mxpi_tensorinfer0",]
keyVec = StringVector()
for key in keys:
keyVec.push_back(key)
infer_result = stream_manager_api.GetProtobuf(stream_name, 0, keyVec)
if infer_result.size() == 0:
print("infer_result is null")
exit()
if infer_result[0].errorCode != 0:
print("GetProtobuf error. errorCode=%d" % (
infer_result[0].errorCode))
exit()
TensorList = MxpiDataType.MxpiTensorPackageList()
TensorList.ParseFromString(infer_result[0].messageBuf)
data = np.frombuffer(TensorList.tensorPackageVec[0].tensorVec[0].dataStr, dtype=np.float32)
den = np.loadtxt(open(gt_path, "rb"), delimiter=",", skiprows=0)
den = den.astype(np.float32, copy=False)
gt_count = np.sum(den)
et_count = np.sum(data)
mae += abs(gt_count - et_count)
mse += ((gt_count - et_count) * (gt_count - et_count))
print(file_path, "True value:", np.sum(den), "predictive value:", np.sum(data))
mae = mae / 182  # 182 images in the ShanghaiTech Part A test set
mse = np.sqrt(mse / 182)
end_time = datetime.datetime.now()
print("*********************************************")
print("Final accuracy of the project:")
print('MAE:', mae, ' MSE:', mse)
print("*********************************************")
print("Overall project performance:")
print(182 / (end_time - start_time).total_seconds(), "images/second")
# destroy streams
stream_manager_api.DestroyAllStreams()
{
"mcnn_opencv": {
"appsrc0": {
"factory": "appsrc",
"next": "mxpi_tensorinfer0"
},
"mxpi_tensorinfer0": {
"props": {
"dataSource": "appsrc0",
"modelPath": "../model/mcnn.om",
"waitingTime": "2000"
},
"factory": "mxpi_tensorinfer",
"next": "appsink0"
},
"appsink0": {
"factory": "appsink"
}
}
}
\ No newline at end of file
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
input_dir=$1
gt_dir=$2
set -e
# Simple log helper functions
info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
# Add the SDK python directory to PYTHONPATH so that StreamManagerApi can be imported
export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
python3.7 main.py "$input_dir" "$gt_dir"
exit 0
\ No newline at end of file
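A hypothetical invocation, passing the test-image directory and the ground-truth CSV directory (placeholder paths):

bash run.sh ../dataset/images ../dataset/ground_truth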
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
######################## train mcnn example ########################
Train MCNN and generate network model files (.ckpt):
python train.py
"""
import os
import argparse
import ast
import numpy as np
import mindspore.nn as nn
import mindspore
from mindspore.context import ParallelMode
from mindspore import context, Tensor
from mindspore.communication.management import init, get_rank
from mindspore.train.callback import LossMonitor, TimeMonitor
from mindspore.train.serialization import export, load_checkpoint
from mindspore.train import Model
from src.data_loader import ImageDataLoader
from src.config import crowd_cfg as cfg
from src.dataset import create_dataset
from src.mcnn import MCNN
from src.generator_lr import get_lr_sha
from src.Mcnn_Callback import mcnn_callback
parser = argparse.ArgumentParser(description='MindSpore MCNN Example')
parser.add_argument('--run_offline', type=ast.literal_eval,
default=False, help='whether to run offline: True or False')
parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU'],
help='device where the code will be implemented (default: Ascend)')
parser.add_argument("--batch_size", type=int, default=1, help="batch size")
parser.add_argument("--lr", type=float, default=cfg['lr'], help="learning rate")
parser.add_argument("--momentum", type=float, default=cfg['momentum'], help="momentum")
parser.add_argument("--epoch_size", type=int, default=cfg['epoch_size'], help="number of training epochs")
parser.add_argument("--input_size", type=int, default=1024, help="input image size")
parser.add_argument('--ckpt_path', type=str, default="./train_output", help='Location of ckpt.')
parser.add_argument('--data_url', default=None, help='Location of data.')
parser.add_argument('--train_url', default=None, help='Location of training outputs.')
parser.add_argument('--train_path', default='../MCNN/data/formatted_trainval/shanghaitech_part_A_patches_9/train',
help='Location of training images.')
parser.add_argument('--train_gt_path',
default='../MCNN/data/formatted_trainval/shanghaitech_part_A_patches_9/train_den',
help='Location of training ground-truth density maps.')
parser.add_argument('--val_path', default='../MCNN/data/formatted_trainval/shanghaitech_part_A_patches_9/val',
help='Location of validation images.')
parser.add_argument('--val_gt_path', default='../MCNN/data/formatted_trainval/shanghaitech_part_A_patches_9/val_den',
help='Location of validation ground-truth density maps.')
args = parser.parse_args()
rand_seed = 64678
np.random.seed(rand_seed)
if __name__ == "__main__":
device_num = int(os.getenv("RANK_SIZE", '1'))
device_id = int(os.getenv("DEVICE_ID", '0'))
print("device_id:", device_id)
print("device_num:", device_num)
device_target = args.device_target
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
context.set_context(save_graphs=False)
print(args.data_url)
if device_target == "GPU":
context.set_context(enable_graph_kernel=True)
device_id = 0
if device_num > 1:
init()
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
device_id = get_rank()
elif device_target == "Ascend":
context.set_context(device_id=device_id)
if device_num > 1:
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
init()
else:
raise ValueError("Unsupported platform.")
if args.run_offline:
local_data1_url = args.train_path
local_data2_url = args.train_gt_path
local_data3_url = args.val_path
local_data4_url = args.val_gt_path
else:
import moxing as mox
local_data1_url = '/cache/train_path'
local_data2_url = '/cache/train_gt_path'
local_data3_url = '/cache/val_path'
local_data4_url = '/cache/val_gt_path'
args.train_path = os.path.join(args.data_url, "formatted_trainval/shanghaitech_part_A_patches_9/train")
args.train_gt_path = os.path.join(args.data_url, "formatted_trainval/shanghaitech_part_A_patches_9/train_den")
args.val_path = os.path.join(args.data_url, "formatted_trainval/shanghaitech_part_A_patches_9/val")
args.val_gt_path = os.path.join(args.data_url, "formatted_trainval/shanghaitech_part_A_patches_9/val_den")
args.ckpt_path = args.train_url
mox.file.copy_parallel(src_url=args.train_path, dst_url=local_data1_url) # pcl
mox.file.copy_parallel(src_url=args.train_gt_path, dst_url=local_data2_url) # pcl
mox.file.copy_parallel(src_url=args.val_path, dst_url=local_data3_url) # pcl
mox.file.copy_parallel(src_url=args.val_gt_path, dst_url=local_data4_url) # pcl
data_loader = ImageDataLoader(local_data1_url, local_data2_url, shuffle=True, gt_downsample=True, pre_load=True)
data_loader_val = ImageDataLoader(local_data3_url, local_data4_url,
shuffle=False, gt_downsample=True, pre_load=True)
ds_train = create_dataset(data_loader, target=args.device_target)
ds_val = create_dataset(data_loader_val, target=args.device_target, train=False)
ds_train = ds_train.batch(args.batch_size)
ds_val = ds_val.batch(1)
network = MCNN()
net_loss = nn.MSELoss(reduction='mean')
lr = Tensor(get_lr_sha(0, args.lr, args.epoch_size, ds_train.get_dataset_size()))
net_opt = nn.Adam(list(filter(lambda p: p.requires_grad, network.get_parameters())), learning_rate=lr)
model = Model(network, net_loss, net_opt, amp_level="O2")
print("============== Starting Training ==============")
time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())
eval_callback = mcnn_callback(network, ds_val, args.run_offline, args.ckpt_path)
model.train(args.epoch_size, ds_train, callbacks=[time_cb, eval_callback, LossMonitor(1)])
if not args.run_offline:
mox.file.copy_parallel(src_url='/cache/train_output', dst_url=args.ckpt_path)
load_checkpoint('/cache/train_output/best.ckpt', net=network)
inputs = Tensor(np.ones([args.batch_size, 1, args.input_size, args.input_size]), mindspore.float32)
export(network, inputs, file_name="mcnn", file_format="AIR")
print("MCNN exported")
mox.file.copy(src_url='mcnn.air', dst_url=os.path.join(args.train_url, 'mcnn.air'))
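A sketch of an offline training run, reusing the argparse defaults above (dataset paths are placeholders):

# Hypothetical example; --run_offline=True skips the moxing/ModelArts data copy.
python train.py --run_offline=True \
    --train_path=../MCNN/data/formatted_trainval/shanghaitech_part_A_patches_9/train \
    --train_gt_path=../MCNN/data/formatted_trainval/shanghaitech_part_A_patches_9/train_den \
    --val_path=../MCNN/data/formatted_trainval/shanghaitech_part_A_patches_9/val \
    --val_gt_path=../MCNN/data/formatted_trainval/shanghaitech_part_A_patches_9/val_den \
    --ckpt_path=./train_output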
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
docker_image=$1
data_dir=$2
model_dir=$3
docker run -it --ipc=host \
--device=/dev/davinci0 \
--device=/dev/davinci1 \
--device=/dev/davinci2 \
--device=/dev/davinci3 \
--device=/dev/davinci4 \
--device=/dev/davinci5 \
--device=/dev/davinci6 \
--device=/dev/davinci7 \
--device=/dev/davinci_manager \
--device=/dev/devmm_svm --device=/dev/hisi_hdc \
-v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
-v /usr/local/Ascend/add-ons/:/usr/local/Ascend/add-ons/ \
-v ${model_dir}:${model_dir} \
-v ${data_dir}:${data_dir} \
-v ~/ascend/log/npu/conf/slog/slog.conf:/var/log/npu/conf/slog/slog.conf \
-v ~/ascend/log/npu/slog/:/var/log/npu/slog -v ~/ascend/log/npu/profiling/:/var/log/npu/profiling \
-v ~/ascend/log/npu/dump/:/var/log/npu/dump -v ~/ascend/log/npu/:/usr/slog ${docker_image} \
/bin/bash
\ No newline at end of file
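A hypothetical invocation for the ModelArts training container; image, data and model paths are placeholders:

bash docker_start.sh <docker_image:tag> /home/user/mcnn_data /home/user/mcnn_model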